chansung committed
Commit 74cd6fc · verified · 1 Parent(s): d4dfc13

Model save

README.md ADDED
@@ -0,0 +1,69 @@
+ ---
+ library_name: peft
+ license: gemma
+ base_model: google/gemma-7b
+ tags:
+ - trl
+ - sft
+ - generated_from_trainer
+ datasets:
+ - generator
+ model-index:
+ - name: gemma7b-kasa-coding-11-v1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # gemma7b-kasa-coding-11-v1
+
+ This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on the generator dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 9.9363
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
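+ As a usage illustration (not part of the auto-generated card), the sketch below shows one way this PEFT adapter could be loaded on top of the base model with `peft` and `transformers`. The repository id `chansung/gemma7b-kasa-coding-11-v1` and the prompt are assumptions for the example, not facts recorded in the card.
+
+ ```python
+ # Minimal sketch, assuming the adapter is published as "chansung/gemma7b-kasa-coding-11-v1".
+ from transformers import AutoTokenizer
+ from peft import AutoPeftModelForCausalLM
+
+ adapter_id = "chansung/gemma7b-kasa-coding-11-v1"  # assumed repository id
+
+ # AutoPeftModelForCausalLM loads the base model recorded in the adapter config
+ # (google/gemma-7b) and applies the adapter weights on top of it.
+ model = AutoPeftModelForCausalLM.from_pretrained(adapter_id, device_map="auto")
+ tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
+
+ inputs = tokenizer("def fibonacci(n):", return_tensors="pt").to(model.device)
+ outputs = model.generate(**inputs, max_new_tokens=64)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```
+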
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a sketch of how they map onto a trainer configuration follows the list):
+ - learning_rate: 0.0002
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 128
+ - total_eval_batch_size: 64
+ - optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
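+ As an illustration only, the hyperparameters above could be expressed with a `transformers.TrainingArguments` configuration roughly as follows. The exact training script, dataset preparation, and LoRA settings for this run are not recorded in the card, so anything beyond the listed hyperparameters (e.g. the output directory name, `bf16`) is an assumption.
+
+ ```python
+ # Minimal sketch, assuming a standard TRL/transformers SFT-style setup.
+ from transformers import TrainingArguments
+
+ training_args = TrainingArguments(
+     output_dir="gemma7b-kasa-coding-11-v1",  # assumed output directory
+     learning_rate=2e-4,                      # learning_rate: 0.0002
+     per_device_train_batch_size=8,           # train_batch_size
+     per_device_eval_batch_size=8,            # eval_batch_size
+     gradient_accumulation_steps=2,           # 8 GPUs x 8 per device x 2 -> total_train_batch_size 128
+     num_train_epochs=1,
+     lr_scheduler_type="cosine",
+     warmup_ratio=0.1,                        # lr_scheduler_warmup_ratio
+     seed=42,
+     bf16=True,                               # assumption: precision is not recorded in the card
+ )
+ ```
+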
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 10.3696       | 1.0   | 140  | 9.9363          |
+
+
+ ### Framework versions
+
+ - PEFT 0.13.1.dev0
+ - Transformers 4.46.2
+ - PyTorch 2.5.1+cu124
+ - Datasets 3.1.0
+ - Tokenizers 0.20.3
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 1.0,
+     "total_flos": 4.268850850782249e+17,
+     "train_loss": 14.163321549551828,
+     "train_runtime": 384.2161,
+     "train_samples": 51241,
+     "train_samples_per_second": 46.599,
+     "train_steps_per_second": 0.364
+ }
runs/Nov18_16-49-44_bold-food-flourishes-fin-02/events.out.tfevents.1731950056.bold-food-flourishes-fin-02.72110.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b66d68b80a2325a4751cf94d94d2c13cc725dbf083491c749c7a645e9c0a4a06
- size 11737
+ oid sha256:08dfef669035b29af38b724a9d0a9860238cf054abca4afa72601b8936919fbd
+ size 12362
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 1.0,
+     "total_flos": 4.268850850782249e+17,
+     "train_loss": 14.163321549551828,
+     "train_runtime": 384.2161,
+     "train_samples": 51241,
+     "train_samples_per_second": 46.599,
+     "train_steps_per_second": 0.364
+ }
trainer_state.json ADDED
@@ -0,0 +1,253 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "eval_steps": 500,
+   "global_step": 140,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.007142857142857143,
+       "grad_norm": 1138.7742919921875,
+       "learning_rate": 1.4285714285714285e-05,
+       "loss": 48.0816,
+       "step": 1
+     },
+     {
+       "epoch": 0.03571428571428571,
+       "grad_norm": 242.92247009277344,
+       "learning_rate": 7.142857142857143e-05,
+       "loss": 38.2264,
+       "step": 5
+     },
+     {
+       "epoch": 0.07142857142857142,
+       "grad_norm": 99.10001373291016,
+       "learning_rate": 0.00014285714285714287,
+       "loss": 26.8907,
+       "step": 10
+     },
+     {
+       "epoch": 0.10714285714285714,
+       "grad_norm": 51.8281364440918,
+       "learning_rate": 0.00019996891820008164,
+       "loss": 23.0483,
+       "step": 15
+     },
+     {
+       "epoch": 0.14285714285714285,
+       "grad_norm": 16.425758361816406,
+       "learning_rate": 0.00019888308262251285,
+       "loss": 20.2011,
+       "step": 20
+     },
+     {
+       "epoch": 0.17857142857142858,
+       "grad_norm": 8.041358947753906,
+       "learning_rate": 0.0001962624246950012,
+       "loss": 18.1936,
+       "step": 25
+     },
+     {
+       "epoch": 0.21428571428571427,
+       "grad_norm": 5.986516952514648,
+       "learning_rate": 0.00019214762118704076,
+       "loss": 16.4036,
+       "step": 30
+     },
+     {
+       "epoch": 0.25,
+       "grad_norm": 3.8124237060546875,
+       "learning_rate": 0.00018660254037844388,
+       "loss": 15.1102,
+       "step": 35
+     },
+     {
+       "epoch": 0.2857142857142857,
+       "grad_norm": 3.229938268661499,
+       "learning_rate": 0.00017971325072229226,
+       "loss": 14.0912,
+       "step": 40
+     },
+     {
+       "epoch": 0.32142857142857145,
+       "grad_norm": 3.9065780639648438,
+       "learning_rate": 0.00017158668492597186,
+       "loss": 13.3336,
+       "step": 45
+     },
+     {
+       "epoch": 0.35714285714285715,
+       "grad_norm": 4.85410213470459,
+       "learning_rate": 0.00016234898018587337,
+       "loss": 12.878,
+       "step": 50
+     },
+     {
+       "epoch": 0.39285714285714285,
+       "grad_norm": 4.169612884521484,
+       "learning_rate": 0.0001521435203379498,
+       "loss": 12.3397,
+       "step": 55
+     },
+     {
+       "epoch": 0.42857142857142855,
+       "grad_norm": 2.173008441925049,
+       "learning_rate": 0.00014112871031306119,
+       "loss": 11.9716,
+       "step": 60
+     },
+     {
+       "epoch": 0.4642857142857143,
+       "grad_norm": 2.008362054824829,
+       "learning_rate": 0.00012947551744109043,
+       "loss": 11.6934,
+       "step": 65
+     },
+     {
+       "epoch": 0.5,
+       "grad_norm": 3.3055272102355957,
+       "learning_rate": 0.00011736481776669306,
+       "loss": 11.4663,
+       "step": 70
+     },
+     {
+       "epoch": 0.5357142857142857,
+       "grad_norm": 1.9420216083526611,
+       "learning_rate": 0.00010498458856606972,
+       "loss": 11.2446,
+       "step": 75
+     },
+     {
+       "epoch": 0.5714285714285714,
+       "grad_norm": 1.257276177406311,
+       "learning_rate": 9.252699064135758e-05,
+       "loss": 11.0546,
+       "step": 80
+     },
+     {
+       "epoch": 0.6071428571428571,
+       "grad_norm": 1.5491865873336792,
+       "learning_rate": 8.018538568006027e-05,
+       "loss": 10.8896,
+       "step": 85
+     },
+     {
+       "epoch": 0.6428571428571429,
+       "grad_norm": 21.172082901000977,
+       "learning_rate": 6.815133497483157e-05,
+       "loss": 10.7927,
+       "step": 90
+     },
+     {
+       "epoch": 0.6785714285714286,
+       "grad_norm": 1.501454472541809,
+       "learning_rate": 5.6611626088244194e-05,
+       "loss": 10.7241,
+       "step": 95
+     },
+     {
+       "epoch": 0.7142857142857143,
+       "grad_norm": 1.7851982116699219,
+       "learning_rate": 4.574537361342407e-05,
+       "loss": 10.5994,
+       "step": 100
+     },
+     {
+       "epoch": 0.75,
+       "grad_norm": 2.065108299255371,
+       "learning_rate": 3.5721239031346066e-05,
+       "loss": 10.5151,
+       "step": 105
+     },
+     {
+       "epoch": 0.7857142857142857,
+       "grad_norm": 1.3877239227294922,
+       "learning_rate": 2.669481281701739e-05,
+       "loss": 10.5179,
+       "step": 110
+     },
+     {
+       "epoch": 0.8214285714285714,
+       "grad_norm": 1.3329989910125732,
+       "learning_rate": 1.880619942841435e-05,
+       "loss": 10.4364,
+       "step": 115
+     },
+     {
+       "epoch": 0.8571428571428571,
+       "grad_norm": 1.767424464225769,
+       "learning_rate": 1.2177842662977135e-05,
+       "loss": 10.4225,
+       "step": 120
+     },
+     {
+       "epoch": 0.8928571428571429,
+       "grad_norm": 1.3661062717437744,
+       "learning_rate": 6.9126251355795864e-06,
+       "loss": 10.4211,
+       "step": 125
+     },
+     {
+       "epoch": 0.9285714285714286,
+       "grad_norm": 1.1075615882873535,
+       "learning_rate": 3.092271377092215e-06,
+       "loss": 10.3691,
+       "step": 130
+     },
+     {
+       "epoch": 0.9642857142857143,
+       "grad_norm": 1.3276323080062866,
+       "learning_rate": 7.760793399827937e-07,
+       "loss": 10.3975,
+       "step": 135
+     },
+     {
+       "epoch": 1.0,
+       "grad_norm": 1.1502283811569214,
+       "learning_rate": 0.0,
+       "loss": 10.3696,
+       "step": 140
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 9.936275482177734,
+       "eval_runtime": 1.4505,
+       "eval_samples_per_second": 122.712,
+       "eval_steps_per_second": 2.068,
+       "step": 140
+     },
+     {
+       "epoch": 1.0,
+       "step": 140,
+       "total_flos": 4.268850850782249e+17,
+       "train_loss": 14.163321549551828,
+       "train_runtime": 384.2161,
+       "train_samples_per_second": 46.599,
+       "train_steps_per_second": 0.364
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 140,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 100,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 4.268850850782249e+17,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }