chansung committed on
Commit
e65c3d1
1 Parent(s): 65e9e24

Model save

Browse files
README.md ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: gemma
3
+ library_name: peft
4
+ tags:
5
+ - trl
6
+ - sft
7
+ - generated_from_trainer
8
+ base_model: google/gemma-2b
9
+ datasets:
10
+ - generator
11
+ model-index:
12
+ - name: gemma2b-summarize-gemini1_5flash-16k
13
+ results: []
14
+ ---
15
+
16
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
17
+ should probably proofread and complete it, then remove this comment. -->
18
+
19
+ # gemma2b-summarize-gemini1_5flash-16k
20
+
21
+ This model is a fine-tuned version of [google/gemma-2b](https://huggingface.co/google/gemma-2b) on the generator dataset.
22
+ It achieves the following results on the evaluation set:
23
+ - Loss: 2.5319
24
+
25
+ ## Model description
26
+
27
+ More information needed
28
+
29
+ ## Intended uses & limitations
30
+
31
+ More information needed
32
+
33
+ ## Training and evaluation data
34
+
35
+ More information needed
36
+
37
+ ## Training procedure
38
+
39
+ ### Training hyperparameters
40
+
41
+ The following hyperparameters were used during training:
42
+ - learning_rate: 0.0002
43
+ - train_batch_size: 8
44
+ - eval_batch_size: 8
45
+ - seed: 42
46
+ - distributed_type: multi-GPU
47
+ - num_devices: 4
48
+ - gradient_accumulation_steps: 2
49
+ - total_train_batch_size: 64
50
+ - total_eval_batch_size: 32
51
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
52
+ - lr_scheduler_type: cosine
53
+ - lr_scheduler_warmup_ratio: 0.1
54
+ - num_epochs: 10
55
+
56
+ ### Training results
57
+
58
+ | Training Loss | Epoch | Step | Validation Loss |
59
+ |:-------------:|:------:|:----:|:---------------:|
60
+ | 2.0246 | 0.9811 | 26 | 2.6613 |
61
+ | 1.3202 | 2.0 | 53 | 2.5405 |
62
+ | 1.1694 | 2.9811 | 79 | 2.5125 |
63
+ | 1.1076 | 4.0 | 106 | 2.5138 |
64
+ | 1.0651 | 4.9811 | 132 | 2.5086 |
65
+ | 1.0394 | 6.0 | 159 | 2.5248 |
66
+ | 1.0232 | 6.9811 | 185 | 2.5264 |
67
+ | 1.0042 | 8.0 | 212 | 2.5296 |
68
+ | 1.0109 | 8.9811 | 238 | 2.5319 |
69
+ | 1.0064 | 9.8113 | 260 | 2.5319 |
70
+
71
+
72
+ ### Framework versions
73
+
74
+ - PEFT 0.11.1
75
+ - Transformers 4.40.1
76
+ - Pytorch 2.2.0+cu121
77
+ - Datasets 2.19.2
78
+ - Tokenizers 0.19.1
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:bea73046909a36a0f3f061bf594abff0ea9319ce6157e94affcb4bb0f2e6a920
3
  size 19644912
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79b70f18c4af19c965e4d7c9e04deee30ece50282b284efbf0667a6a9c1881fa
3
  size 19644912
all_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 9.81132075471698,
3
+ "total_flos": 2.036227620470784e+17,
4
+ "train_loss": 1.2490827266986553,
5
+ "train_runtime": 901.3115,
6
+ "train_samples": 15838,
7
+ "train_samples_per_second": 18.473,
8
+ "train_steps_per_second": 0.288
9
+ }
runs/Jun05_04-49-43_7bdd13775218/events.out.tfevents.1717563008.7bdd13775218.65724.0 CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:42b0e4ea335874c2b0a58d86c639f1204e7e731767e0458c578323b8980ec9b7
3
- size 15875
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:340e67d08b3b4552c6494647e9389e92528a94053c0f990226be010a0a3fc6b6
3
+ size 19574
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 9.81132075471698,
3
+ "total_flos": 2.036227620470784e+17,
4
+ "train_loss": 1.2490827266986553,
5
+ "train_runtime": 901.3115,
6
+ "train_samples": 15838,
7
+ "train_samples_per_second": 18.473,
8
+ "train_steps_per_second": 0.288
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,481 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 9.81132075471698,
5
+ "eval_steps": 500,
6
+ "global_step": 260,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.03773584905660377,
13
+ "grad_norm": 1.984375,
14
+ "learning_rate": 7.692307692307694e-06,
15
+ "loss": 2.9895,
16
+ "step": 1
17
+ },
18
+ {
19
+ "epoch": 0.18867924528301888,
20
+ "grad_norm": 2.3125,
21
+ "learning_rate": 3.846153846153846e-05,
22
+ "loss": 3.0316,
23
+ "step": 5
24
+ },
25
+ {
26
+ "epoch": 0.37735849056603776,
27
+ "grad_norm": 1.7265625,
28
+ "learning_rate": 7.692307692307693e-05,
29
+ "loss": 2.9227,
30
+ "step": 10
31
+ },
32
+ {
33
+ "epoch": 0.5660377358490566,
34
+ "grad_norm": 1.4921875,
35
+ "learning_rate": 0.00011538461538461538,
36
+ "loss": 2.5841,
37
+ "step": 15
38
+ },
39
+ {
40
+ "epoch": 0.7547169811320755,
41
+ "grad_norm": 1.4453125,
42
+ "learning_rate": 0.00015384615384615385,
43
+ "loss": 2.3138,
44
+ "step": 20
45
+ },
46
+ {
47
+ "epoch": 0.9433962264150944,
48
+ "grad_norm": 1.1640625,
49
+ "learning_rate": 0.00019230769230769233,
50
+ "loss": 2.0246,
51
+ "step": 25
52
+ },
53
+ {
54
+ "epoch": 0.9811320754716981,
55
+ "eval_loss": 2.6613192558288574,
56
+ "eval_runtime": 0.7072,
57
+ "eval_samples_per_second": 14.139,
58
+ "eval_steps_per_second": 1.414,
59
+ "step": 26
60
+ },
61
+ {
62
+ "epoch": 1.1320754716981132,
63
+ "grad_norm": 0.6015625,
64
+ "learning_rate": 0.00019985583705641418,
65
+ "loss": 1.7376,
66
+ "step": 30
67
+ },
68
+ {
69
+ "epoch": 1.320754716981132,
70
+ "grad_norm": 0.466796875,
71
+ "learning_rate": 0.0001992708874098054,
72
+ "loss": 1.5704,
73
+ "step": 35
74
+ },
75
+ {
76
+ "epoch": 1.509433962264151,
77
+ "grad_norm": 0.498046875,
78
+ "learning_rate": 0.00019823877374156647,
79
+ "loss": 1.4624,
80
+ "step": 40
81
+ },
82
+ {
83
+ "epoch": 1.6981132075471699,
84
+ "grad_norm": 0.390625,
85
+ "learning_rate": 0.00019676414519013781,
86
+ "loss": 1.3792,
87
+ "step": 45
88
+ },
89
+ {
90
+ "epoch": 1.8867924528301887,
91
+ "grad_norm": 0.333984375,
92
+ "learning_rate": 0.00019485364419471454,
93
+ "loss": 1.3202,
94
+ "step": 50
95
+ },
96
+ {
97
+ "epoch": 2.0,
98
+ "eval_loss": 2.540452480316162,
99
+ "eval_runtime": 0.5421,
100
+ "eval_samples_per_second": 18.446,
101
+ "eval_steps_per_second": 1.845,
102
+ "step": 53
103
+ },
104
+ {
105
+ "epoch": 2.0754716981132075,
106
+ "grad_norm": 0.2314453125,
107
+ "learning_rate": 0.00019251587657449236,
108
+ "loss": 1.2675,
109
+ "step": 55
110
+ },
111
+ {
112
+ "epoch": 2.2641509433962264,
113
+ "grad_norm": 0.2421875,
114
+ "learning_rate": 0.0001897613727639014,
115
+ "loss": 1.2218,
116
+ "step": 60
117
+ },
118
+ {
119
+ "epoch": 2.452830188679245,
120
+ "grad_norm": 0.298828125,
121
+ "learning_rate": 0.00018660254037844388,
122
+ "loss": 1.2149,
123
+ "step": 65
124
+ },
125
+ {
126
+ "epoch": 2.641509433962264,
127
+ "grad_norm": 0.546875,
128
+ "learning_rate": 0.00018305360832480117,
129
+ "loss": 1.179,
130
+ "step": 70
131
+ },
132
+ {
133
+ "epoch": 2.830188679245283,
134
+ "grad_norm": 0.369140625,
135
+ "learning_rate": 0.0001791305627069662,
136
+ "loss": 1.1694,
137
+ "step": 75
138
+ },
139
+ {
140
+ "epoch": 2.981132075471698,
141
+ "eval_loss": 2.5124902725219727,
142
+ "eval_runtime": 0.6642,
143
+ "eval_samples_per_second": 15.055,
144
+ "eval_steps_per_second": 1.506,
145
+ "step": 79
146
+ },
147
+ {
148
+ "epoch": 3.018867924528302,
149
+ "grad_norm": 0.275390625,
150
+ "learning_rate": 0.00017485107481711012,
151
+ "loss": 1.1675,
152
+ "step": 80
153
+ },
154
+ {
155
+ "epoch": 3.207547169811321,
156
+ "grad_norm": 0.53515625,
157
+ "learning_rate": 0.00017023442153554777,
158
+ "loss": 1.1345,
159
+ "step": 85
160
+ },
161
+ {
162
+ "epoch": 3.3962264150943398,
163
+ "grad_norm": 0.482421875,
164
+ "learning_rate": 0.0001653013984983585,
165
+ "loss": 1.1304,
166
+ "step": 90
167
+ },
168
+ {
169
+ "epoch": 3.5849056603773586,
170
+ "grad_norm": 0.421875,
171
+ "learning_rate": 0.0001600742264237979,
172
+ "loss": 1.1194,
173
+ "step": 95
174
+ },
175
+ {
176
+ "epoch": 3.7735849056603774,
177
+ "grad_norm": 0.322265625,
178
+ "learning_rate": 0.00015457645101945046,
179
+ "loss": 1.0993,
180
+ "step": 100
181
+ },
182
+ {
183
+ "epoch": 3.9622641509433962,
184
+ "grad_norm": 0.25390625,
185
+ "learning_rate": 0.00014883283692099112,
186
+ "loss": 1.1076,
187
+ "step": 105
188
+ },
189
+ {
190
+ "epoch": 4.0,
191
+ "eval_loss": 2.5138261318206787,
192
+ "eval_runtime": 0.5374,
193
+ "eval_samples_per_second": 18.609,
194
+ "eval_steps_per_second": 1.861,
195
+ "step": 106
196
+ },
197
+ {
198
+ "epoch": 4.150943396226415,
199
+ "grad_norm": 0.2451171875,
200
+ "learning_rate": 0.00014286925614030542,
201
+ "loss": 1.0832,
202
+ "step": 110
203
+ },
204
+ {
205
+ "epoch": 4.339622641509434,
206
+ "grad_norm": 0.26953125,
207
+ "learning_rate": 0.00013671257152545277,
208
+ "loss": 1.0718,
209
+ "step": 115
210
+ },
211
+ {
212
+ "epoch": 4.528301886792453,
213
+ "grad_norm": 0.416015625,
214
+ "learning_rate": 0.0001303905157574247,
215
+ "loss": 1.0737,
216
+ "step": 120
217
+ },
218
+ {
219
+ "epoch": 4.716981132075472,
220
+ "grad_norm": 0.27734375,
221
+ "learning_rate": 0.0001239315664287558,
222
+ "loss": 1.0708,
223
+ "step": 125
224
+ },
225
+ {
226
+ "epoch": 4.90566037735849,
227
+ "grad_norm": 0.6015625,
228
+ "learning_rate": 0.00011736481776669306,
229
+ "loss": 1.0651,
230
+ "step": 130
231
+ },
232
+ {
233
+ "epoch": 4.981132075471698,
234
+ "eval_loss": 2.5085928440093994,
235
+ "eval_runtime": 0.6726,
236
+ "eval_samples_per_second": 14.868,
237
+ "eval_steps_per_second": 1.487,
238
+ "step": 132
239
+ },
240
+ {
241
+ "epoch": 5.09433962264151,
242
+ "grad_norm": 0.388671875,
243
+ "learning_rate": 0.00011071984957874479,
244
+ "loss": 1.055,
245
+ "step": 135
246
+ },
247
+ {
248
+ "epoch": 5.283018867924528,
249
+ "grad_norm": 0.4296875,
250
+ "learning_rate": 0.00010402659401094152,
251
+ "loss": 1.0371,
252
+ "step": 140
253
+ },
254
+ {
255
+ "epoch": 5.471698113207547,
256
+ "grad_norm": 0.271484375,
257
+ "learning_rate": 9.73152007189939e-05,
258
+ "loss": 1.0452,
259
+ "step": 145
260
+ },
261
+ {
262
+ "epoch": 5.660377358490566,
263
+ "grad_norm": 0.263671875,
264
+ "learning_rate": 9.061590105968208e-05,
265
+ "loss": 1.0461,
266
+ "step": 150
267
+ },
268
+ {
269
+ "epoch": 5.849056603773585,
270
+ "grad_norm": 0.271484375,
271
+ "learning_rate": 8.395887191422397e-05,
272
+ "loss": 1.0394,
273
+ "step": 155
274
+ },
275
+ {
276
+ "epoch": 6.0,
277
+ "eval_loss": 2.5248091220855713,
278
+ "eval_runtime": 0.5366,
279
+ "eval_samples_per_second": 18.636,
280
+ "eval_steps_per_second": 1.864,
281
+ "step": 159
282
+ },
283
+ {
284
+ "epoch": 6.037735849056604,
285
+ "grad_norm": 0.314453125,
286
+ "learning_rate": 7.73740997570278e-05,
287
+ "loss": 1.0302,
288
+ "step": 160
289
+ },
290
+ {
291
+ "epoch": 6.226415094339623,
292
+ "grad_norm": 0.29296875,
293
+ "learning_rate": 7.089124558212871e-05,
294
+ "loss": 1.0164,
295
+ "step": 165
296
+ },
297
+ {
298
+ "epoch": 6.415094339622642,
299
+ "grad_norm": 0.27734375,
300
+ "learning_rate": 6.453951129574644e-05,
301
+ "loss": 1.0265,
302
+ "step": 170
303
+ },
304
+ {
305
+ "epoch": 6.60377358490566,
306
+ "grad_norm": 0.359375,
307
+ "learning_rate": 5.834750817679606e-05,
308
+ "loss": 1.019,
309
+ "step": 175
310
+ },
311
+ {
312
+ "epoch": 6.7924528301886795,
313
+ "grad_norm": 0.283203125,
314
+ "learning_rate": 5.234312799786921e-05,
315
+ "loss": 1.0159,
316
+ "step": 180
317
+ },
318
+ {
319
+ "epoch": 6.981132075471698,
320
+ "grad_norm": 0.275390625,
321
+ "learning_rate": 4.6553417387219886e-05,
322
+ "loss": 1.0232,
323
+ "step": 185
324
+ },
325
+ {
326
+ "epoch": 6.981132075471698,
327
+ "eval_loss": 2.526435136795044,
328
+ "eval_runtime": 0.6455,
329
+ "eval_samples_per_second": 15.492,
330
+ "eval_steps_per_second": 1.549,
331
+ "step": 185
332
+ },
333
+ {
334
+ "epoch": 7.169811320754717,
335
+ "grad_norm": 0.296875,
336
+ "learning_rate": 4.100445599768774e-05,
337
+ "loss": 1.0125,
338
+ "step": 190
339
+ },
340
+ {
341
+ "epoch": 7.3584905660377355,
342
+ "grad_norm": 0.2734375,
343
+ "learning_rate": 3.5721239031346066e-05,
344
+ "loss": 1.0212,
345
+ "step": 195
346
+ },
347
+ {
348
+ "epoch": 7.547169811320755,
349
+ "grad_norm": 0.294921875,
350
+ "learning_rate": 3.072756464904006e-05,
351
+ "loss": 1.0052,
352
+ "step": 200
353
+ },
354
+ {
355
+ "epoch": 7.735849056603773,
356
+ "grad_norm": 0.265625,
357
+ "learning_rate": 2.6045926771976303e-05,
358
+ "loss": 1.0067,
359
+ "step": 205
360
+ },
361
+ {
362
+ "epoch": 7.9245283018867925,
363
+ "grad_norm": 0.267578125,
364
+ "learning_rate": 2.1697413758237784e-05,
365
+ "loss": 1.0042,
366
+ "step": 210
367
+ },
368
+ {
369
+ "epoch": 8.0,
370
+ "eval_loss": 2.5296316146850586,
371
+ "eval_runtime": 0.534,
372
+ "eval_samples_per_second": 18.728,
373
+ "eval_steps_per_second": 1.873,
374
+ "step": 212
375
+ },
376
+ {
377
+ "epoch": 8.11320754716981,
378
+ "grad_norm": 0.3203125,
379
+ "learning_rate": 1.7701613410634365e-05,
380
+ "loss": 1.0077,
381
+ "step": 215
382
+ },
383
+ {
384
+ "epoch": 8.30188679245283,
385
+ "grad_norm": 0.271484375,
386
+ "learning_rate": 1.4076524743778319e-05,
387
+ "loss": 0.999,
388
+ "step": 220
389
+ },
390
+ {
391
+ "epoch": 8.49056603773585,
392
+ "grad_norm": 0.27734375,
393
+ "learning_rate": 1.083847690782972e-05,
394
+ "loss": 1.0027,
395
+ "step": 225
396
+ },
397
+ {
398
+ "epoch": 8.679245283018869,
399
+ "grad_norm": 0.26953125,
400
+ "learning_rate": 8.002055634117578e-06,
401
+ "loss": 0.9984,
402
+ "step": 230
403
+ },
404
+ {
405
+ "epoch": 8.867924528301886,
406
+ "grad_norm": 0.271484375,
407
+ "learning_rate": 5.580037533961546e-06,
408
+ "loss": 1.0109,
409
+ "step": 235
410
+ },
411
+ {
412
+ "epoch": 8.981132075471699,
413
+ "eval_loss": 2.531932830810547,
414
+ "eval_runtime": 0.6509,
415
+ "eval_samples_per_second": 15.363,
416
+ "eval_steps_per_second": 1.536,
417
+ "step": 238
418
+ },
419
+ {
420
+ "epoch": 9.056603773584905,
421
+ "grad_norm": 0.259765625,
422
+ "learning_rate": 3.5833325466437694e-06,
423
+ "loss": 1.0008,
424
+ "step": 240
425
+ },
426
+ {
427
+ "epoch": 9.245283018867925,
428
+ "grad_norm": 0.259765625,
429
+ "learning_rate": 2.0209347957732328e-06,
430
+ "loss": 1.0094,
431
+ "step": 245
432
+ },
433
+ {
434
+ "epoch": 9.433962264150944,
435
+ "grad_norm": 0.283203125,
436
+ "learning_rate": 8.998820754091531e-07,
437
+ "loss": 0.9987,
438
+ "step": 250
439
+ },
440
+ {
441
+ "epoch": 9.622641509433961,
442
+ "grad_norm": 0.255859375,
443
+ "learning_rate": 2.2522414843748618e-07,
444
+ "loss": 1.0006,
445
+ "step": 255
446
+ },
447
+ {
448
+ "epoch": 9.81132075471698,
449
+ "grad_norm": 0.263671875,
450
+ "learning_rate": 0.0,
451
+ "loss": 1.0064,
452
+ "step": 260
453
+ },
454
+ {
455
+ "epoch": 9.81132075471698,
456
+ "eval_loss": 2.531856060028076,
457
+ "eval_runtime": 0.5289,
458
+ "eval_samples_per_second": 18.906,
459
+ "eval_steps_per_second": 1.891,
460
+ "step": 260
461
+ },
462
+ {
463
+ "epoch": 9.81132075471698,
464
+ "step": 260,
465
+ "total_flos": 2.036227620470784e+17,
466
+ "train_loss": 1.2490827266986553,
467
+ "train_runtime": 901.3115,
468
+ "train_samples_per_second": 18.473,
469
+ "train_steps_per_second": 0.288
470
+ }
471
+ ],
472
+ "logging_steps": 5,
473
+ "max_steps": 260,
474
+ "num_input_tokens_seen": 0,
475
+ "num_train_epochs": 10,
476
+ "save_steps": 100,
477
+ "total_flos": 2.036227620470784e+17,
478
+ "train_batch_size": 8,
479
+ "trial_name": null,
480
+ "trial_params": null
481
+ }