tyzhu committed
Commit 80961f1
1 Parent(s): 5c0f55e

End of training

Files changed (6)
  1. README.md +14 -2
  2. all_results.json +16 -0
  3. eval_results.json +10 -0
  4. tokenizer.json +1 -6
  5. train_results.json +9 -0
  6. trainer_state.json +560 -0
README.md CHANGED
@@ -3,11 +3,23 @@ license: other
 base_model: Qwen/Qwen1.5-4B
 tags:
 - generated_from_trainer
+datasets:
+- tyzhu/lmind_hotpot_train8000_eval7405_v1_reciteonly_qa
 metrics:
 - accuracy
 model-index:
 - name: lmind_hotpot_train8000_eval7405_v1_reciteonly_qa_Qwen_Qwen1.5-4B_3e-5_lora2
-  results: []
+  results:
+  - task:
+      name: Causal Language Modeling
+      type: text-generation
+    dataset:
+      name: tyzhu/lmind_hotpot_train8000_eval7405_v1_reciteonly_qa
+      type: tyzhu/lmind_hotpot_train8000_eval7405_v1_reciteonly_qa
+    metrics:
+    - name: Accuracy
+      type: accuracy
+      value: 0.6657583697234353
 library_name: peft
 ---
 
@@ -16,7 +28,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # lmind_hotpot_train8000_eval7405_v1_reciteonly_qa_Qwen_Qwen1.5-4B_3e-5_lora2
 
-This model is a fine-tuned version of [Qwen/Qwen1.5-4B](https://huggingface.co/Qwen/Qwen1.5-4B) on an unknown dataset.
+This model is a fine-tuned version of [Qwen/Qwen1.5-4B](https://huggingface.co/Qwen/Qwen1.5-4B) on the tyzhu/lmind_hotpot_train8000_eval7405_v1_reciteonly_qa dataset.
 It achieves the following results on the evaluation set:
 - Loss: 1.8696
 - Accuracy: 0.6658
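Since the card declares `library_name: peft`, this repository holds a LoRA adapter on top of Qwen/Qwen1.5-4B rather than full model weights. A minimal usage sketch; the adapter repo id below is assumed from the model-index name and may differ:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/Qwen1.5-4B"
# Assumed adapter repo id, taken from the model-index name in the card above.
adapter_id = "tyzhu/lmind_hotpot_train8000_eval7405_v1_reciteonly_qa_Qwen_Qwen1.5-4B_3e-5_lora2"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # attach the LoRA adapter

# Placeholder prompt; the dataset is a recite-then-answer HotpotQA variant.
inputs = tokenizer("Question: ...", return_tensors="pt").to(base.device)
out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```
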
all_results.json ADDED
@@ -0,0 +1,16 @@
+{
+    "epoch": 20.0,
+    "eval_accuracy": 0.6657583697234353,
+    "eval_loss": 1.8696147203445435,
+    "eval_runtime": 8.0567,
+    "eval_samples": 500,
+    "eval_samples_per_second": 62.06,
+    "eval_steps_per_second": 7.82,
+    "perplexity": 6.485797072186353,
+    "total_flos": 9.221411586147615e+17,
+    "train_loss": 1.163707649230957,
+    "train_runtime": 11749.7477,
+    "train_samples": 8000,
+    "train_samples_per_second": 13.617,
+    "train_steps_per_second": 0.426
+}
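The `perplexity` entry is simply the exponential of the evaluation loss; a quick check (illustrative only, not part of the commit):

```python
import math

eval_loss = 1.8696147203445435   # "eval_loss" above
print(math.exp(eval_loss))       # ~6.485797..., matching "perplexity" above
```
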
eval_results.json ADDED
@@ -0,0 +1,10 @@
+{
+    "epoch": 20.0,
+    "eval_accuracy": 0.6657583697234353,
+    "eval_loss": 1.8696147203445435,
+    "eval_runtime": 8.0567,
+    "eval_samples": 500,
+    "eval_samples_per_second": 62.06,
+    "eval_steps_per_second": 7.82,
+    "perplexity": 6.485797072186353
+}
tokenizer.json CHANGED
@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 1024,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
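This change clears the truncation settings that were previously baked into tokenizer.json. As a rough sketch with the `tokenizers` library (assuming a local copy of the file), the two states correspond to:

```python
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

# Old state: truncate to 1024 tokens ("LongestFirst" strategy and right-side
# truncation are the library defaults).
tok.enable_truncation(max_length=1024)

# New state: no truncation at all, serialized as "truncation": null.
tok.no_truncation()

tok.save("tokenizer.json")
```
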
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 20.0,
+    "total_flos": 9.221411586147615e+17,
+    "train_loss": 1.163707649230957,
+    "train_runtime": 11749.7477,
+    "train_samples": 8000,
+    "train_samples_per_second": 13.617,
+    "train_steps_per_second": 0.426
+}
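The throughput figures follow directly from the run length recorded here (8000 samples for 20 epochs and 5000 optimizer steps in 11749.7 s). An illustrative sanity check:

```python
train_runtime = 11749.7477   # seconds ("train_runtime" above)
train_samples = 8000
num_epochs = 20              # from trainer_state.json
global_steps = 5000          # "global_step" in trainer_state.json

print(round(train_samples * num_epochs / train_runtime, 3))  # 13.617 samples/s
print(round(global_steps / train_runtime, 3))                # 0.426 steps/s
```
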
trainer_state.json ADDED
@@ -0,0 +1,560 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 20.0,
+  "eval_steps": 500,
+  "global_step": 5000,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.4,
+      "grad_norm": 0.1675417125225067,
+      "learning_rate": 3e-05,
+      "loss": 1.5895,
+      "step": 100
+    },
+    {
+      "epoch": 0.8,
+      "grad_norm": 0.1831749528646469,
+      "learning_rate": 3e-05,
+      "loss": 1.4742,
+      "step": 200
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.6748151382823872,
+      "eval_loss": 1.531341314315796,
+      "eval_runtime": 9.1549,
+      "eval_samples_per_second": 54.616,
+      "eval_steps_per_second": 6.882,
+      "step": 250
+    },
+    {
+      "epoch": 1.2,
+      "grad_norm": 0.22951605916023254,
+      "learning_rate": 3e-05,
+      "loss": 1.4652,
+      "step": 300
+    },
+    {
+      "epoch": 1.6,
+      "grad_norm": 0.23647047579288483,
+      "learning_rate": 3e-05,
+      "loss": 1.4601,
+      "step": 400
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 0.2248772829771042,
+      "learning_rate": 3e-05,
+      "loss": 1.45,
+      "step": 500
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.6756593886462882,
+      "eval_loss": 1.5196012258529663,
+      "eval_runtime": 9.0923,
+      "eval_samples_per_second": 54.992,
+      "eval_steps_per_second": 6.929,
+      "step": 500
+    },
+    {
+      "epoch": 2.4,
+      "grad_norm": 0.26751402020454407,
+      "learning_rate": 3e-05,
+      "loss": 1.4361,
+      "step": 600
+    },
+    {
+      "epoch": 2.8,
+      "grad_norm": 0.29390770196914673,
+      "learning_rate": 3e-05,
+      "loss": 1.4269,
+      "step": 700
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.6760756914119359,
+      "eval_loss": 1.5133788585662842,
+      "eval_runtime": 9.1109,
+      "eval_samples_per_second": 54.879,
+      "eval_steps_per_second": 6.915,
+      "step": 750
+    },
+    {
+      "epoch": 3.2,
+      "grad_norm": 0.3322733938694,
+      "learning_rate": 3e-05,
+      "loss": 1.4184,
+      "step": 800
+    },
+    {
+      "epoch": 3.6,
+      "grad_norm": 0.3646068871021271,
+      "learning_rate": 3e-05,
+      "loss": 1.3973,
+      "step": 900
+    },
+    {
+      "epoch": 4.0,
+      "grad_norm": 0.4150474965572357,
+      "learning_rate": 3e-05,
+      "loss": 1.3999,
+      "step": 1000
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.6762474526928676,
+      "eval_loss": 1.511995792388916,
+      "eval_runtime": 9.0922,
+      "eval_samples_per_second": 54.992,
+      "eval_steps_per_second": 6.929,
+      "step": 1000
+    },
+    {
+      "epoch": 4.4,
+      "grad_norm": 0.44478657841682434,
+      "learning_rate": 3e-05,
+      "loss": 1.3624,
+      "step": 1100
+    },
+    {
+      "epoch": 4.8,
+      "grad_norm": 0.48866602778434753,
+      "learning_rate": 3e-05,
+      "loss": 1.3614,
+      "step": 1200
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.6759650655021834,
+      "eval_loss": 1.5192290544509888,
+      "eval_runtime": 9.1143,
+      "eval_samples_per_second": 54.859,
+      "eval_steps_per_second": 6.912,
+      "step": 1250
+    },
+    {
+      "epoch": 5.2,
+      "grad_norm": 0.5195505023002625,
+      "learning_rate": 3e-05,
+      "loss": 1.3431,
+      "step": 1300
+    },
+    {
+      "epoch": 5.6,
+      "grad_norm": 0.5769343972206116,
+      "learning_rate": 3e-05,
+      "loss": 1.3264,
+      "step": 1400
+    },
+    {
+      "epoch": 6.0,
+      "grad_norm": 0.5181849598884583,
+      "learning_rate": 3e-05,
+      "loss": 1.3303,
+      "step": 1500
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.6755080058224163,
+      "eval_loss": 1.5265752077102661,
+      "eval_runtime": 9.1234,
+      "eval_samples_per_second": 54.804,
+      "eval_steps_per_second": 6.905,
+      "step": 1500
+    },
+    {
+      "epoch": 6.4,
+      "grad_norm": 0.6372528076171875,
+      "learning_rate": 3e-05,
+      "loss": 1.2883,
+      "step": 1600
+    },
+    {
+      "epoch": 6.8,
+      "grad_norm": 0.6501044034957886,
+      "learning_rate": 3e-05,
+      "loss": 1.2946,
+      "step": 1700
+    },
+    {
+      "epoch": 7.0,
+      "eval_accuracy": 0.6747045123726346,
+      "eval_loss": 1.5446096658706665,
+      "eval_runtime": 9.0797,
+      "eval_samples_per_second": 55.068,
+      "eval_steps_per_second": 6.939,
+      "step": 1750
+    },
+    {
+      "epoch": 7.2,
+      "grad_norm": 0.7209816575050354,
+      "learning_rate": 3e-05,
+      "loss": 1.2705,
+      "step": 1800
+    },
+    {
+      "epoch": 7.6,
+      "grad_norm": 0.7495877742767334,
+      "learning_rate": 3e-05,
+      "loss": 1.2498,
+      "step": 1900
+    },
+    {
+      "epoch": 8.0,
+      "grad_norm": 0.681526780128479,
+      "learning_rate": 3e-05,
+      "loss": 1.2518,
+      "step": 2000
+    },
+    {
+      "epoch": 8.0,
+      "eval_accuracy": 0.6745036390101893,
+      "eval_loss": 1.5590205192565918,
+      "eval_runtime": 8.0597,
+      "eval_samples_per_second": 62.037,
+      "eval_steps_per_second": 7.817,
+      "step": 2000
+    },
+    {
+      "epoch": 8.4,
+      "grad_norm": 0.7470565438270569,
+      "learning_rate": 3e-05,
+      "loss": 1.2196,
+      "step": 2100
+    },
+    {
+      "epoch": 8.8,
+      "grad_norm": 0.7745229005813599,
+      "learning_rate": 3e-05,
+      "loss": 1.2082,
+      "step": 2200
+    },
+    {
+      "epoch": 9.0,
+      "eval_accuracy": 0.6740349344978166,
+      "eval_loss": 1.571682333946228,
+      "eval_runtime": 9.0961,
+      "eval_samples_per_second": 54.969,
+      "eval_steps_per_second": 6.926,
+      "step": 2250
+    },
+    {
+      "epoch": 9.2,
+      "grad_norm": 0.8478706479072571,
+      "learning_rate": 3e-05,
+      "loss": 1.2017,
+      "step": 2300
+    },
+    {
+      "epoch": 9.6,
+      "grad_norm": 0.9340612292289734,
+      "learning_rate": 3e-05,
+      "loss": 1.1742,
+      "step": 2400
+    },
+    {
+      "epoch": 10.0,
+      "grad_norm": 0.9207039475440979,
+      "learning_rate": 3e-05,
+      "loss": 1.19,
+      "step": 2500
+    },
+    {
+      "epoch": 10.0,
+      "eval_accuracy": 0.6727074235807861,
+      "eval_loss": 1.6021865606307983,
+      "eval_runtime": 9.0984,
+      "eval_samples_per_second": 54.955,
+      "eval_steps_per_second": 6.924,
+      "step": 2500
+    },
+    {
+      "epoch": 10.4,
+      "grad_norm": 0.9244349598884583,
+      "learning_rate": 3e-05,
+      "loss": 1.1299,
+      "step": 2600
+    },
+    {
+      "epoch": 10.8,
+      "grad_norm": 0.9110805988311768,
+      "learning_rate": 3e-05,
+      "loss": 1.1523,
+      "step": 2700
+    },
+    {
+      "epoch": 11.0,
+      "eval_accuracy": 0.672608442503639,
+      "eval_loss": 1.6098225116729736,
+      "eval_runtime": 9.117,
+      "eval_samples_per_second": 54.843,
+      "eval_steps_per_second": 6.91,
+      "step": 2750
+    },
+    {
+      "epoch": 11.2,
+      "grad_norm": 0.916287899017334,
+      "learning_rate": 3e-05,
+      "loss": 1.1278,
+      "step": 2800
+    },
+    {
+      "epoch": 11.6,
+      "grad_norm": 1.0008416175842285,
+      "learning_rate": 3e-05,
+      "loss": 1.0981,
+      "step": 2900
+    },
+    {
+      "epoch": 12.0,
+      "grad_norm": 0.9763438701629639,
+      "learning_rate": 3e-05,
+      "loss": 1.1193,
+      "step": 3000
+    },
+    {
+      "epoch": 12.0,
+      "eval_accuracy": 0.671589519650655,
+      "eval_loss": 1.6344681978225708,
+      "eval_runtime": 9.1836,
+      "eval_samples_per_second": 54.445,
+      "eval_steps_per_second": 6.86,
+      "step": 3000
+    },
+    {
+      "epoch": 12.4,
+      "grad_norm": 1.0682412385940552,
+      "learning_rate": 3e-05,
+      "loss": 1.0604,
+      "step": 3100
+    },
+    {
+      "epoch": 12.8,
+      "grad_norm": 1.1306017637252808,
+      "learning_rate": 3e-05,
+      "loss": 1.0736,
+      "step": 3200
+    },
+    {
+      "epoch": 13.0,
+      "eval_accuracy": 0.6707016011644833,
+      "eval_loss": 1.674833059310913,
+      "eval_runtime": 8.098,
+      "eval_samples_per_second": 61.744,
+      "eval_steps_per_second": 7.78,
+      "step": 3250
+    },
+    {
+      "epoch": 13.2,
+      "grad_norm": 1.154135823249817,
+      "learning_rate": 3e-05,
+      "loss": 1.054,
+      "step": 3300
+    },
+    {
+      "epoch": 13.6,
+      "grad_norm": 1.1169854402542114,
+      "learning_rate": 3e-05,
+      "loss": 1.0253,
+      "step": 3400
+    },
+    {
+      "epoch": 14.0,
+      "grad_norm": 1.0877137184143066,
+      "learning_rate": 3e-05,
+      "loss": 1.0414,
+      "step": 3500
+    },
+    {
+      "epoch": 14.0,
+      "eval_accuracy": 0.6701280931586608,
+      "eval_loss": 1.688016653060913,
+      "eval_runtime": 9.1712,
+      "eval_samples_per_second": 54.519,
+      "eval_steps_per_second": 6.869,
+      "step": 3500
+    },
+    {
+      "epoch": 14.4,
+      "grad_norm": 1.2245118618011475,
+      "learning_rate": 3e-05,
+      "loss": 0.9823,
+      "step": 3600
+    },
+    {
+      "epoch": 14.8,
+      "grad_norm": 1.2784464359283447,
+      "learning_rate": 3e-05,
+      "loss": 1.0069,
+      "step": 3700
+    },
+    {
+      "epoch": 15.0,
+      "eval_accuracy": 0.6693682678311499,
+      "eval_loss": 1.7182435989379883,
+      "eval_runtime": 9.1433,
+      "eval_samples_per_second": 54.685,
+      "eval_steps_per_second": 6.89,
+      "step": 3750
+    },
+    {
+      "epoch": 15.2,
+      "grad_norm": 1.182626724243164,
+      "learning_rate": 3e-05,
+      "loss": 0.9834,
+      "step": 3800
+    },
+    {
+      "epoch": 15.6,
+      "grad_norm": 1.3419315814971924,
+      "learning_rate": 3e-05,
+      "loss": 0.9608,
+      "step": 3900
+    },
+    {
+      "epoch": 16.0,
+      "grad_norm": 1.2352997064590454,
+      "learning_rate": 3e-05,
+      "loss": 0.9654,
+      "step": 4000
+    },
+    {
+      "epoch": 16.0,
+      "eval_accuracy": 0.6685036390101893,
+      "eval_loss": 1.7521902322769165,
+      "eval_runtime": 9.1045,
+      "eval_samples_per_second": 54.918,
+      "eval_steps_per_second": 6.92,
+      "step": 4000
+    },
+    {
+      "epoch": 16.4,
+      "grad_norm": 1.439382791519165,
+      "learning_rate": 3e-05,
+      "loss": 0.9236,
+      "step": 4100
+    },
+    {
+      "epoch": 16.8,
+      "grad_norm": 1.3681950569152832,
+      "learning_rate": 3e-05,
+      "loss": 0.9337,
+      "step": 4200
+    },
+    {
+      "epoch": 17.0,
+      "eval_accuracy": 0.6677409024745269,
+      "eval_loss": 1.7825894355773926,
+      "eval_runtime": 9.1188,
+      "eval_samples_per_second": 54.832,
+      "eval_steps_per_second": 6.909,
+      "step": 4250
+    },
+    {
+      "epoch": 17.2,
+      "grad_norm": 1.417385458946228,
+      "learning_rate": 3e-05,
+      "loss": 0.9135,
+      "step": 4300
+    },
+    {
+      "epoch": 17.6,
+      "grad_norm": 1.5673705339431763,
+      "learning_rate": 3e-05,
+      "loss": 0.8941,
+      "step": 4400
+    },
+    {
+      "epoch": 18.0,
+      "grad_norm": 1.5578211545944214,
+      "learning_rate": 3e-05,
+      "loss": 0.9,
+      "step": 4500
+    },
+    {
+      "epoch": 18.0,
+      "eval_accuracy": 0.6671615720524018,
+      "eval_loss": 1.8080195188522339,
+      "eval_runtime": 8.0689,
+      "eval_samples_per_second": 61.967,
+      "eval_steps_per_second": 7.808,
+      "step": 4500
+    },
+    {
+      "epoch": 18.4,
+      "grad_norm": 1.671644687652588,
+      "learning_rate": 3e-05,
+      "loss": 0.8522,
+      "step": 4600
+    },
+    {
+      "epoch": 18.8,
+      "grad_norm": 1.5266335010528564,
+      "learning_rate": 3e-05,
+      "loss": 0.8704,
+      "step": 4700
+    },
+    {
+      "epoch": 19.0,
+      "eval_accuracy": 0.6663289665211063,
+      "eval_loss": 1.8349848985671997,
+      "eval_runtime": 9.1076,
+      "eval_samples_per_second": 54.899,
+      "eval_steps_per_second": 6.917,
+      "step": 4750
+    },
+    {
+      "epoch": 19.2,
+      "grad_norm": 1.5969411134719849,
+      "learning_rate": 3e-05,
+      "loss": 0.8469,
+      "step": 4800
+    },
+    {
+      "epoch": 19.6,
+      "grad_norm": 1.5390815734863281,
+      "learning_rate": 3e-05,
+      "loss": 0.8398,
+      "step": 4900
+    },
+    {
+      "epoch": 20.0,
+      "grad_norm": 1.5651904344558716,
+      "learning_rate": 3e-05,
+      "loss": 0.8407,
+      "step": 5000
+    },
+    {
+      "epoch": 20.0,
+      "eval_accuracy": 0.6657583697234353,
+      "eval_loss": 1.8696147203445435,
+      "eval_runtime": 9.1023,
+      "eval_samples_per_second": 54.931,
+      "eval_steps_per_second": 6.921,
+      "step": 5000
+    },
+    {
+      "epoch": 20.0,
+      "step": 5000,
+      "total_flos": 9.221411586147615e+17,
+      "train_loss": 1.163707649230957,
+      "train_runtime": 11749.7477,
+      "train_samples_per_second": 13.617,
+      "train_steps_per_second": 0.426
+    }
+  ],
+  "logging_steps": 100,
+  "max_steps": 5000,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 20,
+  "save_steps": 500,
+  "total_flos": 9.221411586147615e+17,
+  "train_batch_size": 1,
+  "trial_name": null,
+  "trial_params": null
+}
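The `log_history` above interleaves a training-loss record every 100 steps with one evaluation record per epoch. An illustrative way to pull the eval curve out of a local copy of trainer_state.json:

```python
import json

# Illustrative only: print the per-epoch eval loss/accuracy from trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:  # eval records; training records carry "loss" instead
        print(f'epoch {entry["epoch"]:>4}: '
              f'eval_loss={entry["eval_loss"]:.4f}, '
              f'eval_accuracy={entry["eval_accuracy"]:.4f}')
```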