tyzhu committed on
Commit
fd83159
1 Parent(s): 89e10da

End of training

Files changed (6)
  1. README.md +14 -2
  2. all_results.json +16 -0
  3. eval_results.json +10 -0
  4. tokenizer.json +1 -6
  5. train_results.json +9 -0
  6. trainer_state.json +560 -0
README.md CHANGED
@@ -3,11 +3,23 @@ license: other
 base_model: Qwen/Qwen1.5-4B
 tags:
 - generated_from_trainer
+datasets:
+- tyzhu/lmind_hotpot_train8000_eval7405_v1_reciteonly_qa
 metrics:
 - accuracy
 model-index:
 - name: lmind_hotpot_train8000_eval7405_v1_reciteonly_qa_Qwen_Qwen1.5-4B_5e-5_lora2
-  results: []
+  results:
+  - task:
+      name: Causal Language Modeling
+      type: text-generation
+    dataset:
+      name: tyzhu/lmind_hotpot_train8000_eval7405_v1_reciteonly_qa
+      type: tyzhu/lmind_hotpot_train8000_eval7405_v1_reciteonly_qa
+    metrics:
+    - name: Accuracy
+      type: accuracy
+      value: 0.6606986899563319
 library_name: peft
 ---
 
@@ -16,7 +28,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # lmind_hotpot_train8000_eval7405_v1_reciteonly_qa_Qwen_Qwen1.5-4B_5e-5_lora2
 
-This model is a fine-tuned version of [Qwen/Qwen1.5-4B](https://huggingface.co/Qwen/Qwen1.5-4B) on an unknown dataset.
+This model is a fine-tuned version of [Qwen/Qwen1.5-4B](https://huggingface.co/Qwen/Qwen1.5-4B) on the tyzhu/lmind_hotpot_train8000_eval7405_v1_reciteonly_qa dataset.
 It achieves the following results on the evaluation set:
 - Loss: 2.2235
 - Accuracy: 0.6607
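
Note: the card above describes a LoRA adapter (library_name: peft) on top of Qwen/Qwen1.5-4B rather than a full model. A minimal loading sketch; the adapter repo id (tyzhu/<model name>) and the prompt format are assumptions, not taken from this commit:

```python
# Minimal sketch (not part of this commit): attach the LoRA adapter to the base model.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/Qwen1.5-4B"
adapter_id = "tyzhu/lmind_hotpot_train8000_eval7405_v1_reciteonly_qa_Qwen_Qwen1.5-4B_5e-5_lora2"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base, adapter_id)  # wraps the base model with the LoRA weights

# Placeholder prompt; the dataset's actual prompt format is not documented in the card.
inputs = tokenizer("Question: Who directed Inception?\nAnswer:", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```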
all_results.json ADDED
@@ -0,0 +1,16 @@
+{
+    "epoch": 20.0,
+    "eval_accuracy": 0.6606986899563319,
+    "eval_loss": 2.2234957218170166,
+    "eval_runtime": 8.8856,
+    "eval_samples": 500,
+    "eval_samples_per_second": 56.271,
+    "eval_steps_per_second": 7.09,
+    "perplexity": 9.239573455995565,
+    "total_flos": 9.221411586147615e+17,
+    "train_loss": 0.9736675170898438,
+    "train_runtime": 11083.462,
+    "train_samples": 8000,
+    "train_samples_per_second": 14.436,
+    "train_steps_per_second": 0.451
+}
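
The perplexity recorded here is not an independent measurement; it is exp(eval_loss), so the two numbers above should (and do) agree:

```python
import math

eval_loss = 2.2234957218170166
print(math.exp(eval_loss))  # ≈ 9.2396, matching the reported perplexity of 9.239573455995565
```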
eval_results.json ADDED
@@ -0,0 +1,10 @@
+{
+    "epoch": 20.0,
+    "eval_accuracy": 0.6606986899563319,
+    "eval_loss": 2.2234957218170166,
+    "eval_runtime": 8.8856,
+    "eval_samples": 500,
+    "eval_samples_per_second": 56.271,
+    "eval_steps_per_second": 7.09,
+    "perplexity": 9.239573455995565
+}
tokenizer.json CHANGED
@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 1024,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
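
This change removes the truncation block that was previously baked into tokenizer.json (right-side truncation at 1024 tokens), so the saved tokenizer no longer truncates by default. If the old behaviour is wanted, it can be re-enabled at load time; a sketch using the tokenizers library, with the file path pointing at a local copy of tokenizer.json:

```python
from tokenizers import Tokenizer

# Load the fast-tokenizer definition from this repo and re-apply the truncation
# settings that this commit removed (right side, max_length=1024, longest_first).
tok = Tokenizer.from_file("tokenizer.json")
tok.enable_truncation(max_length=1024, stride=0, strategy="longest_first", direction="right")

enc = tok.encode("some long text " * 600)
print(len(enc.ids))  # capped at 1024 tokens
```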
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 20.0,
+    "total_flos": 9.221411586147615e+17,
+    "train_loss": 0.9736675170898438,
+    "train_runtime": 11083.462,
+    "train_samples": 8000,
+    "train_samples_per_second": 14.436,
+    "train_steps_per_second": 0.451
+}
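
The throughput figures are internally consistent: 8,000 training samples over 20 epochs in 11,083.462 s works out to about 14.4 samples/s, and the 5,000 optimizer steps recorded in trainer_state.json below give about 0.45 steps/s:

```python
train_samples, epochs, runtime_s, steps = 8000, 20.0, 11083.462, 5000
print(train_samples * epochs / runtime_s)  # ≈ 14.436 train_samples_per_second
print(steps / runtime_s)                   # ≈ 0.451 train_steps_per_second
```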
trainer_state.json ADDED
@@ -0,0 +1,560 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 20.0,
+  "eval_steps": 500,
+  "global_step": 5000,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.4,
+      "grad_norm": 0.16342471539974213,
+      "learning_rate": 5e-05,
+      "loss": 1.5536,
+      "step": 100
+    },
+    {
+      "epoch": 0.8,
+      "grad_norm": 0.1810108870267868,
+      "learning_rate": 5e-05,
+      "loss": 1.4675,
+      "step": 200
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.6751848617176128,
+      "eval_loss": 1.5235044956207275,
+      "eval_runtime": 8.9579,
+      "eval_samples_per_second": 55.816,
+      "eval_steps_per_second": 7.033,
+      "step": 250
+    },
+    {
+      "epoch": 1.2,
+      "grad_norm": 0.23669496178627014,
+      "learning_rate": 5e-05,
+      "loss": 1.4549,
+      "step": 300
+    },
+    {
+      "epoch": 1.6,
+      "grad_norm": 0.2622547745704651,
+      "learning_rate": 5e-05,
+      "loss": 1.4455,
+      "step": 400
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 0.2481652796268463,
+      "learning_rate": 5e-05,
+      "loss": 1.435,
+      "step": 500
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.6761542940320233,
+      "eval_loss": 1.5121686458587646,
+      "eval_runtime": 9.1287,
+      "eval_samples_per_second": 54.772,
+      "eval_steps_per_second": 6.901,
+      "step": 500
+    },
+    {
+      "epoch": 2.4,
+      "grad_norm": 0.32582077383995056,
+      "learning_rate": 5e-05,
+      "loss": 1.404,
+      "step": 600
+    },
+    {
+      "epoch": 2.8,
+      "grad_norm": 0.3612115681171417,
+      "learning_rate": 5e-05,
+      "loss": 1.395,
+      "step": 700
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.6761222707423581,
+      "eval_loss": 1.5092321634292603,
+      "eval_runtime": 8.9213,
+      "eval_samples_per_second": 56.046,
+      "eval_steps_per_second": 7.062,
+      "step": 750
+    },
+    {
+      "epoch": 3.2,
+      "grad_norm": 0.3962060213088989,
+      "learning_rate": 5e-05,
+      "loss": 1.3741,
+      "step": 800
+    },
+    {
+      "epoch": 3.6,
+      "grad_norm": 0.43648457527160645,
+      "learning_rate": 5e-05,
+      "loss": 1.3432,
+      "step": 900
+    },
+    {
+      "epoch": 4.0,
+      "grad_norm": 0.4662795066833496,
+      "learning_rate": 5e-05,
+      "loss": 1.35,
+      "step": 1000
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.6761222707423581,
+      "eval_loss": 1.5164991617202759,
+      "eval_runtime": 8.9033,
+      "eval_samples_per_second": 56.159,
+      "eval_steps_per_second": 7.076,
+      "step": 1000
+    },
+    {
+      "epoch": 4.4,
+      "grad_norm": 0.5122941136360168,
+      "learning_rate": 5e-05,
+      "loss": 1.2867,
+      "step": 1100
+    },
+    {
+      "epoch": 4.8,
+      "grad_norm": 0.5554733872413635,
+      "learning_rate": 5e-05,
+      "loss": 1.2906,
+      "step": 1200
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.6754119359534206,
+      "eval_loss": 1.5308544635772705,
+      "eval_runtime": 9.1914,
+      "eval_samples_per_second": 54.398,
+      "eval_steps_per_second": 6.854,
+      "step": 1250
+    },
+    {
+      "epoch": 5.2,
+      "grad_norm": 0.559734582901001,
+      "learning_rate": 5e-05,
+      "loss": 1.2611,
+      "step": 1300
+    },
+    {
+      "epoch": 5.6,
+      "grad_norm": 0.6094681024551392,
+      "learning_rate": 5e-05,
+      "loss": 1.2358,
+      "step": 1400
+    },
+    {
+      "epoch": 6.0,
+      "grad_norm": 0.5562113523483276,
+      "learning_rate": 5e-05,
+      "loss": 1.2411,
+      "step": 1500
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.6746579330422126,
+      "eval_loss": 1.5508921146392822,
+      "eval_runtime": 8.9295,
+      "eval_samples_per_second": 55.994,
+      "eval_steps_per_second": 7.055,
+      "step": 1500
+    },
+    {
+      "epoch": 6.4,
+      "grad_norm": 0.6852166652679443,
+      "learning_rate": 5e-05,
+      "loss": 1.1728,
+      "step": 1600
+    },
+    {
+      "epoch": 6.8,
+      "grad_norm": 0.6806530952453613,
+      "learning_rate": 5e-05,
+      "loss": 1.1833,
+      "step": 1700
+    },
+    {
+      "epoch": 7.0,
+      "eval_accuracy": 0.67372634643377,
+      "eval_loss": 1.5746535062789917,
+      "eval_runtime": 8.9136,
+      "eval_samples_per_second": 56.094,
+      "eval_steps_per_second": 7.068,
+      "step": 1750
+    },
+    {
+      "epoch": 7.2,
+      "grad_norm": 0.7315935492515564,
+      "learning_rate": 5e-05,
+      "loss": 1.1455,
+      "step": 1800
+    },
+    {
+      "epoch": 7.6,
+      "grad_norm": 0.764640748500824,
+      "learning_rate": 5e-05,
+      "loss": 1.1138,
+      "step": 1900
+    },
+    {
+      "epoch": 8.0,
+      "grad_norm": 0.7446439266204834,
+      "learning_rate": 5e-05,
+      "loss": 1.1198,
+      "step": 2000
+    },
+    {
+      "epoch": 8.0,
+      "eval_accuracy": 0.6726724890829694,
+      "eval_loss": 1.6129306554794312,
+      "eval_runtime": 9.1075,
+      "eval_samples_per_second": 54.9,
+      "eval_steps_per_second": 6.917,
+      "step": 2000
+    },
+    {
+      "epoch": 8.4,
+      "grad_norm": 0.7894338965415955,
+      "learning_rate": 5e-05,
+      "loss": 1.0558,
+      "step": 2100
+    },
+    {
+      "epoch": 8.8,
+      "grad_norm": 0.7857787013053894,
+      "learning_rate": 5e-05,
+      "loss": 1.0498,
+      "step": 2200
+    },
+    {
+      "epoch": 9.0,
+      "eval_accuracy": 0.6717409024745269,
+      "eval_loss": 1.6407346725463867,
+      "eval_runtime": 8.925,
+      "eval_samples_per_second": 56.022,
+      "eval_steps_per_second": 7.059,
+      "step": 2250
+    },
+    {
+      "epoch": 9.2,
+      "grad_norm": 0.9506841897964478,
+      "learning_rate": 5e-05,
+      "loss": 1.0287,
+      "step": 2300
+    },
+    {
+      "epoch": 9.6,
+      "grad_norm": 0.8976770639419556,
+      "learning_rate": 5e-05,
+      "loss": 0.989,
+      "step": 2400
+    },
+    {
+      "epoch": 10.0,
+      "grad_norm": 0.9163582921028137,
+      "learning_rate": 5e-05,
+      "loss": 1.0063,
+      "step": 2500
+    },
+    {
+      "epoch": 10.0,
+      "eval_accuracy": 0.6706142649199418,
+      "eval_loss": 1.6801503896713257,
+      "eval_runtime": 8.9042,
+      "eval_samples_per_second": 56.153,
+      "eval_steps_per_second": 7.075,
+      "step": 2500
+    },
+    {
+      "epoch": 10.4,
+      "grad_norm": 0.9549182653427124,
+      "learning_rate": 5e-05,
+      "loss": 0.9152,
+      "step": 2600
+    },
+    {
+      "epoch": 10.8,
+      "grad_norm": 0.9894323945045471,
+      "learning_rate": 5e-05,
+      "loss": 0.943,
+      "step": 2700
+    },
+    {
+      "epoch": 11.0,
+      "eval_accuracy": 0.6691470160116448,
+      "eval_loss": 1.7385432720184326,
+      "eval_runtime": 8.9087,
+      "eval_samples_per_second": 56.125,
+      "eval_steps_per_second": 7.072,
+      "step": 2750
+    },
+    {
+      "epoch": 11.2,
+      "grad_norm": 0.9682782888412476,
+      "learning_rate": 5e-05,
+      "loss": 0.9055,
+      "step": 2800
+    },
+    {
+      "epoch": 11.6,
+      "grad_norm": 1.0714006423950195,
+      "learning_rate": 5e-05,
+      "loss": 0.8608,
+      "step": 2900
+    },
+    {
+      "epoch": 12.0,
+      "grad_norm": 1.0484849214553833,
+      "learning_rate": 5e-05,
+      "loss": 0.8881,
+      "step": 3000
+    },
+    {
+      "epoch": 12.0,
+      "eval_accuracy": 0.6681339155749636,
+      "eval_loss": 1.7767282724380493,
+      "eval_runtime": 8.9212,
+      "eval_samples_per_second": 56.047,
+      "eval_steps_per_second": 7.062,
+      "step": 3000
+    },
+    {
+      "epoch": 12.4,
+      "grad_norm": 1.14182448387146,
+      "learning_rate": 5e-05,
+      "loss": 0.8,
+      "step": 3100
+    },
+    {
+      "epoch": 12.8,
+      "grad_norm": 1.1960476636886597,
+      "learning_rate": 5e-05,
+      "loss": 0.8176,
+      "step": 3200
+    },
+    {
+      "epoch": 13.0,
+      "eval_accuracy": 0.6668908296943231,
+      "eval_loss": 1.8361681699752808,
+      "eval_runtime": 9.1437,
+      "eval_samples_per_second": 54.683,
+      "eval_steps_per_second": 6.89,
+      "step": 3250
+    },
+    {
+      "epoch": 13.2,
+      "grad_norm": 1.1808342933654785,
+      "learning_rate": 5e-05,
+      "loss": 0.7838,
+      "step": 3300
+    },
+    {
+      "epoch": 13.6,
+      "grad_norm": 1.1940733194351196,
+      "learning_rate": 5e-05,
+      "loss": 0.7487,
+      "step": 3400
+    },
+    {
+      "epoch": 14.0,
+      "grad_norm": 1.162516474723816,
+      "learning_rate": 5e-05,
+      "loss": 0.7669,
+      "step": 3500
+    },
+    {
+      "epoch": 14.0,
+      "eval_accuracy": 0.6659330422125181,
+      "eval_loss": 1.8820244073867798,
+      "eval_runtime": 9.1343,
+      "eval_samples_per_second": 54.739,
+      "eval_steps_per_second": 6.897,
+      "step": 3500
+    },
+    {
+      "epoch": 14.4,
+      "grad_norm": 1.2329277992248535,
+      "learning_rate": 5e-05,
+      "loss": 0.6844,
+      "step": 3600
+    },
+    {
+      "epoch": 14.8,
+      "grad_norm": 1.6086229085922241,
+      "learning_rate": 5e-05,
+      "loss": 0.7119,
+      "step": 3700
+    },
+    {
+      "epoch": 15.0,
+      "eval_accuracy": 0.6648355167394469,
+      "eval_loss": 1.9358941316604614,
+      "eval_runtime": 9.0025,
+      "eval_samples_per_second": 55.54,
+      "eval_steps_per_second": 6.998,
+      "step": 3750
+    },
+    {
+      "epoch": 15.2,
+      "grad_norm": 1.2168347835540771,
+      "learning_rate": 5e-05,
+      "loss": 0.6843,
+      "step": 3800
+    },
+    {
+      "epoch": 15.6,
+      "grad_norm": 1.2260061502456665,
+      "learning_rate": 5e-05,
+      "loss": 0.6529,
+      "step": 3900
+    },
+    {
+      "epoch": 16.0,
+      "grad_norm": 1.3859333992004395,
+      "learning_rate": 5e-05,
+      "loss": 0.6564,
+      "step": 4000
+    },
+    {
+      "epoch": 16.0,
+      "eval_accuracy": 0.6637903930131004,
+      "eval_loss": 2.002925395965576,
+      "eval_runtime": 8.9056,
+      "eval_samples_per_second": 56.145,
+      "eval_steps_per_second": 7.074,
+      "step": 4000
+    },
+    {
+      "epoch": 16.4,
+      "grad_norm": 1.9796981811523438,
+      "learning_rate": 5e-05,
+      "loss": 0.5998,
+      "step": 4100
+    },
+    {
+      "epoch": 16.8,
+      "grad_norm": 1.38386869430542,
+      "learning_rate": 5e-05,
+      "loss": 0.6096,
+      "step": 4200
+    },
+    {
+      "epoch": 17.0,
+      "eval_accuracy": 0.6630509461426491,
+      "eval_loss": 2.059255599975586,
+      "eval_runtime": 8.9418,
+      "eval_samples_per_second": 55.917,
+      "eval_steps_per_second": 7.046,
+      "step": 4250
+    },
+    {
+      "epoch": 17.2,
+      "grad_norm": 1.3985453844070435,
+      "learning_rate": 5e-05,
+      "loss": 0.5877,
+      "step": 4300
+    },
+    {
+      "epoch": 17.6,
+      "grad_norm": 1.882461428642273,
+      "learning_rate": 5e-05,
+      "loss": 0.562,
+      "step": 4400
+    },
+    {
+      "epoch": 18.0,
+      "grad_norm": 1.8098194599151611,
+      "learning_rate": 5e-05,
+      "loss": 0.5715,
+      "step": 4500
+    },
+    {
+      "epoch": 18.0,
+      "eval_accuracy": 0.6621135371179039,
+      "eval_loss": 2.1330745220184326,
+      "eval_runtime": 8.9283,
+      "eval_samples_per_second": 56.002,
+      "eval_steps_per_second": 7.056,
+      "step": 4500
+    },
+    {
+      "epoch": 18.4,
+      "grad_norm": 1.81195068359375,
+      "learning_rate": 5e-05,
+      "loss": 0.5108,
+      "step": 4600
+    },
+    {
+      "epoch": 18.8,
+      "grad_norm": 1.3831859827041626,
+      "learning_rate": 5e-05,
+      "loss": 0.5293,
+      "step": 4700
+    },
+    {
+      "epoch": 19.0,
+      "eval_accuracy": 0.6616826783114993,
+      "eval_loss": 2.1593096256256104,
+      "eval_runtime": 8.9039,
+      "eval_samples_per_second": 56.155,
+      "eval_steps_per_second": 7.076,
+      "step": 4750
+    },
+    {
+      "epoch": 19.2,
+      "grad_norm": 1.40048086643219,
+      "learning_rate": 5e-05,
+      "loss": 0.505,
+      "step": 4800
+    },
+    {
+      "epoch": 19.6,
+      "grad_norm": 1.5969187021255493,
+      "learning_rate": 5e-05,
+      "loss": 0.4898,
+      "step": 4900
+    },
+    {
+      "epoch": 20.0,
+      "grad_norm": 1.691625952720642,
+      "learning_rate": 5e-05,
+      "loss": 0.4956,
+      "step": 5000
+    },
+    {
+      "epoch": 20.0,
+      "eval_accuracy": 0.6606986899563319,
+      "eval_loss": 2.2234957218170166,
+      "eval_runtime": 8.9083,
+      "eval_samples_per_second": 56.127,
+      "eval_steps_per_second": 7.072,
+      "step": 5000
+    },
+    {
+      "epoch": 20.0,
+      "step": 5000,
+      "total_flos": 9.221411586147615e+17,
+      "train_loss": 0.9736675170898438,
+      "train_runtime": 11083.462,
+      "train_samples_per_second": 14.436,
+      "train_steps_per_second": 0.451
+    }
+  ],
+  "logging_steps": 100,
+  "max_steps": 5000,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 20,
+  "save_steps": 500,
+  "total_flos": 9.221411586147615e+17,
+  "train_batch_size": 1,
+  "trial_name": null,
+  "trial_params": null
+}
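
trainer_state.json is the state file the transformers Trainer writes at the end of a run. Its log_history interleaves training-loss entries (every logging_steps=100 steps) with per-epoch evaluation entries (the ones carrying eval_ keys), so the two curves can be pulled apart directly; a small sketch:

```python
import json

# Split the Trainer's log into training-loss points and per-epoch eval points.
with open("trainer_state.json") as f:
    state = json.load(f)

train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(train_points[:3])                 # [(100, 1.5536), (200, 1.4675), (300, 1.4549)]
print(eval_points[0], eval_points[-1])  # (250, 1.5235...) vs (5000, 2.2235...): eval loss rises while train loss falls
```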