wangrongsheng committed on
Commit 001d3a6
1 Parent(s): 1e97834

add weights

README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+
+ - PEFT 0.4.0
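The quantization settings listed in this README map directly onto a `transformers` `BitsAndBytesConfig`. Below is a minimal sketch of how one might load the base model in 4-bit with these settings and attach this LoRA adapter via PEFT; the base-model path is taken from `adapter_config.json`, while the adapter path, device map, and other loading flags are assumptions rather than the repository's exact inference code.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

# Mirror the bitsandbytes config listed in the README (assumption: inference
# reuses the same NF4 double-quantization settings used during training).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)

base_model_path = "../Baichuan2-13B-Chat"  # path from adapter_config.json; adjust to your local copy
adapter_path = "."                         # placeholder: local checkout of this adapter repository

tokenizer = AutoTokenizer.from_pretrained(base_model_path, trust_remote_code=True)
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_path,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)

# Attach the LoRA weights (adapter_model.bin) on top of the quantized base model.
model = PeftModel.from_pretrained(base_model, adapter_path)
model.eval()
```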
adapter_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "../Baichuan2-13B-Chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "W_pack"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
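For reference, the same LoRA hyperparameters expressed as a PEFT `LoraConfig` are sketched below. This is only an illustration of the values in the JSON above, not the repository's training script; `base_model` refers to the quantized model from the previous sketch.

```python
from peft import LoraConfig, TaskType, get_peft_model

# LoRA hyperparameters copied from adapter_config.json.
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=8,
    lora_alpha=32.0,
    lora_dropout=0.1,
    target_modules=["W_pack"],  # Baichuan's fused QKV projection
    bias="none",
)

# Applying it to a freshly loaded base model (assumption, for fine-tuning from scratch):
# model = get_peft_model(base_model, lora_config)
# model.print_trainable_parameters()
```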
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b46972030be120067c5284f694a8112aa47548e6471cabe67d85099dbfd1e3a5
+ size 26241825
all_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "epoch": 2.0,
+ "train_loss": 1.5770846359804964,
+ "train_runtime": 13419.9836,
+ "train_samples_per_second": 10.287,
+ "train_steps_per_second": 0.08
+ }
checkpoint-1000/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+
+ - PEFT 0.4.0
checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "../Baichuan2-13B-Chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "W_pack"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0c6f9a3827c723b798e96a73880310fa0efb72e74a648097dd0013eb97c5810
+ size 26241825
checkpoint-1000/finetuning_args.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "dpo_beta": 0.1,
+ "finetuning_type": "lora",
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "lora_rank": 8,
+ "lora_target": [
+ "W_pack"
+ ],
+ "name_module_trainable": "mlp",
+ "num_hidden_layers": 32,
+ "num_layer_trainable": 3,
+ "ppo_score_norm": false,
+ "resume_lora_training": true
+ }
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e621fce530ee2227ffe989ec8032812991cad2a5c4aecfeeb6a1502e7ea2681a
+ size 52476101
checkpoint-1000/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dfed83d02d0194b678b514212b6419c48ddecec362a6a746c0840ab1cc9e860f
+ size 18679
checkpoint-1000/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91a0481f60411ef59e93bf24c9aa8fbd45d68712f754306cedf8bb1d2192b46f
+ size 18679
checkpoint-1000/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b04c95737623c2406475eeee1502802220425d11d23dc2b0774ba988f0046cb3
+ size 18679
checkpoint-1000/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d13689e3d2222e605b91993016c04d573d966012148b3341f4fb72a486be6118
+ size 18679
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e84f57a783241321dee02d8f3be0c1b87153d61108e1f3152d069e1c8ddc2b0
+ size 627
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,616 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.8544274455261938,
5
+ "global_step": 1000,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.02,
12
+ "learning_rate": 4.998938447446803e-05,
13
+ "loss": 2.0078,
14
+ "step": 10
15
+ },
16
+ {
17
+ "epoch": 0.04,
18
+ "learning_rate": 4.9957546913022665e-05,
19
+ "loss": 1.8414,
20
+ "step": 20
21
+ },
22
+ {
23
+ "epoch": 0.06,
24
+ "learning_rate": 4.9904514353459654e-05,
25
+ "loss": 1.7763,
26
+ "step": 30
27
+ },
28
+ {
29
+ "epoch": 0.07,
30
+ "learning_rate": 4.983033183325818e-05,
31
+ "loss": 1.7278,
32
+ "step": 40
33
+ },
34
+ {
35
+ "epoch": 0.09,
36
+ "learning_rate": 4.973506235133323e-05,
37
+ "loss": 1.7139,
38
+ "step": 50
39
+ },
40
+ {
41
+ "epoch": 0.11,
42
+ "learning_rate": 4.96313569658781e-05,
43
+ "loss": 1.6947,
44
+ "step": 60
45
+ },
46
+ {
47
+ "epoch": 0.13,
48
+ "learning_rate": 4.94962599008322e-05,
49
+ "loss": 1.6749,
50
+ "step": 70
51
+ },
52
+ {
53
+ "epoch": 0.15,
54
+ "learning_rate": 4.9340359581993066e-05,
55
+ "loss": 1.6579,
56
+ "step": 80
57
+ },
58
+ {
59
+ "epoch": 0.17,
60
+ "learning_rate": 4.916378840646592e-05,
61
+ "loss": 1.6499,
62
+ "step": 90
63
+ },
64
+ {
65
+ "epoch": 0.19,
66
+ "learning_rate": 4.8966696325916515e-05,
67
+ "loss": 1.6507,
68
+ "step": 100
69
+ },
70
+ {
71
+ "epoch": 0.2,
72
+ "learning_rate": 4.8749250719225915e-05,
73
+ "loss": 1.6426,
74
+ "step": 110
75
+ },
76
+ {
77
+ "epoch": 0.22,
78
+ "learning_rate": 4.8511636250345294e-05,
79
+ "loss": 1.6288,
80
+ "step": 120
81
+ },
82
+ {
83
+ "epoch": 0.24,
84
+ "learning_rate": 4.825405471147153e-05,
85
+ "loss": 1.6265,
86
+ "step": 130
87
+ },
88
+ {
89
+ "epoch": 0.26,
90
+ "learning_rate": 4.797672485167683e-05,
91
+ "loss": 1.6252,
92
+ "step": 140
93
+ },
94
+ {
95
+ "epoch": 0.28,
96
+ "learning_rate": 4.7679882191137804e-05,
97
+ "loss": 1.6218,
98
+ "step": 150
99
+ },
100
+ {
101
+ "epoch": 0.3,
102
+ "learning_rate": 4.7363778821121784e-05,
103
+ "loss": 1.6146,
104
+ "step": 160
105
+ },
106
+ {
107
+ "epoch": 0.32,
108
+ "learning_rate": 4.702868318990039e-05,
109
+ "loss": 1.604,
110
+ "step": 170
111
+ },
112
+ {
113
+ "epoch": 0.33,
114
+ "learning_rate": 4.6674879874771926e-05,
115
+ "loss": 1.607,
116
+ "step": 180
117
+ },
118
+ {
119
+ "epoch": 0.35,
120
+ "learning_rate": 4.630266934038642e-05,
121
+ "loss": 1.5972,
122
+ "step": 190
123
+ },
124
+ {
125
+ "epoch": 0.37,
126
+ "learning_rate": 4.591236768357833e-05,
127
+ "loss": 1.5973,
128
+ "step": 200
129
+ },
130
+ {
131
+ "epoch": 0.39,
132
+ "learning_rate": 4.55043063649239e-05,
133
+ "loss": 1.6043,
134
+ "step": 210
135
+ },
136
+ {
137
+ "epoch": 0.41,
138
+ "learning_rate": 4.507883192725089e-05,
139
+ "loss": 1.5839,
140
+ "step": 220
141
+ },
142
+ {
143
+ "epoch": 0.43,
144
+ "learning_rate": 4.463630570133978e-05,
145
+ "loss": 1.5923,
146
+ "step": 230
147
+ },
148
+ {
149
+ "epoch": 0.45,
150
+ "learning_rate": 4.417710349906658e-05,
151
+ "loss": 1.5841,
152
+ "step": 240
153
+ },
154
+ {
155
+ "epoch": 0.46,
156
+ "learning_rate": 4.3701615294247465e-05,
157
+ "loss": 1.5977,
158
+ "step": 250
159
+ },
160
+ {
161
+ "epoch": 0.48,
162
+ "learning_rate": 4.321024489145673e-05,
163
+ "loss": 1.5954,
164
+ "step": 260
165
+ },
166
+ {
167
+ "epoch": 0.5,
168
+ "learning_rate": 4.270340958309888e-05,
169
+ "loss": 1.5914,
170
+ "step": 270
171
+ },
172
+ {
173
+ "epoch": 0.52,
174
+ "learning_rate": 4.2181539795026435e-05,
175
+ "loss": 1.577,
176
+ "step": 280
177
+ },
178
+ {
179
+ "epoch": 0.54,
180
+ "learning_rate": 4.1645078721004174e-05,
181
+ "loss": 1.576,
182
+ "step": 290
183
+ },
184
+ {
185
+ "epoch": 0.56,
186
+ "learning_rate": 4.109448194633033e-05,
187
+ "loss": 1.5771,
188
+ "step": 300
189
+ },
190
+ {
191
+ "epoch": 0.57,
192
+ "learning_rate": 4.0530217060934466e-05,
193
+ "loss": 1.5825,
194
+ "step": 310
195
+ },
196
+ {
197
+ "epoch": 0.59,
198
+ "learning_rate": 3.9952763262280405e-05,
199
+ "loss": 1.581,
200
+ "step": 320
201
+ },
202
+ {
203
+ "epoch": 0.61,
204
+ "learning_rate": 3.9362610948411585e-05,
205
+ "loss": 1.5691,
206
+ "step": 330
207
+ },
208
+ {
209
+ "epoch": 0.63,
210
+ "learning_rate": 3.8760261301484466e-05,
211
+ "loss": 1.5795,
212
+ "step": 340
213
+ },
214
+ {
215
+ "epoch": 0.65,
216
+ "learning_rate": 3.8208140480771856e-05,
217
+ "loss": 1.5847,
218
+ "step": 350
219
+ },
220
+ {
221
+ "epoch": 0.67,
222
+ "learning_rate": 3.758403340686345e-05,
223
+ "loss": 1.5737,
224
+ "step": 360
225
+ },
226
+ {
227
+ "epoch": 0.69,
228
+ "learning_rate": 3.6949239442720976e-05,
229
+ "loss": 1.5645,
230
+ "step": 370
231
+ },
232
+ {
233
+ "epoch": 0.7,
234
+ "learning_rate": 3.6304297682067144e-05,
235
+ "loss": 1.5659,
236
+ "step": 380
237
+ },
238
+ {
239
+ "epoch": 0.72,
240
+ "learning_rate": 3.5649755836560106e-05,
241
+ "loss": 1.5613,
242
+ "step": 390
243
+ },
244
+ {
245
+ "epoch": 0.74,
246
+ "learning_rate": 3.4986169770653685e-05,
247
+ "loss": 1.5725,
248
+ "step": 400
249
+ },
250
+ {
251
+ "epoch": 0.76,
252
+ "learning_rate": 3.431410302953389e-05,
253
+ "loss": 1.5619,
254
+ "step": 410
255
+ },
256
+ {
257
+ "epoch": 0.78,
258
+ "learning_rate": 3.363412636053269e-05,
259
+ "loss": 1.5615,
260
+ "step": 420
261
+ },
262
+ {
263
+ "epoch": 0.8,
264
+ "learning_rate": 3.294681722842537e-05,
265
+ "loss": 1.5623,
266
+ "step": 430
267
+ },
268
+ {
269
+ "epoch": 0.82,
270
+ "learning_rate": 3.225275932502315e-05,
271
+ "loss": 1.5685,
272
+ "step": 440
273
+ },
274
+ {
275
+ "epoch": 0.83,
276
+ "learning_rate": 3.1552542073477555e-05,
277
+ "loss": 1.566,
278
+ "step": 450
279
+ },
280
+ {
281
+ "epoch": 0.85,
282
+ "learning_rate": 3.084676012771753e-05,
283
+ "loss": 1.5643,
284
+ "step": 460
285
+ },
286
+ {
287
+ "epoch": 0.87,
288
+ "learning_rate": 3.0136012867444297e-05,
289
+ "loss": 1.5588,
290
+ "step": 470
291
+ },
292
+ {
293
+ "epoch": 0.89,
294
+ "learning_rate": 2.942090388911291e-05,
295
+ "loss": 1.5598,
296
+ "step": 480
297
+ },
298
+ {
299
+ "epoch": 0.91,
300
+ "learning_rate": 2.8702040493332778e-05,
301
+ "loss": 1.5618,
302
+ "step": 490
303
+ },
304
+ {
305
+ "epoch": 0.93,
306
+ "learning_rate": 2.7980033169122454e-05,
307
+ "loss": 1.5602,
308
+ "step": 500
309
+ },
310
+ {
311
+ "epoch": 0.95,
312
+ "learning_rate": 2.7255495075456693e-05,
313
+ "loss": 1.5453,
314
+ "step": 510
315
+ },
316
+ {
317
+ "epoch": 0.96,
318
+ "learning_rate": 2.652904152054607e-05,
319
+ "loss": 1.5557,
320
+ "step": 520
321
+ },
322
+ {
323
+ "epoch": 0.98,
324
+ "learning_rate": 2.5801289439291388e-05,
325
+ "loss": 1.5493,
326
+ "step": 530
327
+ },
328
+ {
329
+ "epoch": 1.0,
330
+ "learning_rate": 2.5072856869356593e-05,
331
+ "loss": 1.5527,
332
+ "step": 540
333
+ },
334
+ {
335
+ "epoch": 1.02,
336
+ "learning_rate": 2.4344362426305255e-05,
337
+ "loss": 1.5564,
338
+ "step": 550
339
+ },
340
+ {
341
+ "epoch": 1.04,
342
+ "learning_rate": 2.3616424778246173e-05,
343
+ "loss": 1.5514,
344
+ "step": 560
345
+ },
346
+ {
347
+ "epoch": 1.06,
348
+ "learning_rate": 2.2889662120434453e-05,
349
+ "loss": 1.5427,
350
+ "step": 570
351
+ },
352
+ {
353
+ "epoch": 1.08,
354
+ "learning_rate": 2.216469165027406e-05,
355
+ "loss": 1.5428,
356
+ "step": 580
357
+ },
358
+ {
359
+ "epoch": 1.09,
360
+ "learning_rate": 2.1442129043167874e-05,
361
+ "loss": 1.5441,
362
+ "step": 590
363
+ },
364
+ {
365
+ "epoch": 1.11,
366
+ "learning_rate": 2.0722587929660227e-05,
367
+ "loss": 1.552,
368
+ "step": 600
369
+ },
370
+ {
371
+ "epoch": 1.13,
372
+ "learning_rate": 2.0006679374316062e-05,
373
+ "loss": 1.5555,
374
+ "step": 610
375
+ },
376
+ {
377
+ "epoch": 1.15,
378
+ "learning_rate": 1.9295011356779192e-05,
379
+ "loss": 1.5504,
380
+ "step": 620
381
+ },
382
+ {
383
+ "epoch": 1.17,
384
+ "learning_rate": 1.8588188255450466e-05,
385
+ "loss": 1.5452,
386
+ "step": 630
387
+ },
388
+ {
389
+ "epoch": 1.19,
390
+ "learning_rate": 1.7886810334224192e-05,
391
+ "loss": 1.5502,
392
+ "step": 640
393
+ },
394
+ {
395
+ "epoch": 1.21,
396
+ "learning_rate": 1.7191473232718774e-05,
397
+ "loss": 1.556,
398
+ "step": 650
399
+ },
400
+ {
401
+ "epoch": 1.22,
402
+ "learning_rate": 1.6502767460434588e-05,
403
+ "loss": 1.5419,
404
+ "step": 660
405
+ },
406
+ {
407
+ "epoch": 1.24,
408
+ "learning_rate": 1.582127789526838e-05,
409
+ "loss": 1.5528,
410
+ "step": 670
411
+ },
412
+ {
413
+ "epoch": 1.26,
414
+ "learning_rate": 1.5147583286810485e-05,
415
+ "loss": 1.545,
416
+ "step": 680
417
+ },
418
+ {
419
+ "epoch": 1.28,
420
+ "learning_rate": 1.4482255764846225e-05,
421
+ "loss": 1.5433,
422
+ "step": 690
423
+ },
424
+ {
425
+ "epoch": 1.3,
426
+ "learning_rate": 1.3825860353479336e-05,
427
+ "loss": 1.551,
428
+ "step": 700
429
+ },
430
+ {
431
+ "epoch": 1.32,
432
+ "learning_rate": 1.3178954491289692e-05,
433
+ "loss": 1.5424,
434
+ "step": 710
435
+ },
436
+ {
437
+ "epoch": 1.34,
438
+ "learning_rate": 1.2542087557933041e-05,
439
+ "loss": 1.5426,
440
+ "step": 720
441
+ },
442
+ {
443
+ "epoch": 1.35,
444
+ "learning_rate": 1.1915800407584704e-05,
445
+ "loss": 1.5555,
446
+ "step": 730
447
+ },
448
+ {
449
+ "epoch": 1.37,
450
+ "learning_rate": 1.1300624909623463e-05,
451
+ "loss": 1.5335,
452
+ "step": 740
453
+ },
454
+ {
455
+ "epoch": 1.39,
456
+ "learning_rate": 1.0697083496945765e-05,
457
+ "loss": 1.5446,
458
+ "step": 750
459
+ },
460
+ {
461
+ "epoch": 1.41,
462
+ "learning_rate": 1.0105688722293643e-05,
463
+ "loss": 1.5422,
464
+ "step": 760
465
+ },
466
+ {
467
+ "epoch": 1.43,
468
+ "learning_rate": 9.526942822973522e-06,
469
+ "loss": 1.548,
470
+ "step": 770
471
+ },
472
+ {
473
+ "epoch": 1.45,
474
+ "learning_rate": 8.961337294335021e-06,
475
+ "loss": 1.5436,
476
+ "step": 780
477
+ },
478
+ {
479
+ "epoch": 1.46,
480
+ "learning_rate": 8.409352472372595e-06,
481
+ "loss": 1.5483,
482
+ "step": 790
483
+ },
484
+ {
485
+ "epoch": 1.48,
486
+ "learning_rate": 7.871457125803896e-06,
487
+ "loss": 1.5348,
488
+ "step": 800
489
+ },
490
+ {
491
+ "epoch": 1.5,
492
+ "learning_rate": 7.348108057971728e-06,
493
+ "loss": 1.5309,
494
+ "step": 810
495
+ },
496
+ {
497
+ "epoch": 1.52,
498
+ "learning_rate": 6.839749718907429e-06,
499
+ "loss": 1.5343,
500
+ "step": 820
501
+ },
502
+ {
503
+ "epoch": 1.54,
504
+ "learning_rate": 6.3468138278852174e-06,
505
+ "loss": 1.5446,
506
+ "step": 830
507
+ },
508
+ {
509
+ "epoch": 1.56,
510
+ "learning_rate": 5.8697190067880325e-06,
511
+ "loss": 1.5381,
512
+ "step": 840
513
+ },
514
+ {
515
+ "epoch": 1.58,
516
+ "learning_rate": 5.408870424596238e-06,
517
+ "loss": 1.5483,
518
+ "step": 850
519
+ },
520
+ {
521
+ "epoch": 1.59,
522
+ "learning_rate": 4.9646594533010875e-06,
523
+ "loss": 1.5466,
524
+ "step": 860
525
+ },
526
+ {
527
+ "epoch": 1.61,
528
+ "learning_rate": 4.537463335535161e-06,
529
+ "loss": 1.5397,
530
+ "step": 870
531
+ },
532
+ {
533
+ "epoch": 1.63,
534
+ "learning_rate": 4.127644864202104e-06,
535
+ "loss": 1.5355,
536
+ "step": 880
537
+ },
538
+ {
539
+ "epoch": 1.65,
540
+ "learning_rate": 3.735552074377563e-06,
541
+ "loss": 1.5409,
542
+ "step": 890
543
+ },
544
+ {
545
+ "epoch": 1.67,
546
+ "learning_rate": 3.3615179477432645e-06,
547
+ "loss": 1.5383,
548
+ "step": 900
549
+ },
550
+ {
551
+ "epoch": 1.69,
552
+ "learning_rate": 3.0058601298048774e-06,
553
+ "loss": 1.5338,
554
+ "step": 910
555
+ },
556
+ {
557
+ "epoch": 1.71,
558
+ "learning_rate": 2.6688806601341765e-06,
559
+ "loss": 1.5384,
560
+ "step": 920
561
+ },
562
+ {
563
+ "epoch": 1.72,
564
+ "learning_rate": 2.350865715864278e-06,
565
+ "loss": 1.5305,
566
+ "step": 930
567
+ },
568
+ {
569
+ "epoch": 1.74,
570
+ "learning_rate": 2.0520853686560178e-06,
571
+ "loss": 1.5344,
572
+ "step": 940
573
+ },
574
+ {
575
+ "epoch": 1.76,
576
+ "learning_rate": 1.772793355341734e-06,
577
+ "loss": 1.5363,
578
+ "step": 950
579
+ },
580
+ {
581
+ "epoch": 1.78,
582
+ "learning_rate": 1.513226862441286e-06,
583
+ "loss": 1.5468,
584
+ "step": 960
585
+ },
586
+ {
587
+ "epoch": 1.8,
588
+ "learning_rate": 1.273606324733284e-06,
589
+ "loss": 1.5369,
590
+ "step": 970
591
+ },
592
+ {
593
+ "epoch": 1.82,
594
+ "learning_rate": 1.0541352380526087e-06,
595
+ "loss": 1.5428,
596
+ "step": 980
597
+ },
598
+ {
599
+ "epoch": 1.84,
600
+ "learning_rate": 8.549999864732011e-07,
601
+ "loss": 1.5338,
602
+ "step": 990
603
+ },
604
+ {
605
+ "epoch": 1.85,
606
+ "learning_rate": 6.763696840228456e-07,
607
+ "loss": 1.5399,
608
+ "step": 1000
609
+ }
610
+ ],
611
+ "max_steps": 1078,
612
+ "num_train_epochs": 2,
613
+ "total_flos": 2.489434826550018e+18,
614
+ "trial_name": null,
615
+ "trial_params": null
616
+ }
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b69ea6b9f79d50f0ca7b5a2476627c8e100b6bcda45f5e45dfaba3d805792111
+ size 3264
finetuning_args.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "dpo_beta": 0.1,
+ "finetuning_type": "lora",
+ "lora_alpha": 32.0,
+ "lora_dropout": 0.1,
+ "lora_rank": 8,
+ "lora_target": [
+ "W_pack"
+ ],
+ "name_module_trainable": "mlp",
+ "num_hidden_layers": 32,
+ "num_layer_trainable": 3,
+ "ppo_score_norm": false,
+ "resume_lora_training": true
+ }
train_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "epoch": 2.0,
+ "train_loss": 1.5770846359804964,
+ "train_runtime": 13419.9836,
+ "train_samples_per_second": 10.287,
+ "train_steps_per_second": 0.08
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,108 @@
1
+ {"current_steps": 10, "total_steps": 1078, "loss": 2.0078, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.998938447446803e-05, "epoch": 0.02, "percentage": 0.93, "elapsed_time": "0:02:04", "remaining_time": "3:41:58"}
2
+ {"current_steps": 20, "total_steps": 1078, "loss": 1.8414, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9957546913022665e-05, "epoch": 0.04, "percentage": 1.86, "elapsed_time": "0:04:05", "remaining_time": "3:36:37"}
3
+ {"current_steps": 30, "total_steps": 1078, "loss": 1.7763, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9904514353459654e-05, "epoch": 0.06, "percentage": 2.78, "elapsed_time": "0:06:11", "remaining_time": "3:36:07"}
4
+ {"current_steps": 40, "total_steps": 1078, "loss": 1.7278, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.983033183325818e-05, "epoch": 0.07, "percentage": 3.71, "elapsed_time": "0:08:13", "remaining_time": "3:33:24"}
5
+ {"current_steps": 50, "total_steps": 1078, "loss": 1.7139, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.973506235133323e-05, "epoch": 0.09, "percentage": 4.64, "elapsed_time": "0:10:18", "remaining_time": "3:32:00"}
6
+ {"current_steps": 60, "total_steps": 1078, "loss": 1.6947, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.96313569658781e-05, "epoch": 0.11, "percentage": 5.57, "elapsed_time": "0:12:23", "remaining_time": "3:30:12"}
7
+ {"current_steps": 70, "total_steps": 1078, "loss": 1.6749, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.94962599008322e-05, "epoch": 0.13, "percentage": 6.49, "elapsed_time": "0:14:25", "remaining_time": "3:27:46"}
8
+ {"current_steps": 80, "total_steps": 1078, "loss": 1.6579, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9340359581993066e-05, "epoch": 0.15, "percentage": 7.42, "elapsed_time": "0:16:31", "remaining_time": "3:26:09"}
9
+ {"current_steps": 90, "total_steps": 1078, "loss": 1.6499, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.916378840646592e-05, "epoch": 0.17, "percentage": 8.35, "elapsed_time": "0:18:34", "remaining_time": "3:23:59"}
10
+ {"current_steps": 100, "total_steps": 1078, "loss": 1.6507, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8966696325916515e-05, "epoch": 0.19, "percentage": 9.28, "elapsed_time": "0:20:40", "remaining_time": "3:22:09"}
11
+ {"current_steps": 110, "total_steps": 1078, "loss": 1.6426, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8749250719225915e-05, "epoch": 0.2, "percentage": 10.2, "elapsed_time": "0:22:45", "remaining_time": "3:20:14"}
12
+ {"current_steps": 120, "total_steps": 1078, "loss": 1.6288, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8511636250345294e-05, "epoch": 0.22, "percentage": 11.13, "elapsed_time": "0:24:47", "remaining_time": "3:17:51"}
13
+ {"current_steps": 130, "total_steps": 1078, "loss": 1.6265, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.825405471147153e-05, "epoch": 0.24, "percentage": 12.06, "elapsed_time": "0:26:53", "remaining_time": "3:16:06"}
14
+ {"current_steps": 140, "total_steps": 1078, "loss": 1.6252, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.797672485167683e-05, "epoch": 0.26, "percentage": 12.99, "elapsed_time": "0:29:02", "remaining_time": "3:14:34"}
15
+ {"current_steps": 150, "total_steps": 1078, "loss": 1.6218, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.7679882191137804e-05, "epoch": 0.28, "percentage": 13.91, "elapsed_time": "0:31:04", "remaining_time": "3:12:14"}
16
+ {"current_steps": 160, "total_steps": 1078, "loss": 1.6146, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.7363778821121784e-05, "epoch": 0.3, "percentage": 14.84, "elapsed_time": "0:33:07", "remaining_time": "3:10:03"}
17
+ {"current_steps": 170, "total_steps": 1078, "loss": 1.604, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.702868318990039e-05, "epoch": 0.32, "percentage": 15.77, "elapsed_time": "0:35:18", "remaining_time": "3:08:34"}
18
+ {"current_steps": 180, "total_steps": 1078, "loss": 1.607, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6674879874771926e-05, "epoch": 0.33, "percentage": 16.7, "elapsed_time": "0:37:28", "remaining_time": "3:06:59"}
19
+ {"current_steps": 190, "total_steps": 1078, "loss": 1.5972, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.630266934038642e-05, "epoch": 0.35, "percentage": 17.63, "elapsed_time": "0:39:31", "remaining_time": "3:04:45"}
20
+ {"current_steps": 200, "total_steps": 1078, "loss": 1.5973, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.591236768357833e-05, "epoch": 0.37, "percentage": 18.55, "elapsed_time": "0:41:37", "remaining_time": "3:02:43"}
21
+ {"current_steps": 210, "total_steps": 1078, "loss": 1.6043, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.55043063649239e-05, "epoch": 0.39, "percentage": 19.48, "elapsed_time": "0:43:42", "remaining_time": "3:00:37"}
22
+ {"current_steps": 220, "total_steps": 1078, "loss": 1.5839, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.507883192725089e-05, "epoch": 0.41, "percentage": 20.41, "elapsed_time": "0:45:44", "remaining_time": "2:58:22"}
23
+ {"current_steps": 230, "total_steps": 1078, "loss": 1.5923, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.463630570133978e-05, "epoch": 0.43, "percentage": 21.34, "elapsed_time": "0:47:51", "remaining_time": "2:56:25"}
24
+ {"current_steps": 240, "total_steps": 1078, "loss": 1.5841, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.417710349906658e-05, "epoch": 0.45, "percentage": 22.26, "elapsed_time": "0:49:55", "remaining_time": "2:54:20"}
25
+ {"current_steps": 250, "total_steps": 1078, "loss": 1.5977, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.3701615294247465e-05, "epoch": 0.46, "percentage": 23.19, "elapsed_time": "0:51:59", "remaining_time": "2:52:12"}
26
+ {"current_steps": 260, "total_steps": 1078, "loss": 1.5954, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.321024489145673e-05, "epoch": 0.48, "percentage": 24.12, "elapsed_time": "0:54:00", "remaining_time": "2:49:55"}
27
+ {"current_steps": 270, "total_steps": 1078, "loss": 1.5914, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.270340958309888e-05, "epoch": 0.5, "percentage": 25.05, "elapsed_time": "0:56:06", "remaining_time": "2:47:55"}
28
+ {"current_steps": 280, "total_steps": 1078, "loss": 1.577, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.2181539795026435e-05, "epoch": 0.52, "percentage": 25.97, "elapsed_time": "0:58:07", "remaining_time": "2:45:38"}
29
+ {"current_steps": 290, "total_steps": 1078, "loss": 1.576, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.1645078721004174e-05, "epoch": 0.54, "percentage": 26.9, "elapsed_time": "1:00:09", "remaining_time": "2:43:28"}
30
+ {"current_steps": 300, "total_steps": 1078, "loss": 1.5771, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.109448194633033e-05, "epoch": 0.56, "percentage": 27.83, "elapsed_time": "1:02:11", "remaining_time": "2:41:17"}
31
+ {"current_steps": 310, "total_steps": 1078, "loss": 1.5825, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.0530217060934466e-05, "epoch": 0.57, "percentage": 28.76, "elapsed_time": "1:04:20", "remaining_time": "2:39:24"}
32
+ {"current_steps": 320, "total_steps": 1078, "loss": 1.581, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.9952763262280405e-05, "epoch": 0.59, "percentage": 29.68, "elapsed_time": "1:06:22", "remaining_time": "2:37:13"}
33
+ {"current_steps": 330, "total_steps": 1078, "loss": 1.5691, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.9362610948411585e-05, "epoch": 0.61, "percentage": 30.61, "elapsed_time": "1:08:29", "remaining_time": "2:35:13"}
34
+ {"current_steps": 340, "total_steps": 1078, "loss": 1.5795, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.8760261301484466e-05, "epoch": 0.63, "percentage": 31.54, "elapsed_time": "1:10:31", "remaining_time": "2:33:04"}
35
+ {"current_steps": 350, "total_steps": 1078, "loss": 1.5847, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.8208140480771856e-05, "epoch": 0.65, "percentage": 32.47, "elapsed_time": "1:12:35", "remaining_time": "2:30:58"}
36
+ {"current_steps": 360, "total_steps": 1078, "loss": 1.5737, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.758403340686345e-05, "epoch": 0.67, "percentage": 33.4, "elapsed_time": "1:14:37", "remaining_time": "2:28:51"}
37
+ {"current_steps": 370, "total_steps": 1078, "loss": 1.5645, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.6949239442720976e-05, "epoch": 0.69, "percentage": 34.32, "elapsed_time": "1:16:46", "remaining_time": "2:26:54"}
38
+ {"current_steps": 380, "total_steps": 1078, "loss": 1.5659, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.6304297682067144e-05, "epoch": 0.7, "percentage": 35.25, "elapsed_time": "1:18:55", "remaining_time": "2:24:58"}
39
+ {"current_steps": 390, "total_steps": 1078, "loss": 1.5613, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.5649755836560106e-05, "epoch": 0.72, "percentage": 36.18, "elapsed_time": "1:21:02", "remaining_time": "2:22:57"}
40
+ {"current_steps": 400, "total_steps": 1078, "loss": 1.5725, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.4986169770653685e-05, "epoch": 0.74, "percentage": 37.11, "elapsed_time": "1:23:05", "remaining_time": "2:20:50"}
41
+ {"current_steps": 410, "total_steps": 1078, "loss": 1.5619, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.431410302953389e-05, "epoch": 0.76, "percentage": 38.03, "elapsed_time": "1:25:17", "remaining_time": "2:18:57"}
42
+ {"current_steps": 420, "total_steps": 1078, "loss": 1.5615, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.363412636053269e-05, "epoch": 0.78, "percentage": 38.96, "elapsed_time": "1:27:19", "remaining_time": "2:16:48"}
43
+ {"current_steps": 430, "total_steps": 1078, "loss": 1.5623, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.294681722842537e-05, "epoch": 0.8, "percentage": 39.89, "elapsed_time": "1:29:22", "remaining_time": "2:14:40"}
44
+ {"current_steps": 440, "total_steps": 1078, "loss": 1.5685, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.225275932502315e-05, "epoch": 0.82, "percentage": 40.82, "elapsed_time": "1:31:28", "remaining_time": "2:12:38"}
45
+ {"current_steps": 450, "total_steps": 1078, "loss": 1.566, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.1552542073477555e-05, "epoch": 0.83, "percentage": 41.74, "elapsed_time": "1:33:32", "remaining_time": "2:10:31"}
46
+ {"current_steps": 460, "total_steps": 1078, "loss": 1.5643, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.084676012771753e-05, "epoch": 0.85, "percentage": 42.67, "elapsed_time": "1:35:32", "remaining_time": "2:08:21"}
47
+ {"current_steps": 470, "total_steps": 1078, "loss": 1.5588, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.0136012867444297e-05, "epoch": 0.87, "percentage": 43.6, "elapsed_time": "1:37:35", "remaining_time": "2:06:15"}
48
+ {"current_steps": 480, "total_steps": 1078, "loss": 1.5598, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.942090388911291e-05, "epoch": 0.89, "percentage": 44.53, "elapsed_time": "1:39:45", "remaining_time": "2:04:16"}
49
+ {"current_steps": 490, "total_steps": 1078, "loss": 1.5618, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8702040493332778e-05, "epoch": 0.91, "percentage": 45.45, "elapsed_time": "1:41:52", "remaining_time": "2:02:15"}
50
+ {"current_steps": 500, "total_steps": 1078, "loss": 1.5602, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7980033169122454e-05, "epoch": 0.93, "percentage": 46.38, "elapsed_time": "1:43:57", "remaining_time": "2:00:10"}
51
+ {"current_steps": 510, "total_steps": 1078, "loss": 1.5453, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7255495075456693e-05, "epoch": 0.95, "percentage": 47.31, "elapsed_time": "1:46:04", "remaining_time": "1:58:08"}
52
+ {"current_steps": 520, "total_steps": 1078, "loss": 1.5557, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.652904152054607e-05, "epoch": 0.96, "percentage": 48.24, "elapsed_time": "1:48:04", "remaining_time": "1:55:58"}
53
+ {"current_steps": 530, "total_steps": 1078, "loss": 1.5493, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.5801289439291388e-05, "epoch": 0.98, "percentage": 49.17, "elapsed_time": "1:50:10", "remaining_time": "1:53:55"}
54
+ {"current_steps": 540, "total_steps": 1078, "loss": 1.5527, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.5072856869356593e-05, "epoch": 1.0, "percentage": 50.09, "elapsed_time": "1:52:15", "remaining_time": "1:51:50"}
55
+ {"current_steps": 550, "total_steps": 1078, "loss": 1.5564, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4344362426305255e-05, "epoch": 1.02, "percentage": 51.02, "elapsed_time": "1:54:18", "remaining_time": "1:49:44"}
56
+ {"current_steps": 560, "total_steps": 1078, "loss": 1.5514, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.3616424778246173e-05, "epoch": 1.04, "percentage": 51.95, "elapsed_time": "1:56:22", "remaining_time": "1:47:38"}
57
+ {"current_steps": 570, "total_steps": 1078, "loss": 1.5427, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2889662120434453e-05, "epoch": 1.06, "percentage": 52.88, "elapsed_time": "1:58:25", "remaining_time": "1:45:32"}
58
+ {"current_steps": 580, "total_steps": 1078, "loss": 1.5428, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.216469165027406e-05, "epoch": 1.08, "percentage": 53.8, "elapsed_time": "2:00:33", "remaining_time": "1:43:30"}
59
+ {"current_steps": 590, "total_steps": 1078, "loss": 1.5441, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1442129043167874e-05, "epoch": 1.09, "percentage": 54.73, "elapsed_time": "2:02:36", "remaining_time": "1:41:24"}
60
+ {"current_steps": 600, "total_steps": 1078, "loss": 1.552, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0722587929660227e-05, "epoch": 1.11, "percentage": 55.66, "elapsed_time": "2:04:41", "remaining_time": "1:39:20"}
61
+ {"current_steps": 610, "total_steps": 1078, "loss": 1.5555, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0006679374316062e-05, "epoch": 1.13, "percentage": 56.59, "elapsed_time": "2:06:44", "remaining_time": "1:37:14"}
62
+ {"current_steps": 620, "total_steps": 1078, "loss": 1.5504, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.9295011356779192e-05, "epoch": 1.15, "percentage": 57.51, "elapsed_time": "2:08:51", "remaining_time": "1:35:11"}
63
+ {"current_steps": 630, "total_steps": 1078, "loss": 1.5452, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.8588188255450466e-05, "epoch": 1.17, "percentage": 58.44, "elapsed_time": "2:10:52", "remaining_time": "1:33:03"}
64
+ {"current_steps": 640, "total_steps": 1078, "loss": 1.5502, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7886810334224192e-05, "epoch": 1.19, "percentage": 59.37, "elapsed_time": "2:12:57", "remaining_time": "1:30:59"}
65
+ {"current_steps": 650, "total_steps": 1078, "loss": 1.556, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7191473232718774e-05, "epoch": 1.21, "percentage": 60.3, "elapsed_time": "2:14:58", "remaining_time": "1:28:52"}
66
+ {"current_steps": 660, "total_steps": 1078, "loss": 1.5419, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6502767460434588e-05, "epoch": 1.22, "percentage": 61.22, "elapsed_time": "2:17:04", "remaining_time": "1:26:48"}
67
+ {"current_steps": 670, "total_steps": 1078, "loss": 1.5528, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.582127789526838e-05, "epoch": 1.24, "percentage": 62.15, "elapsed_time": "2:19:05", "remaining_time": "1:24:42"}
68
+ {"current_steps": 680, "total_steps": 1078, "loss": 1.545, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5147583286810485e-05, "epoch": 1.26, "percentage": 63.08, "elapsed_time": "2:21:05", "remaining_time": "1:22:34"}
69
+ {"current_steps": 690, "total_steps": 1078, "loss": 1.5433, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4482255764846225e-05, "epoch": 1.28, "percentage": 64.01, "elapsed_time": "2:23:07", "remaining_time": "1:20:29"}
70
+ {"current_steps": 700, "total_steps": 1078, "loss": 1.551, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3825860353479336e-05, "epoch": 1.3, "percentage": 64.94, "elapsed_time": "2:25:10", "remaining_time": "1:18:23"}
71
+ {"current_steps": 710, "total_steps": 1078, "loss": 1.5424, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3178954491289692e-05, "epoch": 1.32, "percentage": 65.86, "elapsed_time": "2:27:17", "remaining_time": "1:16:20"}
72
+ {"current_steps": 720, "total_steps": 1078, "loss": 1.5426, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2542087557933041e-05, "epoch": 1.34, "percentage": 66.79, "elapsed_time": "2:29:26", "remaining_time": "1:14:18"}
73
+ {"current_steps": 730, "total_steps": 1078, "loss": 1.5555, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1915800407584704e-05, "epoch": 1.35, "percentage": 67.72, "elapsed_time": "2:31:27", "remaining_time": "1:12:12"}
74
+ {"current_steps": 740, "total_steps": 1078, "loss": 1.5335, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1300624909623463e-05, "epoch": 1.37, "percentage": 68.65, "elapsed_time": "2:33:33", "remaining_time": "1:10:08"}
75
+ {"current_steps": 750, "total_steps": 1078, "loss": 1.5446, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0697083496945765e-05, "epoch": 1.39, "percentage": 69.57, "elapsed_time": "2:35:37", "remaining_time": "1:08:03"}
76
+ {"current_steps": 760, "total_steps": 1078, "loss": 1.5422, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0105688722293643e-05, "epoch": 1.41, "percentage": 70.5, "elapsed_time": "2:37:41", "remaining_time": "1:05:59"}
77
+ {"current_steps": 770, "total_steps": 1078, "loss": 1.548, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.526942822973522e-06, "epoch": 1.43, "percentage": 71.43, "elapsed_time": "2:39:41", "remaining_time": "1:03:52"}
78
+ {"current_steps": 780, "total_steps": 1078, "loss": 1.5436, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.961337294335021e-06, "epoch": 1.45, "percentage": 72.36, "elapsed_time": "2:41:41", "remaining_time": "1:01:46"}
79
+ {"current_steps": 790, "total_steps": 1078, "loss": 1.5483, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.409352472372595e-06, "epoch": 1.46, "percentage": 73.28, "elapsed_time": "2:43:50", "remaining_time": "0:59:43"}
80
+ {"current_steps": 800, "total_steps": 1078, "loss": 1.5348, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.871457125803896e-06, "epoch": 1.48, "percentage": 74.21, "elapsed_time": "2:45:53", "remaining_time": "0:57:38"}
81
+ {"current_steps": 810, "total_steps": 1078, "loss": 1.5309, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.348108057971728e-06, "epoch": 1.5, "percentage": 75.14, "elapsed_time": "2:47:54", "remaining_time": "0:55:33"}
82
+ {"current_steps": 820, "total_steps": 1078, "loss": 1.5343, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.839749718907429e-06, "epoch": 1.52, "percentage": 76.07, "elapsed_time": "2:49:54", "remaining_time": "0:53:27"}
83
+ {"current_steps": 830, "total_steps": 1078, "loss": 1.5446, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.3468138278852174e-06, "epoch": 1.54, "percentage": 76.99, "elapsed_time": "2:51:58", "remaining_time": "0:51:23"}
84
+ {"current_steps": 840, "total_steps": 1078, "loss": 1.5381, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.8697190067880325e-06, "epoch": 1.56, "percentage": 77.92, "elapsed_time": "2:54:02", "remaining_time": "0:49:18"}
85
+ {"current_steps": 850, "total_steps": 1078, "loss": 1.5483, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.408870424596238e-06, "epoch": 1.58, "percentage": 78.85, "elapsed_time": "2:56:07", "remaining_time": "0:47:14"}
86
+ {"current_steps": 860, "total_steps": 1078, "loss": 1.5466, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9646594533010875e-06, "epoch": 1.59, "percentage": 79.78, "elapsed_time": "2:58:08", "remaining_time": "0:45:09"}
87
+ {"current_steps": 870, "total_steps": 1078, "loss": 1.5397, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.537463335535161e-06, "epoch": 1.61, "percentage": 80.71, "elapsed_time": "3:00:12", "remaining_time": "0:43:05"}
88
+ {"current_steps": 880, "total_steps": 1078, "loss": 1.5355, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.127644864202104e-06, "epoch": 1.63, "percentage": 81.63, "elapsed_time": "3:02:16", "remaining_time": "0:41:00"}
89
+ {"current_steps": 890, "total_steps": 1078, "loss": 1.5409, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.735552074377563e-06, "epoch": 1.65, "percentage": 82.56, "elapsed_time": "3:04:21", "remaining_time": "0:38:56"}
90
+ {"current_steps": 900, "total_steps": 1078, "loss": 1.5383, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.3615179477432645e-06, "epoch": 1.67, "percentage": 83.49, "elapsed_time": "3:06:27", "remaining_time": "0:36:52"}
91
+ {"current_steps": 910, "total_steps": 1078, "loss": 1.5338, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.0058601298048774e-06, "epoch": 1.69, "percentage": 84.42, "elapsed_time": "3:08:29", "remaining_time": "0:34:47"}
92
+ {"current_steps": 920, "total_steps": 1078, "loss": 1.5384, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6688806601341765e-06, "epoch": 1.71, "percentage": 85.34, "elapsed_time": "3:10:36", "remaining_time": "0:32:44"}
93
+ {"current_steps": 930, "total_steps": 1078, "loss": 1.5305, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.350865715864278e-06, "epoch": 1.72, "percentage": 86.27, "elapsed_time": "3:12:40", "remaining_time": "0:30:39"}
94
+ {"current_steps": 940, "total_steps": 1078, "loss": 1.5344, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0520853686560178e-06, "epoch": 1.74, "percentage": 87.2, "elapsed_time": "3:14:43", "remaining_time": "0:28:35"}
95
+ {"current_steps": 950, "total_steps": 1078, "loss": 1.5363, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.772793355341734e-06, "epoch": 1.76, "percentage": 88.13, "elapsed_time": "3:16:48", "remaining_time": "0:26:30"}
96
+ {"current_steps": 960, "total_steps": 1078, "loss": 1.5468, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.513226862441286e-06, "epoch": 1.78, "percentage": 89.05, "elapsed_time": "3:18:51", "remaining_time": "0:24:26"}
97
+ {"current_steps": 970, "total_steps": 1078, "loss": 1.5369, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.273606324733284e-06, "epoch": 1.8, "percentage": 89.98, "elapsed_time": "3:20:55", "remaining_time": "0:22:22"}
98
+ {"current_steps": 980, "total_steps": 1078, "loss": 1.5428, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0541352380526087e-06, "epoch": 1.82, "percentage": 90.91, "elapsed_time": "3:23:03", "remaining_time": "0:20:18"}
99
+ {"current_steps": 990, "total_steps": 1078, "loss": 1.5338, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.549999864732011e-07, "epoch": 1.84, "percentage": 91.84, "elapsed_time": "3:25:14", "remaining_time": "0:18:14"}
100
+ {"current_steps": 1000, "total_steps": 1078, "loss": 1.5399, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.763696840228456e-07, "epoch": 1.85, "percentage": 92.76, "elapsed_time": "3:27:17", "remaining_time": "0:16:10"}
101
+ {"current_steps": 1010, "total_steps": 1078, "loss": 1.5406, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.183960310644748e-07, "epoch": 1.87, "percentage": 93.69, "elapsed_time": "3:29:21", "remaining_time": "0:14:05"}
102
+ {"current_steps": 1020, "total_steps": 1078, "loss": 1.5381, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.812131854657813e-07, "epoch": 1.89, "percentage": 94.62, "elapsed_time": "3:31:29", "remaining_time": "0:12:01"}
103
+ {"current_steps": 1030, "total_steps": 1078, "loss": 1.5354, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.649376486667743e-07, "epoch": 1.91, "percentage": 95.55, "elapsed_time": "3:33:35", "remaining_time": "0:09:57"}
104
+ {"current_steps": 1040, "total_steps": 1078, "loss": 1.5358, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6966816674182373e-07, "epoch": 1.93, "percentage": 96.47, "elapsed_time": "3:35:41", "remaining_time": "0:07:52"}
105
+ {"current_steps": 1050, "total_steps": 1078, "loss": 1.5481, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.019530491929821e-07, "epoch": 1.95, "percentage": 97.4, "elapsed_time": "3:37:50", "remaining_time": "0:05:48"}
106
+ {"current_steps": 1060, "total_steps": 1078, "loss": 1.5504, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.680317016582669e-08, "epoch": 1.97, "percentage": 98.33, "elapsed_time": "3:39:54", "remaining_time": "0:03:44"}
107
+ {"current_steps": 1070, "total_steps": 1078, "loss": 1.5429, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.284459498280266e-08, "epoch": 1.98, "percentage": 99.26, "elapsed_time": "3:41:57", "remaining_time": "0:01:39"}
108
+ {"current_steps": 1078, "total_steps": 1078, "loss": null, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 2.0, "percentage": 100.0, "elapsed_time": "3:43:39", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,667 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.999072786277237,
5
+ "global_step": 1078,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.02,
12
+ "learning_rate": 4.998938447446803e-05,
13
+ "loss": 2.0078,
14
+ "step": 10
15
+ },
16
+ {
17
+ "epoch": 0.04,
18
+ "learning_rate": 4.9957546913022665e-05,
19
+ "loss": 1.8414,
20
+ "step": 20
21
+ },
22
+ {
23
+ "epoch": 0.06,
24
+ "learning_rate": 4.9904514353459654e-05,
25
+ "loss": 1.7763,
26
+ "step": 30
27
+ },
28
+ {
29
+ "epoch": 0.07,
30
+ "learning_rate": 4.983033183325818e-05,
31
+ "loss": 1.7278,
32
+ "step": 40
33
+ },
34
+ {
35
+ "epoch": 0.09,
36
+ "learning_rate": 4.973506235133323e-05,
37
+ "loss": 1.7139,
38
+ "step": 50
39
+ },
40
+ {
41
+ "epoch": 0.11,
42
+ "learning_rate": 4.96313569658781e-05,
43
+ "loss": 1.6947,
44
+ "step": 60
45
+ },
46
+ {
47
+ "epoch": 0.13,
48
+ "learning_rate": 4.94962599008322e-05,
49
+ "loss": 1.6749,
50
+ "step": 70
51
+ },
52
+ {
53
+ "epoch": 0.15,
54
+ "learning_rate": 4.9340359581993066e-05,
55
+ "loss": 1.6579,
56
+ "step": 80
57
+ },
58
+ {
59
+ "epoch": 0.17,
60
+ "learning_rate": 4.916378840646592e-05,
61
+ "loss": 1.6499,
62
+ "step": 90
63
+ },
64
+ {
65
+ "epoch": 0.19,
66
+ "learning_rate": 4.8966696325916515e-05,
67
+ "loss": 1.6507,
68
+ "step": 100
69
+ },
70
+ {
71
+ "epoch": 0.2,
72
+ "learning_rate": 4.8749250719225915e-05,
73
+ "loss": 1.6426,
74
+ "step": 110
75
+ },
76
+ {
77
+ "epoch": 0.22,
78
+ "learning_rate": 4.8511636250345294e-05,
79
+ "loss": 1.6288,
80
+ "step": 120
81
+ },
82
+ {
83
+ "epoch": 0.24,
84
+ "learning_rate": 4.825405471147153e-05,
85
+ "loss": 1.6265,
86
+ "step": 130
87
+ },
88
+ {
89
+ "epoch": 0.26,
90
+ "learning_rate": 4.797672485167683e-05,
91
+ "loss": 1.6252,
92
+ "step": 140
93
+ },
94
+ {
95
+ "epoch": 0.28,
96
+ "learning_rate": 4.7679882191137804e-05,
97
+ "loss": 1.6218,
98
+ "step": 150
99
+ },
100
+ {
101
+ "epoch": 0.3,
102
+ "learning_rate": 4.7363778821121784e-05,
103
+ "loss": 1.6146,
104
+ "step": 160
105
+ },
106
+ {
107
+ "epoch": 0.32,
108
+ "learning_rate": 4.702868318990039e-05,
109
+ "loss": 1.604,
110
+ "step": 170
111
+ },
112
+ {
113
+ "epoch": 0.33,
114
+ "learning_rate": 4.6674879874771926e-05,
115
+ "loss": 1.607,
116
+ "step": 180
117
+ },
118
+ {
119
+ "epoch": 0.35,
120
+ "learning_rate": 4.630266934038642e-05,
121
+ "loss": 1.5972,
122
+ "step": 190
123
+ },
124
+ {
125
+ "epoch": 0.37,
126
+ "learning_rate": 4.591236768357833e-05,
127
+ "loss": 1.5973,
128
+ "step": 200
129
+ },
130
+ {
131
+ "epoch": 0.39,
132
+ "learning_rate": 4.55043063649239e-05,
133
+ "loss": 1.6043,
134
+ "step": 210
135
+ },
136
+ {
137
+ "epoch": 0.41,
138
+ "learning_rate": 4.507883192725089e-05,
139
+ "loss": 1.5839,
140
+ "step": 220
141
+ },
142
+ {
143
+ "epoch": 0.43,
144
+ "learning_rate": 4.463630570133978e-05,
145
+ "loss": 1.5923,
146
+ "step": 230
147
+ },
148
+ {
149
+ "epoch": 0.45,
150
+ "learning_rate": 4.417710349906658e-05,
151
+ "loss": 1.5841,
152
+ "step": 240
153
+ },
154
+ {
155
+ "epoch": 0.46,
156
+ "learning_rate": 4.3701615294247465e-05,
157
+ "loss": 1.5977,
158
+ "step": 250
159
+ },
160
+ {
161
+ "epoch": 0.48,
162
+ "learning_rate": 4.321024489145673e-05,
163
+ "loss": 1.5954,
164
+ "step": 260
165
+ },
166
+ {
167
+ "epoch": 0.5,
168
+ "learning_rate": 4.270340958309888e-05,
169
+ "loss": 1.5914,
170
+ "step": 270
171
+ },
172
+ {
173
+ "epoch": 0.52,
174
+ "learning_rate": 4.2181539795026435e-05,
175
+ "loss": 1.577,
176
+ "step": 280
177
+ },
178
+ {
179
+ "epoch": 0.54,
180
+ "learning_rate": 4.1645078721004174e-05,
181
+ "loss": 1.576,
182
+ "step": 290
183
+ },
184
+ {
185
+ "epoch": 0.56,
186
+ "learning_rate": 4.109448194633033e-05,
187
+ "loss": 1.5771,
188
+ "step": 300
189
+ },
190
+ {
191
+ "epoch": 0.57,
192
+ "learning_rate": 4.0530217060934466e-05,
193
+ "loss": 1.5825,
194
+ "step": 310
195
+ },
196
+ {
197
+ "epoch": 0.59,
198
+ "learning_rate": 3.9952763262280405e-05,
199
+ "loss": 1.581,
200
+ "step": 320
201
+ },
202
+ {
203
+ "epoch": 0.61,
204
+ "learning_rate": 3.9362610948411585e-05,
205
+ "loss": 1.5691,
206
+ "step": 330
207
+ },
208
+ {
209
+ "epoch": 0.63,
210
+ "learning_rate": 3.8760261301484466e-05,
211
+ "loss": 1.5795,
212
+ "step": 340
213
+ },
214
+ {
215
+ "epoch": 0.65,
216
+ "learning_rate": 3.8208140480771856e-05,
217
+ "loss": 1.5847,
218
+ "step": 350
219
+ },
220
+ {
221
+ "epoch": 0.67,
222
+ "learning_rate": 3.758403340686345e-05,
223
+ "loss": 1.5737,
224
+ "step": 360
225
+ },
226
+ {
227
+ "epoch": 0.69,
228
+ "learning_rate": 3.6949239442720976e-05,
229
+ "loss": 1.5645,
230
+ "step": 370
231
+ },
232
+ {
233
+ "epoch": 0.7,
234
+ "learning_rate": 3.6304297682067144e-05,
235
+ "loss": 1.5659,
236
+ "step": 380
237
+ },
238
+ {
239
+ "epoch": 0.72,
240
+ "learning_rate": 3.5649755836560106e-05,
241
+ "loss": 1.5613,
242
+ "step": 390
243
+ },
244
+ {
245
+ "epoch": 0.74,
246
+ "learning_rate": 3.4986169770653685e-05,
247
+ "loss": 1.5725,
248
+ "step": 400
249
+ },
250
+ {
251
+ "epoch": 0.76,
252
+ "learning_rate": 3.431410302953389e-05,
253
+ "loss": 1.5619,
254
+ "step": 410
255
+ },
256
+ {
257
+ "epoch": 0.78,
258
+ "learning_rate": 3.363412636053269e-05,
259
+ "loss": 1.5615,
260
+ "step": 420
261
+ },
262
+ {
263
+ "epoch": 0.8,
264
+ "learning_rate": 3.294681722842537e-05,
265
+ "loss": 1.5623,
266
+ "step": 430
267
+ },
268
+ {
269
+ "epoch": 0.82,
270
+ "learning_rate": 3.225275932502315e-05,
271
+ "loss": 1.5685,
272
+ "step": 440
273
+ },
274
+ {
275
+ "epoch": 0.83,
276
+ "learning_rate": 3.1552542073477555e-05,
277
+ "loss": 1.566,
278
+ "step": 450
279
+ },
280
+ {
281
+ "epoch": 0.85,
282
+ "learning_rate": 3.084676012771753e-05,
283
+ "loss": 1.5643,
284
+ "step": 460
285
+ },
286
+ {
287
+ "epoch": 0.87,
288
+ "learning_rate": 3.0136012867444297e-05,
289
+ "loss": 1.5588,
290
+ "step": 470
291
+ },
292
+ {
293
+ "epoch": 0.89,
294
+ "learning_rate": 2.942090388911291e-05,
295
+ "loss": 1.5598,
296
+ "step": 480
297
+ },
298
+ {
299
+ "epoch": 0.91,
300
+ "learning_rate": 2.8702040493332778e-05,
301
+ "loss": 1.5618,
302
+ "step": 490
303
+ },
304
+ {
305
+ "epoch": 0.93,
306
+ "learning_rate": 2.7980033169122454e-05,
307
+ "loss": 1.5602,
308
+ "step": 500
309
+ },
310
+ {
311
+ "epoch": 0.95,
312
+ "learning_rate": 2.7255495075456693e-05,
313
+ "loss": 1.5453,
314
+ "step": 510
315
+ },
316
+ {
317
+ "epoch": 0.96,
318
+ "learning_rate": 2.652904152054607e-05,
319
+ "loss": 1.5557,
320
+ "step": 520
321
+ },
322
+ {
323
+ "epoch": 0.98,
324
+ "learning_rate": 2.5801289439291388e-05,
325
+ "loss": 1.5493,
326
+ "step": 530
327
+ },
328
+ {
329
+ "epoch": 1.0,
330
+ "learning_rate": 2.5072856869356593e-05,
331
+ "loss": 1.5527,
332
+ "step": 540
333
+ },
334
+ {
335
+ "epoch": 1.02,
336
+ "learning_rate": 2.4344362426305255e-05,
337
+ "loss": 1.5564,
338
+ "step": 550
339
+ },
340
+ {
341
+ "epoch": 1.04,
342
+ "learning_rate": 2.3616424778246173e-05,
343
+ "loss": 1.5514,
344
+ "step": 560
345
+ },
346
+ {
347
+ "epoch": 1.06,
348
+ "learning_rate": 2.2889662120434453e-05,
349
+ "loss": 1.5427,
350
+ "step": 570
351
+ },
352
+ {
353
+ "epoch": 1.08,
354
+ "learning_rate": 2.216469165027406e-05,
355
+ "loss": 1.5428,
356
+ "step": 580
357
+ },
358
+ {
359
+ "epoch": 1.09,
360
+ "learning_rate": 2.1442129043167874e-05,
361
+ "loss": 1.5441,
362
+ "step": 590
363
+ },
364
+ {
365
+ "epoch": 1.11,
366
+ "learning_rate": 2.0722587929660227e-05,
367
+ "loss": 1.552,
368
+ "step": 600
369
+ },
370
+ {
371
+ "epoch": 1.13,
372
+ "learning_rate": 2.0006679374316062e-05,
373
+ "loss": 1.5555,
374
+ "step": 610
375
+ },
376
+ {
377
+ "epoch": 1.15,
378
+ "learning_rate": 1.9295011356779192e-05,
379
+ "loss": 1.5504,
380
+ "step": 620
381
+ },
382
+ {
383
+ "epoch": 1.17,
384
+ "learning_rate": 1.8588188255450466e-05,
385
+ "loss": 1.5452,
386
+ "step": 630
387
+ },
388
+ {
389
+ "epoch": 1.19,
390
+ "learning_rate": 1.7886810334224192e-05,
391
+ "loss": 1.5502,
392
+ "step": 640
393
+ },
394
+ {
395
+ "epoch": 1.21,
396
+ "learning_rate": 1.7191473232718774e-05,
397
+ "loss": 1.556,
398
+ "step": 650
399
+ },
400
+ {
401
+ "epoch": 1.22,
402
+ "learning_rate": 1.6502767460434588e-05,
403
+ "loss": 1.5419,
404
+ "step": 660
405
+ },
406
+ {
407
+ "epoch": 1.24,
408
+ "learning_rate": 1.582127789526838e-05,
409
+ "loss": 1.5528,
410
+ "step": 670
411
+ },
412
+ {
413
+ "epoch": 1.26,
414
+ "learning_rate": 1.5147583286810485e-05,
415
+ "loss": 1.545,
416
+ "step": 680
417
+ },
418
+ {
419
+ "epoch": 1.28,
420
+ "learning_rate": 1.4482255764846225e-05,
421
+ "loss": 1.5433,
422
+ "step": 690
423
+ },
424
+ {
425
+ "epoch": 1.3,
426
+ "learning_rate": 1.3825860353479336e-05,
427
+ "loss": 1.551,
428
+ "step": 700
429
+ },
430
+ {
431
+ "epoch": 1.32,
432
+ "learning_rate": 1.3178954491289692e-05,
433
+ "loss": 1.5424,
434
+ "step": 710
435
+ },
436
+ {
437
+ "epoch": 1.34,
438
+ "learning_rate": 1.2542087557933041e-05,
439
+ "loss": 1.5426,
440
+ "step": 720
441
+ },
442
+ {
443
+ "epoch": 1.35,
444
+ "learning_rate": 1.1915800407584704e-05,
445
+ "loss": 1.5555,
446
+ "step": 730
447
+ },
448
+ {
449
+ "epoch": 1.37,
450
+ "learning_rate": 1.1300624909623463e-05,
451
+ "loss": 1.5335,
452
+ "step": 740
453
+ },
454
+ {
455
+ "epoch": 1.39,
456
+ "learning_rate": 1.0697083496945765e-05,
457
+ "loss": 1.5446,
458
+ "step": 750
459
+ },
460
+ {
461
+ "epoch": 1.41,
462
+ "learning_rate": 1.0105688722293643e-05,
463
+ "loss": 1.5422,
464
+ "step": 760
465
+ },
466
+ {
467
+ "epoch": 1.43,
468
+ "learning_rate": 9.526942822973522e-06,
469
+ "loss": 1.548,
470
+ "step": 770
471
+ },
472
+ {
473
+ "epoch": 1.45,
474
+ "learning_rate": 8.961337294335021e-06,
475
+ "loss": 1.5436,
476
+ "step": 780
477
+ },
478
+ {
479
+ "epoch": 1.46,
480
+ "learning_rate": 8.409352472372595e-06,
481
+ "loss": 1.5483,
482
+ "step": 790
483
+ },
484
+ {
485
+ "epoch": 1.48,
486
+ "learning_rate": 7.871457125803896e-06,
487
+ "loss": 1.5348,
488
+ "step": 800
489
+ },
490
+ {
491
+ "epoch": 1.5,
492
+ "learning_rate": 7.348108057971728e-06,
493
+ "loss": 1.5309,
494
+ "step": 810
495
+ },
496
+ {
497
+ "epoch": 1.52,
498
+ "learning_rate": 6.839749718907429e-06,
499
+ "loss": 1.5343,
500
+ "step": 820
501
+ },
502
+ {
503
+ "epoch": 1.54,
504
+ "learning_rate": 6.3468138278852174e-06,
505
+ "loss": 1.5446,
506
+ "step": 830
507
+ },
508
+ {
509
+ "epoch": 1.56,
510
+ "learning_rate": 5.8697190067880325e-06,
511
+ "loss": 1.5381,
512
+ "step": 840
513
+ },
514
+ {
515
+ "epoch": 1.58,
516
+ "learning_rate": 5.408870424596238e-06,
517
+ "loss": 1.5483,
518
+ "step": 850
519
+ },
520
+ {
521
+ "epoch": 1.59,
522
+ "learning_rate": 4.9646594533010875e-06,
523
+ "loss": 1.5466,
524
+ "step": 860
525
+ },
526
+ {
527
+ "epoch": 1.61,
528
+ "learning_rate": 4.537463335535161e-06,
529
+ "loss": 1.5397,
530
+ "step": 870
531
+ },
532
+ {
533
+ "epoch": 1.63,
534
+ "learning_rate": 4.127644864202104e-06,
535
+ "loss": 1.5355,
536
+ "step": 880
537
+ },
538
+ {
539
+ "epoch": 1.65,
540
+ "learning_rate": 3.735552074377563e-06,
541
+ "loss": 1.5409,
542
+ "step": 890
543
+ },
544
+ {
545
+ "epoch": 1.67,
546
+ "learning_rate": 3.3615179477432645e-06,
547
+ "loss": 1.5383,
548
+ "step": 900
549
+ },
550
+ {
551
+ "epoch": 1.69,
552
+ "learning_rate": 3.0058601298048774e-06,
553
+ "loss": 1.5338,
554
+ "step": 910
555
+ },
556
+ {
557
+ "epoch": 1.71,
558
+ "learning_rate": 2.6688806601341765e-06,
559
+ "loss": 1.5384,
560
+ "step": 920
561
+ },
562
+ {
563
+ "epoch": 1.72,
564
+ "learning_rate": 2.350865715864278e-06,
565
+ "loss": 1.5305,
566
+ "step": 930
567
+ },
568
+ {
569
+ "epoch": 1.74,
570
+ "learning_rate": 2.0520853686560178e-06,
571
+ "loss": 1.5344,
572
+ "step": 940
573
+ },
574
+ {
575
+ "epoch": 1.76,
576
+ "learning_rate": 1.772793355341734e-06,
577
+ "loss": 1.5363,
578
+ "step": 950
579
+ },
580
+ {
581
+ "epoch": 1.78,
582
+ "learning_rate": 1.513226862441286e-06,
583
+ "loss": 1.5468,
584
+ "step": 960
585
+ },
586
+ {
587
+ "epoch": 1.8,
588
+ "learning_rate": 1.273606324733284e-06,
589
+ "loss": 1.5369,
590
+ "step": 970
591
+ },
592
+ {
593
+ "epoch": 1.82,
594
+ "learning_rate": 1.0541352380526087e-06,
595
+ "loss": 1.5428,
596
+ "step": 980
597
+ },
598
+ {
599
+ "epoch": 1.84,
600
+ "learning_rate": 8.549999864732011e-07,
601
+ "loss": 1.5338,
602
+ "step": 990
603
+ },
604
+ {
605
+ "epoch": 1.85,
606
+ "learning_rate": 6.763696840228456e-07,
607
+ "loss": 1.5399,
608
+ "step": 1000
609
+ },
610
+ {
611
+ "epoch": 1.87,
612
+ "learning_rate": 5.183960310644748e-07,
613
+ "loss": 1.5406,
614
+ "step": 1010
615
+ },
616
+ {
617
+ "epoch": 1.89,
618
+ "learning_rate": 3.812131854657813e-07,
619
+ "loss": 1.5381,
620
+ "step": 1020
621
+ },
622
+ {
623
+ "epoch": 1.91,
624
+ "learning_rate": 2.649376486667743e-07,
625
+ "loss": 1.5354,
626
+ "step": 1030
627
+ },
628
+ {
629
+ "epoch": 1.93,
630
+ "learning_rate": 1.6966816674182373e-07,
631
+ "loss": 1.5358,
632
+ "step": 1040
633
+ },
634
+ {
635
+ "epoch": 1.95,
636
+ "learning_rate": 1.019530491929821e-07,
637
+ "loss": 1.5481,
638
+ "step": 1050
639
+ },
640
+ {
641
+ "epoch": 1.97,
642
+ "learning_rate": 4.680317016582669e-08,
643
+ "loss": 1.5504,
644
+ "step": 1060
645
+ },
646
+ {
647
+ "epoch": 1.98,
648
+ "learning_rate": 1.284459498280266e-08,
649
+ "loss": 1.5429,
650
+ "step": 1070
651
+ },
652
+ {
653
+ "epoch": 2.0,
654
+ "step": 1078,
655
+ "total_flos": 2.684311584557236e+18,
656
+ "train_loss": 1.5770846359804964,
657
+ "train_runtime": 13419.9836,
658
+ "train_samples_per_second": 10.287,
659
+ "train_steps_per_second": 0.08
660
+ }
661
+ ],
662
+ "max_steps": 1078,
663
+ "num_train_epochs": 2,
664
+ "total_flos": 2.684311584557236e+18,
665
+ "trial_name": null,
666
+ "trial_params": null
667
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b69ea6b9f79d50f0ca7b5a2476627c8e100b6bcda45f5e45dfaba3d805792111
+ size 3264
training_loss.png ADDED