Phương committed on
Commit 6397d91
Parent: 1f4ac16

Upload folder using huggingface_hub

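A commit with this message is typically produced by `huggingface_hub`'s folder upload. A minimal sketch, assuming an authenticated environment; the folder path and repo id below are placeholders, not values recorded in this commit:

```python
# Sketch of the upload that produces a commit like this one.
# folder_path and repo_id are hypothetical placeholders.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="./output",          # local training output directory (placeholder)
    repo_id="user/llama2-13b-lora",  # target model repo on the Hub (placeholder)
    commit_message="Upload folder using huggingface_hub",
)
```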
README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.4.0
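A minimal sketch of how the settings listed above map onto `transformers`' `BitsAndBytesConfig` when loading the base model named in `adapter_config.json` below; this illustrates the listed values and is not code taken from the training run:

```python
# Sketch: the quantization settings above as a BitsAndBytesConfig.
# Only the 8-bit path is active (load_in_4bit is False), so the
# bnb_4bit_* fields are inert defaults here.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)

base_model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-13b-hf",
    quantization_config=bnb_config,
    device_map="auto",
)
```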
adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 4,
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "v_proj",
+     "k_proj",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
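This config describes a rank-4 LoRA adapter (alpha 8, no dropout) over all seven Llama-2 attention and MLP projections. A minimal sketch of attaching it with `peft`, where `base_model` is the 8-bit model from the sketch above and the adapter path is a placeholder:

```python
# Sketch: loading the LoRA adapter described by adapter_config.json.
# PEFT reads r, lora_alpha, target_modules, etc. from the config file;
# "path/to/adapter" is a placeholder for this repo or a checkpoint dir.
from peft import PeftModel

model = PeftModel.from_pretrained(base_model, "path/to/adapter")
model.eval()  # matches "inference_mode": true in the config
```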
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4fa0189c84963fea95cc47f6089a306258747e9e4831836f0be11695bdf891b5
+ size 62787853
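The binary weight files in this commit are stored as Git LFS pointers: the three lines record the pointer spec version, the SHA-256 of the actual blob, and its size in bytes (here about 63 MB). A stdlib-only sketch, with placeholder paths, of verifying a downloaded file against such a pointer:

```python
# Sketch: verify a downloaded adapter_model.bin against its LFS pointer.
# Paths are placeholders; each pointer line has the form "key value".
import hashlib
from pathlib import Path

pointer = dict(
    line.split(" ", 1)
    for line in Path("adapter_model.bin.pointer").read_text().strip().splitlines()
)
blob = Path("adapter_model.bin").read_bytes()

assert len(blob) == int(pointer["size"])
assert hashlib.sha256(blob).hexdigest() == pointer["oid"].removeprefix("sha256:")
```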
checkpoint-100/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.4.0
checkpoint-100/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 4,
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "v_proj",
+     "k_proj",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-100/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03f21124954311f7b7fb3feaa80eaa9b93b92678fc3f10388fbe1e88babd117a
+ size 62787853
checkpoint-100/adapter_model/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.4.0
checkpoint-100/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 4,
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "v_proj",
+     "k_proj",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-100/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03f21124954311f7b7fb3feaa80eaa9b93b92678fc3f10388fbe1e88babd117a
+ size 62787853
checkpoint-100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d35b0bc0d1a11bb01408f685e08994fe51915f45aa1c364e7986413b65ce207a
+ size 15984958
checkpoint-100/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:370c3a07f37a8aae6ea141b54ca992b21699546baf7407eb587b6056f787333b
+ size 14575
checkpoint-100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41455f7f4f2bd1024973206c2d8ad16e78af8d1f2ebdfb3a66f7efb193382148
+ size 627
checkpoint-100/trainer_state.json ADDED
@@ -0,0 +1,776 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.0016680567139282,
+   "global_step": 100,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.02, "learning_rate": 2e-05, "loss": 1.7915, "step": 1},
+     {"epoch": 0.04, "learning_rate": 4e-05, "loss": 1.7449, "step": 2},
+     {"epoch": 0.06, "learning_rate": 6e-05, "loss": 1.7538, "step": 3},
+     {"epoch": 0.08, "learning_rate": 8e-05, "loss": 1.713, "step": 4},
+     {"epoch": 0.1, "learning_rate": 0.0001, "loss": 1.7313, "step": 5},
+     {"epoch": 0.1, "eval_loss": 1.6295605897903442, "eval_runtime": 6.0457, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 5},
+     {"epoch": 0.12, "learning_rate": 0.00012, "loss": 1.8184, "step": 6},
+     {"epoch": 0.14, "learning_rate": 0.00014, "loss": 1.7586, "step": 7},
+     {"epoch": 0.16, "learning_rate": 0.00016, "loss": 1.8926, "step": 8},
+     {"epoch": 0.18, "learning_rate": 0.00018, "loss": 1.9176, "step": 9},
+     {"epoch": 0.2, "learning_rate": 0.0002, "loss": 1.584, "step": 10},
+     {"epoch": 0.2, "eval_loss": 1.606095790863037, "eval_runtime": 6.0412, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 10},
+     {"epoch": 0.22, "learning_rate": 0.00019997370884991842, "loss": 1.2008, "step": 11},
+     {"epoch": 0.24, "learning_rate": 0.00019989484922416502, "loss": 1.2531, "step": 12},
+     {"epoch": 0.26, "learning_rate": 0.00019976346258894503, "loss": 1.6787, "step": 13},
+     {"epoch": 0.28, "learning_rate": 0.00019957961803037326, "loss": 1.7146, "step": 14},
+     {"epoch": 0.3, "learning_rate": 0.00019934341221814739, "loss": 1.6937, "step": 15},
+     {"epoch": 0.3, "eval_loss": 1.5841087102890015, "eval_runtime": 6.0341, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 15},
+     {"epoch": 0.32, "learning_rate": 0.00019905496935471658, "loss": 1.7531, "step": 16},
+     {"epoch": 0.34, "learning_rate": 0.0001987144411099731, "loss": 1.7094, "step": 17},
+     {"epoch": 0.36, "learning_rate": 0.00019832200654150076, "loss": 1.7333, "step": 18},
+     {"epoch": 0.38, "learning_rate": 0.00019787787200042223, "loss": 1.7736, "step": 19},
+     {"epoch": 0.4, "learning_rate": 0.0001973822710228951, "loss": 1.6655, "step": 20},
+     {"epoch": 0.4, "eval_loss": 1.5755609273910522, "eval_runtime": 6.0389, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 20},
+     {"epoch": 0.42, "learning_rate": 0.0001968354642073129, "loss": 1.8054, "step": 21},
+     {"epoch": 0.44, "learning_rate": 0.00019623773907727682, "loss": 1.8327, "step": 22},
+     {"epoch": 0.46, "learning_rate": 0.00019558940993040885, "loss": 1.3822, "step": 23},
+     {"epoch": 0.48, "learning_rate": 0.00019489081767308698, "loss": 1.254, "step": 24},
+     {"epoch": 0.5, "learning_rate": 0.00019414232964118892, "loss": 1.7648, "step": 25},
+     {"epoch": 0.5, "eval_loss": 1.5711314678192139, "eval_runtime": 6.0254, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 25},
+     {"epoch": 0.52, "learning_rate": 0.0001933443394069383, "loss": 1.641, "step": 26},
+     {"epoch": 0.54, "learning_rate": 0.00019249726657195532, "loss": 1.7777, "step": 27},
+     {"epoch": 0.56, "learning_rate": 0.00019160155654662076, "loss": 1.7038, "step": 28},
+     {"epoch": 0.58, "learning_rate": 0.0001906576803158686, "loss": 1.6179, "step": 29},
+     {"epoch": 0.6, "learning_rate": 0.0001896661341915318, "loss": 1.7813, "step": 30},
+     {"epoch": 0.6, "eval_loss": 1.5650010108947754, "eval_runtime": 6.0228, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 30},
+     {"epoch": 0.62, "learning_rate": 0.00018862743955136966, "loss": 1.661, "step": 31},
+     {"epoch": 0.64, "learning_rate": 0.00018754214256491562, "loss": 1.7948, "step": 32},
+     {"epoch": 0.66, "learning_rate": 0.00018641081390628877, "loss": 1.92, "step": 33},
+     {"epoch": 0.68, "learning_rate": 0.00018523404845412027, "loss": 1.6941, "step": 34},
+     {"epoch": 0.7, "learning_rate": 0.0001840124649787524, "loss": 1.3461, "step": 35},
+     {"epoch": 0.7, "eval_loss": 1.5624847412109375, "eval_runtime": 6.0325, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 35},
+     {"epoch": 0.72, "learning_rate": 0.0001827467058168748, "loss": 0.8176, "step": 36},
+     {"epoch": 0.74, "learning_rate": 0.00018143743653376942, "loss": 1.7262, "step": 37},
+     {"epoch": 0.76, "learning_rate": 0.00018008534557334064, "loss": 1.7333, "step": 38},
+     {"epoch": 0.78, "learning_rate": 0.00017869114389611575, "loss": 1.5991, "step": 39},
+     {"epoch": 0.8, "learning_rate": 0.0001772555646054055, "loss": 1.7267, "step": 40},
+     {"epoch": 0.8, "eval_loss": 1.5579214096069336, "eval_runtime": 6.0388, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 40},
+     {"epoch": 0.82, "learning_rate": 0.00017577936256182167, "loss": 1.6694, "step": 41},
+     {"epoch": 0.84, "learning_rate": 0.0001742633139863538, "loss": 1.8201, "step": 42},
+     {"epoch": 0.86, "learning_rate": 0.0001727082160522145, "loss": 1.7913, "step": 43},
+     {"epoch": 0.88, "learning_rate": 0.00017111488646566727, "loss": 1.825, "step": 44},
+     {"epoch": 0.9, "learning_rate": 0.00016948416303605795, "loss": 1.7778, "step": 45},
+     {"epoch": 0.9, "eval_loss": 1.555617094039917, "eval_runtime": 6.0421, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 45},
+     {"epoch": 0.92, "learning_rate": 0.00016781690323527511, "loss": 1.6311, "step": 46},
+     {"epoch": 0.94, "learning_rate": 0.0001661139837468717, "loss": 1.1499, "step": 47},
+     {"epoch": 0.96, "learning_rate": 0.00016437630000508464, "loss": 1.0455, "step": 48},
+     {"epoch": 0.98, "learning_rate": 0.00016260476572399496, "loss": 1.7178, "step": 49},
+     {"epoch": 1.0, "learning_rate": 0.00016080031241707578, "loss": 1.4832, "step": 50},
+     {"epoch": 1.0, "eval_loss": 1.554579257965088, "eval_runtime": 6.0239, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 50},
+     {"epoch": 1.02, "learning_rate": 0.00015896388890738127, "loss": 1.6801, "step": 51},
+     {"epoch": 1.04, "learning_rate": 0.0001570964608286336, "loss": 1.6462, "step": 52},
+     {"epoch": 1.06, "learning_rate": 0.00015519901011747044, "loss": 1.7264, "step": 53},
+     {"epoch": 1.08, "learning_rate": 0.0001532725344971202, "loss": 1.63, "step": 54},
+     {"epoch": 1.1, "learning_rate": 0.00015131804695277612, "loss": 1.7584, "step": 55},
+     {"epoch": 1.1, "eval_loss": 1.5519572496414185, "eval_runtime": 6.0181, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 55},
+     {"epoch": 1.12, "learning_rate": 0.0001493365751989454, "loss": 1.7956, "step": 56},
+     {"epoch": 1.14, "learning_rate": 0.00014732916113905335, "loss": 1.6528, "step": 57},
+     {"epoch": 1.16, "learning_rate": 0.00014529686031758643, "loss": 1.7006, "step": 58},
+     {"epoch": 1.18, "learning_rate": 0.00014324074136506284, "loss": 1.8171, "step": 59},
+     {"epoch": 1.2, "learning_rate": 0.0001411618854361218, "loss": 1.4825, "step": 60},
+     {"epoch": 1.2, "eval_loss": 1.5512959957122803, "eval_runtime": 6.0143, "eval_samples_per_second": 0.333, "eval_steps_per_second": 0.333, "step": 60},
+     {"epoch": 1.22, "learning_rate": 0.00013906138564102793, "loss": 0.9645, "step": 61},
+     {"epoch": 1.24, "learning_rate": 0.0001369403464708884, "loss": 1.2708, "step": 62},
+     {"epoch": 1.26, "learning_rate": 0.0001347998832168862, "loss": 1.7432, "step": 63},
+     {"epoch": 1.28, "learning_rate": 0.00013264112138383445, "loss": 1.6986, "step": 64},
+     {"epoch": 1.3, "learning_rate": 0.00013046519609836, "loss": 1.7969, "step": 65},
+     {"epoch": 1.3, "eval_loss": 1.5507731437683105, "eval_runtime": 6.0363, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 65},
+     {"epoch": 1.32, "learning_rate": 0.00012827325151202782, "loss": 1.5769, "step": 66},
+     {"epoch": 1.34, "learning_rate": 0.00012606644019971968, "loss": 1.7011, "step": 67},
+     {"epoch": 1.36, "learning_rate": 0.00012384592255358385, "loss": 1.6525, "step": 68},
+     {"epoch": 1.38, "learning_rate": 0.00012161286617287419, "loss": 1.6834, "step": 69},
+     {"epoch": 1.4, "learning_rate": 0.00011936844524999966, "loss": 1.7182, "step": 70},
+     {"epoch": 1.4, "eval_loss": 1.549034595489502, "eval_runtime": 6.0315, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 70},
+     {"epoch": 1.42, "learning_rate": 0.00011711383995310681, "loss": 1.7941, "step": 71},
+     {"epoch": 1.44, "learning_rate": 0.00011485023580552039, "loss": 1.6289, "step": 72},
+     {"epoch": 1.46, "learning_rate": 0.00011257882306236775, "loss": 1.1176, "step": 73},
+     {"epoch": 1.48, "learning_rate": 0.00011030079608471544, "loss": 0.8403, "step": 74},
+     {"epoch": 1.5, "learning_rate": 0.00010801735271154669, "loss": 1.724, "step": 75},
+     {"epoch": 1.5, "eval_loss": 1.5494024753570557, "eval_runtime": 6.0321, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 75},
+     {"epoch": 1.52, "learning_rate": 0.00010572969362990998, "loss": 1.6746, "step": 76},
+     {"epoch": 1.54, "learning_rate": 0.00010343902174357039, "loss": 1.7152, "step": 77},
+     {"epoch": 1.56, "learning_rate": 0.0001011465415404949, "loss": 1.7054, "step": 78},
+     {"epoch": 1.58, "learning_rate": 9.88534584595051e-05, "loss": 1.7333, "step": 79},
+     {"epoch": 1.6, "learning_rate": 9.656097825642961e-05, "loss": 1.7083, "step": 80},
+     {"epoch": 1.6, "eval_loss": 1.5476174354553223, "eval_runtime": 6.0289, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 80},
+     {"epoch": 1.62, "learning_rate": 9.427030637009003e-05, "loss": 1.8771, "step": 81},
+     {"epoch": 1.64, "learning_rate": 9.198264728845332e-05, "loss": 1.8446, "step": 82},
+     {"epoch": 1.66, "learning_rate": 8.969920391528458e-05, "loss": 1.86, "step": 83},
+     {"epoch": 1.68, "learning_rate": 8.742117693763227e-05, "loss": 1.7291, "step": 84},
+     {"epoch": 1.7, "learning_rate": 8.514976419447964e-05, "loss": 1.2195, "step": 85},
+     {"epoch": 1.7, "eval_loss": 1.5478819608688354, "eval_runtime": 6.0353, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 85},
+     {"epoch": 1.72, "learning_rate": 8.28861600468932e-05, "loss": 0.9047, "step": 86},
+     {"epoch": 1.74, "learning_rate": 8.063155475000037e-05, "loss": 1.7297, "step": 87},
+     {"epoch": 1.76, "learning_rate": 7.838713382712583e-05, "loss": 1.5731, "step": 88},
+     {"epoch": 1.78, "learning_rate": 7.615407744641619e-05, "loss": 1.598, "step": 89},
+     {"epoch": 1.8, "learning_rate": 7.393355980028039e-05, "loss": 1.6782, "step": 90},
+     {"epoch": 1.8, "eval_loss": 1.5477495193481445, "eval_runtime": 6.0384, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 90},
+     {"epoch": 1.82, "learning_rate": 7.172674848797219e-05, "loss": 1.672, "step": 91},
+     {"epoch": 1.84, "learning_rate": 6.953480390164e-05, "loss": 1.6457, "step": 92},
+     {"epoch": 1.86, "learning_rate": 6.735887861616556e-05, "loss": 1.7155, "step": 93},
+     {"epoch": 1.88, "learning_rate": 6.520011678311382e-05, "loss": 1.7334, "step": 94},
+     {"epoch": 1.9, "learning_rate": 6.305965352911161e-05, "loss": 1.7342, "step": 95},
+     {"epoch": 1.9, "eval_loss": 1.5465799570083618, "eval_runtime": 6.0244, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 95},
+     {"epoch": 1.92, "learning_rate": 6.093861435897208e-05, "loss": 1.8115, "step": 96},
+     {"epoch": 1.94, "learning_rate": 5.883811456387821e-05, "loss": 1.4625, "step": 97},
+     {"epoch": 1.96, "learning_rate": 5.675925863493721e-05, "loss": 1.08, "step": 98},
+     {"epoch": 1.98, "learning_rate": 5.4703139682413586e-05, "loss": 1.6914, "step": 99},
+     {"epoch": 2.0, "learning_rate": 5.267083886094668e-05, "loss": 1.6016, "step": 100},
+     {"epoch": 2.0, "eval_loss": 1.547050952911377, "eval_runtime": 6.0254, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 100}
+   ],
+   "max_steps": 147,
+   "num_train_epochs": 3,
+   "total_flos": 5.5839227595669504e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
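This trainer state covers about two epochs (100 of 147 steps) of a three-epoch run, with eval loss flattening out around 1.547 from step 50 onward. A stdlib-only sketch, with a placeholder path, of pulling the curves out of such a file:

```python
# Sketch: extract train/eval loss curves from a trainer_state.json.
import json
from pathlib import Path

state = json.loads(Path("checkpoint-100/trainer_state.json").read_text())

train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"epoch {state['epoch']:.2f}, step {state['global_step']}/{state['max_steps']}")
print("final eval_loss:", evals[-1][1])  # 1.5470... at step 100
```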
checkpoint-100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e9038e6bbf455ce5d3676d50dae69e7eb4aa3998a7f4dd2e01e3256858af8f5
+ size 3899
checkpoint-60/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.4.0
checkpoint-60/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 4,
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "v_proj",
+     "k_proj",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-60/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fae73a7e503ff0e787f33cfbaa4b35a81da2e535211081d392e92e835e977602
+ size 62787853
checkpoint-60/adapter_model/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.4.0
checkpoint-60/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 4,
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "v_proj",
+     "k_proj",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-60/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fae73a7e503ff0e787f33cfbaa4b35a81da2e535211081d392e92e835e977602
+ size 62787853
checkpoint-60/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b201904c6d7bd10a5bcf3695e410d387af390c750c9c55c7d69ebcd3547bcd2d
+ size 15984958
checkpoint-60/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f6cf74835c1af9f9e3dc4bcfbc0eae1e84048401ffb87d26ff318411e17c02d
+ size 14575
checkpoint-60/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e07187181626841e6543bc334ada13e8ff2b9f907477bbf5d42b0726e897fb5
+ size 627
checkpoint-60/trainer_state.json ADDED
@@ -0,0 +1,472 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.201000834028357,
+   "global_step": 60,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.02, "learning_rate": 2e-05, "loss": 1.7915, "step": 1},
+     {"epoch": 0.04, "learning_rate": 4e-05, "loss": 1.7449, "step": 2},
+     {"epoch": 0.06, "learning_rate": 6e-05, "loss": 1.7538, "step": 3},
+     {"epoch": 0.08, "learning_rate": 8e-05, "loss": 1.713, "step": 4},
+     {"epoch": 0.1, "learning_rate": 0.0001, "loss": 1.7313, "step": 5},
+     {"epoch": 0.1, "eval_loss": 1.6295605897903442, "eval_runtime": 6.0457, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 5},
+     {"epoch": 0.12, "learning_rate": 0.00012, "loss": 1.8184, "step": 6},
+     {"epoch": 0.14, "learning_rate": 0.00014, "loss": 1.7586, "step": 7},
+     {"epoch": 0.16, "learning_rate": 0.00016, "loss": 1.8926, "step": 8},
+     {"epoch": 0.18, "learning_rate": 0.00018, "loss": 1.9176, "step": 9},
+     {"epoch": 0.2, "learning_rate": 0.0002, "loss": 1.584, "step": 10},
+     {"epoch": 0.2, "eval_loss": 1.606095790863037, "eval_runtime": 6.0412, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 10},
+     {"epoch": 0.22, "learning_rate": 0.00019997370884991842, "loss": 1.2008, "step": 11},
+     {"epoch": 0.24, "learning_rate": 0.00019989484922416502, "loss": 1.2531, "step": 12},
+     {"epoch": 0.26, "learning_rate": 0.00019976346258894503, "loss": 1.6787, "step": 13},
+     {"epoch": 0.28, "learning_rate": 0.00019957961803037326, "loss": 1.7146, "step": 14},
+     {"epoch": 0.3, "learning_rate": 0.00019934341221814739, "loss": 1.6937, "step": 15},
+     {"epoch": 0.3, "eval_loss": 1.5841087102890015, "eval_runtime": 6.0341, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 15},
+     {"epoch": 0.32, "learning_rate": 0.00019905496935471658, "loss": 1.7531, "step": 16},
+     {"epoch": 0.34, "learning_rate": 0.0001987144411099731, "loss": 1.7094, "step": 17},
+     {"epoch": 0.36, "learning_rate": 0.00019832200654150076, "loss": 1.7333, "step": 18},
+     {"epoch": 0.38, "learning_rate": 0.00019787787200042223, "loss": 1.7736, "step": 19},
+     {"epoch": 0.4, "learning_rate": 0.0001973822710228951, "loss": 1.6655, "step": 20},
+     {"epoch": 0.4, "eval_loss": 1.5755609273910522, "eval_runtime": 6.0389, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 20},
+     {"epoch": 0.42, "learning_rate": 0.0001968354642073129, "loss": 1.8054, "step": 21},
+     {"epoch": 0.44, "learning_rate": 0.00019623773907727682, "loss": 1.8327, "step": 22},
+     {"epoch": 0.46, "learning_rate": 0.00019558940993040885, "loss": 1.3822, "step": 23},
+     {"epoch": 0.48, "learning_rate": 0.00019489081767308698, "loss": 1.254, "step": 24},
+     {"epoch": 0.5, "learning_rate": 0.00019414232964118892, "loss": 1.7648, "step": 25},
+     {"epoch": 0.5, "eval_loss": 1.5711314678192139, "eval_runtime": 6.0254, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 25},
+     {"epoch": 0.52, "learning_rate": 0.0001933443394069383, "loss": 1.641, "step": 26},
+     {"epoch": 0.54, "learning_rate": 0.00019249726657195532, "loss": 1.7777, "step": 27},
+     {"epoch": 0.56, "learning_rate": 0.00019160155654662076, "loss": 1.7038, "step": 28},
+     {"epoch": 0.58, "learning_rate": 0.0001906576803158686, "loss": 1.6179, "step": 29},
+     {"epoch": 0.6, "learning_rate": 0.0001896661341915318, "loss": 1.7813, "step": 30},
+     {"epoch": 0.6, "eval_loss": 1.5650010108947754, "eval_runtime": 6.0228, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 30},
+     {"epoch": 0.62, "learning_rate": 0.00018862743955136966, "loss": 1.661, "step": 31},
+     {"epoch": 0.64, "learning_rate": 0.00018754214256491562, "loss": 1.7948, "step": 32},
+     {"epoch": 0.66, "learning_rate": 0.00018641081390628877, "loss": 1.92, "step": 33},
+     {"epoch": 0.68, "learning_rate": 0.00018523404845412027, "loss": 1.6941, "step": 34},
+     {"epoch": 0.7, "learning_rate": 0.0001840124649787524, "loss": 1.3461, "step": 35},
+     {"epoch": 0.7, "eval_loss": 1.5624847412109375, "eval_runtime": 6.0325, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 35},
+     {"epoch": 0.72, "learning_rate": 0.0001827467058168748, "loss": 0.8176, "step": 36},
+     {"epoch": 0.74, "learning_rate": 0.00018143743653376942, "loss": 1.7262, "step": 37},
+     {"epoch": 0.76, "learning_rate": 0.00018008534557334064, "loss": 1.7333, "step": 38},
+     {"epoch": 0.78, "learning_rate": 0.00017869114389611575, "loss": 1.5991, "step": 39},
+     {"epoch": 0.8, "learning_rate": 0.0001772555646054055, "loss": 1.7267, "step": 40},
+     {"epoch": 0.8, "eval_loss": 1.5579214096069336, "eval_runtime": 6.0388, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 40},
+     {"epoch": 0.82, "learning_rate": 0.00017577936256182167, "loss": 1.6694, "step": 41},
+     {"epoch": 0.84, "learning_rate": 0.0001742633139863538, "loss": 1.8201, "step": 42},
+     {"epoch": 0.86, "learning_rate": 0.0001727082160522145, "loss": 1.7913, "step": 43},
+     {"epoch": 0.88, "learning_rate": 0.00017111488646566727, "loss": 1.825, "step": 44},
+     {"epoch": 0.9, "learning_rate": 0.00016948416303605795, "loss": 1.7778, "step": 45},
+     {"epoch": 0.9, "eval_loss": 1.555617094039917, "eval_runtime": 6.0421, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 45},
+     {"epoch": 0.92, "learning_rate": 0.00016781690323527511, "loss": 1.6311, "step": 46},
+     {"epoch": 0.94, "learning_rate": 0.0001661139837468717, "loss": 1.1499, "step": 47},
+     {"epoch": 0.96, "learning_rate": 0.00016437630000508464, "loss": 1.0455, "step": 48},
+     {"epoch": 0.98, "learning_rate": 0.00016260476572399496, "loss": 1.7178, "step": 49},
+     {"epoch": 1.0, "learning_rate": 0.00016080031241707578, "loss": 1.4832, "step": 50},
+     {"epoch": 1.0, "eval_loss": 1.554579257965088, "eval_runtime": 6.0239, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 50},
+     {"epoch": 1.02, "learning_rate": 0.00015896388890738127, "loss": 1.6801, "step": 51},
+     {"epoch": 1.04, "learning_rate": 0.0001570964608286336, "loss": 1.6462, "step": 52},
+     {"epoch": 1.06, "learning_rate": 0.00015519901011747044, "loss": 1.7264, "step": 53},
+     {"epoch": 1.08, "learning_rate": 0.0001532725344971202, "loss": 1.63, "step": 54},
+     {"epoch": 1.1, "learning_rate": 0.00015131804695277612, "loss": 1.7584, "step": 55},
+     {"epoch": 1.1, "eval_loss": 1.5519572496414185, "eval_runtime": 6.0181, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 55},
+     {"epoch": 1.12, "learning_rate": 0.0001493365751989454, "loss": 1.7956, "step": 56},
+     {"epoch": 1.14, "learning_rate": 0.00014732916113905335, "loss": 1.6528, "step": 57},
+     {"epoch": 1.16, "learning_rate": 0.00014529686031758643, "loss": 1.7006, "step": 58},
+     {"epoch": 1.18, "learning_rate": 0.00014324074136506284, "loss": 1.8171, "step": 59},
+     {"epoch": 1.2, "learning_rate": 0.0001411618854361218, "loss": 1.4825, "step": 60},
+     {"epoch": 1.2, "eval_loss": 1.5512959957122803, "eval_runtime": 6.0143, "eval_samples_per_second": 0.333, "eval_steps_per_second": 0.333, "step": 60}
+   ],
+   "max_steps": 147,
+   "num_train_epochs": 3,
+   "total_flos": 3.4027598710554624e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-60/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e9038e6bbf455ce5d3676d50dae69e7eb4aa3998a7f4dd2e01e3256858af8f5
+ size 3899
checkpoint-80/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.4.0
checkpoint-80/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 4,
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "v_proj",
+     "k_proj",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-80/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7f19fa763b33100eb23298e62b5a9f93769cf5ed88ad4377284753280d1b60f
+ size 62787853
checkpoint-80/adapter_model/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.4.0
checkpoint-80/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 4,
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "v_proj",
+     "k_proj",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-80/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7f19fa763b33100eb23298e62b5a9f93769cf5ed88ad4377284753280d1b60f
+ size 62787853
checkpoint-80/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e706147cdd0c12b6cde3ada445cb8c8e99267788373325729a458f0e30cf3885
+ size 15984958
checkpoint-80/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8400c928f00bba2be3b202ea8da8ba62c6a19dbbf1f76dbc77e0305cb22a1fdf
+ size 14575
checkpoint-80/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51c342b2e1bcc1ef436ff492fb779d0a0498f50ada8fe3c00aa3e2ba6fad3b10
+ size 627
checkpoint-80/trainer_state.json ADDED
@@ -0,0 +1,624 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.6013344453711427,
+   "global_step": 80,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.02, "learning_rate": 2e-05, "loss": 1.7915, "step": 1},
+     {"epoch": 0.04, "learning_rate": 4e-05, "loss": 1.7449, "step": 2},
+     {"epoch": 0.06, "learning_rate": 6e-05, "loss": 1.7538, "step": 3},
+     {"epoch": 0.08, "learning_rate": 8e-05, "loss": 1.713, "step": 4},
+     {"epoch": 0.1, "learning_rate": 0.0001, "loss": 1.7313, "step": 5},
+     {"epoch": 0.1, "eval_loss": 1.6295605897903442, "eval_runtime": 6.0457, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 5},
+     {"epoch": 0.12, "learning_rate": 0.00012, "loss": 1.8184, "step": 6},
+     {"epoch": 0.14, "learning_rate": 0.00014, "loss": 1.7586, "step": 7},
+     {"epoch": 0.16, "learning_rate": 0.00016, "loss": 1.8926, "step": 8},
+     {"epoch": 0.18, "learning_rate": 0.00018, "loss": 1.9176, "step": 9},
+     {"epoch": 0.2, "learning_rate": 0.0002, "loss": 1.584, "step": 10},
+     {"epoch": 0.2, "eval_loss": 1.606095790863037, "eval_runtime": 6.0412, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 10},
+     {"epoch": 0.22, "learning_rate": 0.00019997370884991842, "loss": 1.2008, "step": 11},
+     {"epoch": 0.24, "learning_rate": 0.00019989484922416502, "loss": 1.2531, "step": 12},
+     {"epoch": 0.26, "learning_rate": 0.00019976346258894503, "loss": 1.6787, "step": 13},
+     {"epoch": 0.28, "learning_rate": 0.00019957961803037326, "loss": 1.7146, "step": 14},
+     {"epoch": 0.3, "learning_rate": 0.00019934341221814739, "loss": 1.6937, "step": 15},
+     {"epoch": 0.3, "eval_loss": 1.5841087102890015, "eval_runtime": 6.0341, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 15},
+     {"epoch": 0.32, "learning_rate": 0.00019905496935471658, "loss": 1.7531, "step": 16},
+     {"epoch": 0.34, "learning_rate": 0.0001987144411099731, "loss": 1.7094, "step": 17},
+     {"epoch": 0.36, "learning_rate": 0.00019832200654150076, "loss": 1.7333, "step": 18},
+     {"epoch": 0.38, "learning_rate": 0.00019787787200042223, "loss": 1.7736, "step": 19},
+     {"epoch": 0.4, "learning_rate": 0.0001973822710228951, "loss": 1.6655, "step": 20},
+     {"epoch": 0.4, "eval_loss": 1.5755609273910522, "eval_runtime": 6.0389, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 20},
+     {"epoch": 0.42, "learning_rate": 0.0001968354642073129, "loss": 1.8054, "step": 21},
+     {"epoch": 0.44, "learning_rate": 0.00019623773907727682, "loss": 1.8327, "step": 22},
+     {"epoch": 0.46, "learning_rate": 0.00019558940993040885, "loss": 1.3822, "step": 23},
+     {"epoch": 0.48, "learning_rate": 0.00019489081767308698, "loss": 1.254, "step": 24},
+     {"epoch": 0.5, "learning_rate": 0.00019414232964118892, "loss": 1.7648, "step": 25},
+     {"epoch": 0.5, "eval_loss": 1.5711314678192139, "eval_runtime": 6.0254, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 25},
+     {"epoch": 0.52, "learning_rate": 0.0001933443394069383, "loss": 1.641, "step": 26},
+     {"epoch": 0.54, "learning_rate": 0.00019249726657195532, "loss": 1.7777, "step": 27},
+     {"epoch": 0.56, "learning_rate": 0.00019160155654662076, "loss": 1.7038, "step": 28},
+     {"epoch": 0.58, "learning_rate": 0.0001906576803158686, "loss": 1.6179, "step": 29},
+     {"epoch": 0.6, "learning_rate": 0.0001896661341915318, "loss": 1.7813, "step": 30},
+     {"epoch": 0.6, "eval_loss": 1.5650010108947754, "eval_runtime": 6.0228, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 30},
+     {"epoch": 0.62, "learning_rate": 0.00018862743955136966, "loss": 1.661, "step": 31},
+     {"epoch": 0.64, "learning_rate": 0.00018754214256491562, "loss": 1.7948, "step": 32},
+     {"epoch": 0.66, "learning_rate": 0.00018641081390628877, "loss": 1.92, "step": 33},
+     {"epoch": 0.68, "learning_rate": 0.00018523404845412027, "loss": 1.6941, "step": 34},
+     {"epoch": 0.7, "learning_rate": 0.0001840124649787524, "loss": 1.3461, "step": 35},
+     {"epoch": 0.7, "eval_loss": 1.5624847412109375, "eval_runtime": 6.0325, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 35},
+     {"epoch": 0.72, "learning_rate": 0.0001827467058168748, "loss": 0.8176, "step": 36},
+     {"epoch": 0.74, "learning_rate": 0.00018143743653376942, "loss": 1.7262, "step": 37},
+     {"epoch": 0.76, "learning_rate": 0.00018008534557334064, "loss": 1.7333, "step": 38},
+     {"epoch": 0.78, "learning_rate": 0.00017869114389611575, "loss": 1.5991, "step": 39},
+     {"epoch": 0.8, "learning_rate": 0.0001772555646054055, "loss": 1.7267, "step": 40},
+     {"epoch": 0.8, "eval_loss": 1.5579214096069336, "eval_runtime": 6.0388, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 40},
+     {"epoch": 0.82, "learning_rate": 0.00017577936256182167, "loss": 1.6694, "step": 41},
+     {"epoch": 0.84, "learning_rate": 0.0001742633139863538, "loss": 1.8201, "step": 42},
+     {"epoch": 0.86, "learning_rate": 0.0001727082160522145, "loss": 1.7913, "step": 43},
+     {"epoch": 0.88, "learning_rate": 0.00017111488646566727, "loss": 1.825, "step": 44},
+     {"epoch": 0.9, "learning_rate": 0.00016948416303605795, "loss": 1.7778, "step": 45},
+     {"epoch": 0.9, "eval_loss": 1.555617094039917, "eval_runtime": 6.0421, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 45},
+     {"epoch": 0.92, "learning_rate": 0.00016781690323527511, "loss": 1.6311, "step": 46},
+     {"epoch": 0.94, "learning_rate": 0.0001661139837468717, "loss": 1.1499, "step": 47},
+     {"epoch": 0.96, "learning_rate": 0.00016437630000508464, "loss": 1.0455, "step": 48},
+     {"epoch": 0.98, "learning_rate": 0.00016260476572399496, "loss": 1.7178, "step": 49},
+     {"epoch": 1.0, "learning_rate": 0.00016080031241707578, "loss": 1.4832, "step": 50},
+     {"epoch": 1.0, "eval_loss": 1.554579257965088, "eval_runtime": 6.0239, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 50},
+     {"epoch": 1.02, "learning_rate": 0.00015896388890738127, "loss": 1.6801, "step": 51},
+     {"epoch": 1.04, "learning_rate": 0.0001570964608286336, "loss": 1.6462, "step": 52},
+     {"epoch": 1.06, "learning_rate": 0.00015519901011747044, "loss": 1.7264, "step": 53},
+     {"epoch": 1.08, "learning_rate": 0.0001532725344971202, "loss": 1.63, "step": 54},
+     {"epoch": 1.1, "learning_rate": 0.00015131804695277612, "loss": 1.7584, "step": 55},
+     {"epoch": 1.1, "eval_loss": 1.5519572496414185, "eval_runtime": 6.0181, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 55},
+     {"epoch": 1.12, "learning_rate": 0.0001493365751989454, "loss": 1.7956, "step": 56},
+     {"epoch": 1.14, "learning_rate": 0.00014732916113905335, "loss": 1.6528, "step": 57},
+     {"epoch": 1.16, "learning_rate": 0.00014529686031758643, "loss": 1.7006, "step": 58},
+     {"epoch": 1.18, "learning_rate": 0.00014324074136506284, "loss": 1.8171, "step": 59},
+     {"epoch": 1.2, "learning_rate": 0.0001411618854361218, "loss": 1.4825, "step": 60},
+     {"epoch": 1.2, "eval_loss": 1.5512959957122803, "eval_runtime": 6.0143, "eval_samples_per_second": 0.333, "eval_steps_per_second": 0.333, "step": 60},
+     {"epoch": 1.22, "learning_rate": 0.00013906138564102793, "loss": 0.9645, "step": 61},
+     {"epoch": 1.24, "learning_rate": 0.0001369403464708884, "loss": 1.2708, "step": 62},
+     {"epoch": 1.26, "learning_rate": 0.0001347998832168862, "loss": 1.7432, "step": 63},
+     {"epoch": 1.28, "learning_rate": 0.00013264112138383445, "loss": 1.6986, "step": 64},
+     {"epoch": 1.3, "learning_rate": 0.00013046519609836, "loss": 1.7969, "step": 65},
+     {"epoch": 1.3, "eval_loss": 1.5507731437683105, "eval_runtime": 6.0363, "eval_samples_per_second": 0.331, "eval_steps_per_second": 0.331, "step": 65},
+     {"epoch": 1.32, "learning_rate": 0.00012827325151202782, "loss": 1.5769, "step": 66},
+     {"epoch": 1.34, "learning_rate": 0.00012606644019971968, "loss": 1.7011, "step": 67},
+     {"epoch": 1.36, "learning_rate": 0.00012384592255358385, "loss": 1.6525, "step": 68},
+     {"epoch": 1.38, "learning_rate": 0.00012161286617287419, "loss": 1.6834, "step": 69},
+     {"epoch": 1.4, "learning_rate": 0.00011936844524999966, "loss": 1.7182, "step": 70},
+     {"epoch": 1.4, "eval_loss": 1.549034595489502, "eval_runtime": 6.0315, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 70},
+     {"epoch": 1.42, "learning_rate": 0.00011711383995310681, "loss": 1.7941, "step": 71},
+     {"epoch": 1.44, "learning_rate": 0.00011485023580552039, "loss": 1.6289, "step": 72},
+     {"epoch": 1.46, "learning_rate": 0.00011257882306236775, "loss": 1.1176, "step": 73},
+     {"epoch": 1.48, "learning_rate": 0.00011030079608471544, "loss": 0.8403, "step": 74},
+     {"epoch": 1.5, "learning_rate": 0.00010801735271154669, "loss": 1.724, "step": 75},
+     {"epoch": 1.5, "eval_loss": 1.5494024753570557, "eval_runtime": 6.0321, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 75},
+     {"epoch": 1.52, "learning_rate": 0.00010572969362990998, "loss": 1.6746, "step": 76},
+     {"epoch": 1.54, "learning_rate": 0.00010343902174357039, "loss": 1.7152, "step": 77},
+     {"epoch": 1.56, "learning_rate": 0.0001011465415404949, "loss": 1.7054, "step": 78},
+     {"epoch": 1.58, "learning_rate": 9.88534584595051e-05, "loss": 1.7333, "step": 79},
+     {"epoch": 1.6, "learning_rate": 9.656097825642961e-05, "loss": 1.7083, "step": 80},
+     {"epoch": 1.6, "eval_loss": 1.5476174354553223, "eval_runtime": 6.0289, "eval_samples_per_second": 0.332, "eval_steps_per_second": 0.332, "step": 80}
+   ],
+   "max_steps": 147,
+   "num_train_epochs": 3,
+   "total_flos": 4.516064592954163e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-80/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e9038e6bbf455ce5d3676d50dae69e7eb4aa3998a7f4dd2e01e3256858af8f5
+ size 3899