teachyourselfcoding committed on

Commit bddca71
1 Parent(s): 441a1fb

Upload 99 files

50000 General
30000 Legislation
10000 Sensitive
500 Judgement

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.

Files changed (50)
  1. README.md +18 -0
  2. adapter_config.json +18 -0
  3. adapter_model.bin +3 -0
  4. checkpoint-1000/README.md +21 -0
  5. checkpoint-1000/adapter_config.json +21 -0
  6. checkpoint-1000/adapter_model.bin +3 -0
  7. checkpoint-1000/optimizer.pt +3 -0
  8. checkpoint-1000/rng_state_0.pth +3 -0
  9. checkpoint-1000/rng_state_1.pth +3 -0
  10. checkpoint-1000/rng_state_2.pth +3 -0
  11. checkpoint-1000/rng_state_3.pth +3 -0
  12. checkpoint-1000/rng_state_4.pth +3 -0
  13. checkpoint-1000/scheduler.pt +3 -0
  14. checkpoint-1000/trainer_state.json +359 -0
  15. checkpoint-1000/training_args.bin +3 -0
  16. checkpoint-1200/README.md +21 -0
  17. checkpoint-1200/adapter_config.json +21 -0
  18. checkpoint-1200/adapter_model.bin +3 -0
  19. checkpoint-1200/optimizer.pt +3 -0
  20. checkpoint-1200/rng_state_0.pth +3 -0
  21. checkpoint-1200/rng_state_1.pth +3 -0
  22. checkpoint-1200/rng_state_2.pth +3 -0
  23. checkpoint-1200/rng_state_3.pth +3 -0
  24. checkpoint-1200/rng_state_4.pth +3 -0
  25. checkpoint-1200/scheduler.pt +3 -0
  26. checkpoint-1200/trainer_state.json +427 -0
  27. checkpoint-1200/training_args.bin +3 -0
  28. checkpoint-1400/README.md +21 -0
  29. checkpoint-1400/adapter_config.json +21 -0
  30. checkpoint-1400/adapter_model.bin +3 -0
  31. checkpoint-1400/optimizer.pt +3 -0
  32. checkpoint-1400/rng_state_0.pth +3 -0
  33. checkpoint-1400/rng_state_1.pth +3 -0
  34. checkpoint-1400/rng_state_2.pth +3 -0
  35. checkpoint-1400/rng_state_3.pth +3 -0
  36. checkpoint-1400/rng_state_4.pth +3 -0
  37. checkpoint-1400/scheduler.pt +3 -0
  38. checkpoint-1400/trainer_state.json +495 -0
  39. checkpoint-1400/training_args.bin +3 -0
  40. checkpoint-1600/README.md +21 -0
  41. checkpoint-1600/adapter_config.json +21 -0
  42. checkpoint-1600/adapter_model.bin +3 -0
  43. checkpoint-1600/optimizer.pt +3 -0
  44. checkpoint-1600/rng_state_0.pth +3 -0
  45. checkpoint-1600/rng_state_1.pth +3 -0
  46. checkpoint-1600/rng_state_2.pth +3 -0
  47. checkpoint-1600/rng_state_3.pth +3 -0
  48. checkpoint-1600/rng_state_4.pth +3 -0
  49. checkpoint-1600/scheduler.pt +3 -0
  50. checkpoint-1600/trainer_state.json +563 -0
README.md ADDED
@@ -0,0 +1,18 @@
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
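
The config above maps directly onto `transformers`' `BitsAndBytesConfig`. As a minimal sketch (assuming recent `transformers` and `bitsandbytes` installs; the model id is taken from `adapter_config.json` below, and the fp4/double-quant fields are inert in 8-bit mode), loading the base model the same way might look like:

```python
# Sketch: recreate the 8-bit quantization setup listed in the README above.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,               # load_in_8bit: True
    llm_int8_threshold=6.0,          # llm_int8_threshold: 6.0
    llm_int8_has_fp16_weight=False,  # llm_int8_has_fp16_weight: False
)

base = AutoModelForCausalLM.from_pretrained(
    "decapoda-research/llama-7b-hf",  # base_model_name_or_path from adapter_config.json
    quantization_config=bnb_config,
    device_map="auto",
)
```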
adapter_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+ "base_model_name_or_path": "decapoda-research/llama-7b-hf",
+ "bias": "none",
+ "enable_lora": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "merge_weights": false,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
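
For reference, the same settings can be expressed with `peft.LoraConfig`. This is a sketch against a recent PEFT release, so the legacy fields in the JSON above (`enable_lora`, `merge_weights`, written by an older PEFT version) have no direct counterpart:

```python
# Sketch: adapter_config.json above expressed as a peft.LoraConfig.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                                  # "r": 8
    lora_alpha=16,                        # "lora_alpha": 16
    lora_dropout=0.05,                    # "lora_dropout": 0.05
    target_modules=["q_proj", "v_proj"],  # "target_modules"
    bias="none",                          # "bias": "none"
    task_type="CAUSAL_LM",                # "task_type": "CAUSAL_LM"
)
```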
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+ size 443
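
What the diff shows here is a Git LFS pointer, not the weights themselves; the binary has to be pulled separately. One way, sketched with `huggingface_hub` (`user/repo` is a placeholder for this repository's id, which is not shown on this page):

```python
# Sketch: fetch the real adapter_model.bin behind the LFS pointer above.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(repo_id="user/repo", filename="adapter_model.bin")
print(local_path)  # cached local path to the downloaded binary
```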
checkpoint-1000/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "../chinese-llama-2-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 8,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+ size 443
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:519989b5c4203349488ab3a72641e430710473f479d606018f1c5ea510bda3ad
+ size 52562757
checkpoint-1000/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d555fa2b7f06822b0ea390d8be6e59b9c2c1df90e9a970218bbcaa9e658dd3f
+ size 18679
checkpoint-1000/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6c65da58622be8f443206bf19b99171fcc17cd5b3062a5d474cfea0d421928e
+ size 18679
checkpoint-1000/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9e46a0408e00234db670e5678b09d818d17644da30b372c23365cf4553d0eab
+ size 18679
checkpoint-1000/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37db40b7a6693356857b0b1ef19519a6e374c4a22ecba286d5be22660bd721c8
+ size 18679
checkpoint-1000/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e2da9f2cedddbb29bd7fdcf9b4ccb986ed0aae0e221ad3615744cbbc3e72a38
+ size 18679
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:85d443feb657783013f5feaa6f3fca061b9db934a594151c8d1707fc020521ed
+ size 627
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,359 @@
+ {
+ "best_metric": 1.2406948804855347,
+ "best_model_checkpoint": "../llama2-9439-21sept/checkpoint-1000",
+ "epoch": 1.7366136034732271,
+ "eval_steps": 200,
+ "global_step": 1000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.03,
+ "learning_rate": 5.9999999999999995e-05,
+ "loss": 3.4609,
+ "step": 20
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 0.00011999999999999999,
+ "loss": 2.8346,
+ "step": 40
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.00017999999999999998,
+ "loss": 1.7597,
+ "step": 60
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.00023999999999999998,
+ "loss": 1.542,
+ "step": 80
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.0003,
+ "loss": 1.4567,
+ "step": 100
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 0.0002963076923076923,
+ "loss": 1.4081,
+ "step": 120
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.0002926153846153846,
+ "loss": 1.3805,
+ "step": 140
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.0002889230769230769,
+ "loss": 1.3605,
+ "step": 160
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.00028523076923076923,
+ "loss": 1.3329,
+ "step": 180
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 0.0002815384615384615,
+ "loss": 1.3367,
+ "step": 200
+ },
+ {
+ "epoch": 0.35,
+ "eval_loss": 1.3450770378112793,
+ "eval_runtime": 6.306,
+ "eval_samples_per_second": 47.574,
+ "eval_steps_per_second": 1.269,
+ "step": 200
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 0.0002778461538461538,
+ "loss": 1.3152,
+ "step": 220
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 0.0002741538461538461,
+ "loss": 1.312,
+ "step": 240
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 0.00027046153846153843,
+ "loss": 1.2883,
+ "step": 260
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 0.00026676923076923074,
+ "loss": 1.2843,
+ "step": 280
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 0.00026307692307692306,
+ "loss": 1.2609,
+ "step": 300
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 0.00025938461538461537,
+ "loss": 1.2707,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 0.0002556923076923077,
+ "loss": 1.2624,
+ "step": 340
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 0.00025199999999999995,
+ "loss": 1.273,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 0.0002483076923076923,
+ "loss": 1.251,
+ "step": 380
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 0.0002446153846153846,
+ "loss": 1.2648,
+ "step": 400
+ },
+ {
+ "epoch": 0.69,
+ "eval_loss": 1.292348861694336,
+ "eval_runtime": 6.304,
+ "eval_samples_per_second": 47.589,
+ "eval_steps_per_second": 1.269,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 0.0002409230769230769,
+ "loss": 1.265,
+ "step": 420
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 0.0002372307692307692,
+ "loss": 1.2516,
+ "step": 440
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 0.00023353846153846151,
+ "loss": 1.2441,
+ "step": 460
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 0.00022984615384615383,
+ "loss": 1.2204,
+ "step": 480
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 0.00022615384615384614,
+ "loss": 1.2221,
+ "step": 500
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 0.00022246153846153846,
+ "loss": 1.2246,
+ "step": 520
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 0.00021876923076923074,
+ "loss": 1.2227,
+ "step": 540
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 0.00021507692307692306,
+ "loss": 1.2124,
+ "step": 560
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 0.00021138461538461537,
+ "loss": 1.2065,
+ "step": 580
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 0.00020769230769230766,
+ "loss": 1.2106,
+ "step": 600
+ },
+ {
+ "epoch": 1.04,
+ "eval_loss": 1.2669302225112915,
+ "eval_runtime": 6.3128,
+ "eval_samples_per_second": 47.523,
+ "eval_steps_per_second": 1.267,
+ "step": 600
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 0.000204,
+ "loss": 1.2046,
+ "step": 620
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 0.00020030769230769229,
+ "loss": 1.2114,
+ "step": 640
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 0.0001966153846153846,
+ "loss": 1.2122,
+ "step": 660
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 0.00019310769230769227,
+ "loss": 1.1963,
+ "step": 680
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 0.00018941538461538461,
+ "loss": 1.1965,
+ "step": 700
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 0.0001857230769230769,
+ "loss": 1.1937,
+ "step": 720
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 0.00018203076923076921,
+ "loss": 1.186,
+ "step": 740
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 0.00017833846153846153,
+ "loss": 1.1732,
+ "step": 760
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 0.00017464615384615381,
+ "loss": 1.1911,
+ "step": 780
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 0.00017095384615384616,
+ "loss": 1.2153,
+ "step": 800
+ },
+ {
+ "epoch": 1.39,
+ "eval_loss": 1.2513903379440308,
+ "eval_runtime": 6.3069,
+ "eval_samples_per_second": 47.567,
+ "eval_steps_per_second": 1.268,
+ "step": 800
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 0.00016726153846153844,
+ "loss": 1.1991,
+ "step": 820
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 0.00016356923076923073,
+ "loss": 1.1934,
+ "step": 840
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 0.00015987692307692307,
+ "loss": 1.2047,
+ "step": 860
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 0.00015618461538461536,
+ "loss": 1.2204,
+ "step": 880
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 0.0001524923076923077,
+ "loss": 1.1785,
+ "step": 900
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 0.00014879999999999998,
+ "loss": 1.1756,
+ "step": 920
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 0.0001451076923076923,
+ "loss": 1.214,
+ "step": 940
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 0.0001414153846153846,
+ "loss": 1.1794,
+ "step": 960
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 0.00013772307692307693,
+ "loss": 1.1946,
+ "step": 980
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 0.0001340307692307692,
+ "loss": 1.175,
+ "step": 1000
+ },
+ {
+ "epoch": 1.74,
+ "eval_loss": 1.2406948804855347,
+ "eval_runtime": 6.3315,
+ "eval_samples_per_second": 47.382,
+ "eval_steps_per_second": 1.264,
+ "step": 1000
+ }
+ ],
+ "logging_steps": 20,
+ "max_steps": 1725,
+ "num_train_epochs": 3,
+ "save_steps": 200,
+ "total_flos": 2.3920781700366336e+18,
+ "trial_name": null,
+ "trial_params": null
+ }
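
`log_history` is the useful part of this file: a flat list of train-loss and eval-loss records keyed by `step`. A small sketch for pulling out the eval curve, assuming the checkpoint directory has been downloaded locally:

```python
# Sketch: extract the eval-loss curve from a checkpoint's trainer_state.json.
import json

with open("checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

# Eval records are the entries that carry an "eval_loss" key.
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print(evals)  # [(200, 1.345...), (400, 1.292...), ..., (1000, 1.2406...)]
```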
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e5fa4a6be80dea6c8018bb49732568d9b22103b1e3942cf21f3b067cbdff444
+ size 4027
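
`training_args.bin` is the `TrainingArguments` object saved with `torch.save`, so once downloaded it can be inspected directly. A sketch, assuming `transformers` is importable so the object can be unpickled:

```python
# Sketch: inspect the pickled TrainingArguments stored in training_args.bin.
import torch

# weights_only=False is needed on newer torch releases to unpickle full objects.
args = torch.load("checkpoint-1000/training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.save_steps)
```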
checkpoint-1200/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
checkpoint-1200/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "../chinese-llama-2-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 8,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1200/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+ size 443
checkpoint-1200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2ee310a6cf6a32e11abc2f6a70bd55159774708ca656382570d6b68a4b853d4
+ size 52562757
checkpoint-1200/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:688bcd22520549b8a89c6a81ce93e9faccc9eb18550468cab05ae219f18538ff
+ size 18679
checkpoint-1200/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b994e3ef13cb877d826e3c6e57158a640a118b797654c5dd97ff90090d295e3
+ size 18679
checkpoint-1200/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be4b3e81825c582eb5c46ad800e0b5989101507dbf4122ddcdf1cab9a466e935
+ size 18679
checkpoint-1200/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbdbbb81ee067d0a8931757b45688b5cb869fe14445248e936ab6ba60e3c9781
+ size 18679
checkpoint-1200/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:367ccf72cc4f5ec03b6635cb5a96094b34ab82051aefa6cddd5ff80a2655e832
+ size 18679
checkpoint-1200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2691a01f405e053da7ad7bcb0b3a2022cbc94dd82e3b77720da780c7ba897642
+ size 627
checkpoint-1200/trainer_state.json ADDED
@@ -0,0 +1,427 @@
+ {
+ "best_metric": 1.2316911220550537,
+ "best_model_checkpoint": "../llama2-9439-21sept/checkpoint-1200",
+ "epoch": 2.0839363241678726,
+ "eval_steps": 200,
+ "global_step": 1200,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.03,
+ "learning_rate": 5.9999999999999995e-05,
+ "loss": 3.4609,
+ "step": 20
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 0.00011999999999999999,
+ "loss": 2.8346,
+ "step": 40
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.00017999999999999998,
+ "loss": 1.7597,
+ "step": 60
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.00023999999999999998,
+ "loss": 1.542,
+ "step": 80
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.0003,
+ "loss": 1.4567,
+ "step": 100
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 0.0002963076923076923,
+ "loss": 1.4081,
+ "step": 120
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.0002926153846153846,
+ "loss": 1.3805,
+ "step": 140
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.0002889230769230769,
+ "loss": 1.3605,
+ "step": 160
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.00028523076923076923,
+ "loss": 1.3329,
+ "step": 180
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 0.0002815384615384615,
+ "loss": 1.3367,
+ "step": 200
+ },
+ {
+ "epoch": 0.35,
+ "eval_loss": 1.3450770378112793,
+ "eval_runtime": 6.306,
+ "eval_samples_per_second": 47.574,
+ "eval_steps_per_second": 1.269,
+ "step": 200
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 0.0002778461538461538,
+ "loss": 1.3152,
+ "step": 220
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 0.0002741538461538461,
+ "loss": 1.312,
+ "step": 240
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 0.00027046153846153843,
+ "loss": 1.2883,
+ "step": 260
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 0.00026676923076923074,
+ "loss": 1.2843,
+ "step": 280
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 0.00026307692307692306,
+ "loss": 1.2609,
+ "step": 300
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 0.00025938461538461537,
+ "loss": 1.2707,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 0.0002556923076923077,
+ "loss": 1.2624,
+ "step": 340
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 0.00025199999999999995,
+ "loss": 1.273,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 0.0002483076923076923,
+ "loss": 1.251,
+ "step": 380
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 0.0002446153846153846,
+ "loss": 1.2648,
+ "step": 400
+ },
+ {
+ "epoch": 0.69,
+ "eval_loss": 1.292348861694336,
+ "eval_runtime": 6.304,
+ "eval_samples_per_second": 47.589,
+ "eval_steps_per_second": 1.269,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 0.0002409230769230769,
+ "loss": 1.265,
+ "step": 420
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 0.0002372307692307692,
+ "loss": 1.2516,
+ "step": 440
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 0.00023353846153846151,
+ "loss": 1.2441,
+ "step": 460
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 0.00022984615384615383,
+ "loss": 1.2204,
+ "step": 480
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 0.00022615384615384614,
+ "loss": 1.2221,
+ "step": 500
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 0.00022246153846153846,
+ "loss": 1.2246,
+ "step": 520
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 0.00021876923076923074,
+ "loss": 1.2227,
+ "step": 540
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 0.00021507692307692306,
+ "loss": 1.2124,
+ "step": 560
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 0.00021138461538461537,
+ "loss": 1.2065,
+ "step": 580
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 0.00020769230769230766,
+ "loss": 1.2106,
+ "step": 600
+ },
+ {
+ "epoch": 1.04,
+ "eval_loss": 1.2669302225112915,
+ "eval_runtime": 6.3128,
+ "eval_samples_per_second": 47.523,
+ "eval_steps_per_second": 1.267,
+ "step": 600
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 0.000204,
+ "loss": 1.2046,
+ "step": 620
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 0.00020030769230769229,
+ "loss": 1.2114,
+ "step": 640
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 0.0001966153846153846,
+ "loss": 1.2122,
+ "step": 660
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 0.00019310769230769227,
+ "loss": 1.1963,
+ "step": 680
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 0.00018941538461538461,
+ "loss": 1.1965,
+ "step": 700
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 0.0001857230769230769,
+ "loss": 1.1937,
+ "step": 720
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 0.00018203076923076921,
+ "loss": 1.186,
+ "step": 740
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 0.00017833846153846153,
+ "loss": 1.1732,
+ "step": 760
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 0.00017464615384615381,
+ "loss": 1.1911,
+ "step": 780
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 0.00017095384615384616,
+ "loss": 1.2153,
+ "step": 800
+ },
+ {
+ "epoch": 1.39,
+ "eval_loss": 1.2513903379440308,
+ "eval_runtime": 6.3069,
+ "eval_samples_per_second": 47.567,
+ "eval_steps_per_second": 1.268,
+ "step": 800
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 0.00016726153846153844,
+ "loss": 1.1991,
+ "step": 820
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 0.00016356923076923073,
+ "loss": 1.1934,
+ "step": 840
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 0.00015987692307692307,
+ "loss": 1.2047,
+ "step": 860
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 0.00015618461538461536,
+ "loss": 1.2204,
+ "step": 880
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 0.0001524923076923077,
+ "loss": 1.1785,
+ "step": 900
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 0.00014879999999999998,
+ "loss": 1.1756,
+ "step": 920
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 0.0001451076923076923,
+ "loss": 1.214,
+ "step": 940
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 0.0001414153846153846,
+ "loss": 1.1794,
+ "step": 960
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 0.00013772307692307693,
+ "loss": 1.1946,
+ "step": 980
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 0.0001340307692307692,
+ "loss": 1.175,
+ "step": 1000
+ },
+ {
+ "epoch": 1.74,
+ "eval_loss": 1.2406948804855347,
+ "eval_runtime": 6.3315,
+ "eval_samples_per_second": 47.382,
+ "eval_steps_per_second": 1.264,
+ "step": 1000
+ },
+ {
+ "epoch": 1.77,
+ "learning_rate": 0.00013033846153846153,
+ "loss": 1.178,
+ "step": 1020
+ },
+ {
+ "epoch": 1.81,
+ "learning_rate": 0.00012664615384615384,
+ "loss": 1.1859,
+ "step": 1040
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 0.00012295384615384615,
+ "loss": 1.1801,
+ "step": 1060
+ },
+ {
+ "epoch": 1.88,
+ "learning_rate": 0.00011926153846153845,
+ "loss": 1.1743,
+ "step": 1080
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 0.00011556923076923076,
+ "loss": 1.1596,
+ "step": 1100
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 0.00011187692307692307,
+ "loss": 1.1495,
+ "step": 1120
+ },
+ {
+ "epoch": 1.98,
+ "learning_rate": 0.00010818461538461537,
+ "loss": 1.1808,
+ "step": 1140
+ },
+ {
+ "epoch": 2.01,
+ "learning_rate": 0.00010449230769230768,
+ "loss": 1.1743,
+ "step": 1160
+ },
+ {
+ "epoch": 2.05,
+ "learning_rate": 0.0001008,
+ "loss": 1.1354,
+ "step": 1180
+ },
+ {
+ "epoch": 2.08,
+ "learning_rate": 9.71076923076923e-05,
+ "loss": 1.1712,
+ "step": 1200
+ },
+ {
+ "epoch": 2.08,
+ "eval_loss": 1.2316911220550537,
+ "eval_runtime": 6.3191,
+ "eval_samples_per_second": 47.475,
+ "eval_steps_per_second": 1.266,
+ "step": 1200
+ }
+ ],
+ "logging_steps": 20,
+ "max_steps": 1725,
+ "num_train_epochs": 3,
+ "save_steps": 200,
+ "total_flos": 2.8704938040439603e+18,
+ "trial_name": null,
+ "trial_params": null
+ }
checkpoint-1200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e5fa4a6be80dea6c8018bb49732568d9b22103b1e3942cf21f3b067cbdff444
+ size 4027
checkpoint-1400/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
checkpoint-1400/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "../chinese-llama-2-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 8,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1400/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+ size 443
checkpoint-1400/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2a49b0f46ce2aee7f28498fb481e3333d0b4b3e6587d7060cd0db1d38793b26
+ size 52562757
checkpoint-1400/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1e3dc83a5fca822891bba2d605222ab74cd99f1918102304ca4b65d3137d789
+ size 18679
checkpoint-1400/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e64ecf9f57a459867d36288ab900f39ea916387621609dd3d66048fb0ea5d7c
+ size 18679
checkpoint-1400/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58d7efa004a69734b841cd2f4519073da72d8de1db4e8465a104023e1003e073
+ size 18679
checkpoint-1400/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2d36476c1f68e57dbd03ff794033f730ef968f8d9e8aecf8299dc882a68cb2e
+ size 18679
checkpoint-1400/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83eb914f8503bb55b84fbbf3b99e7ce79d625c6cc0f4d412abba661d5393af1a
+ size 18679
checkpoint-1400/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c5b9da5e9b4de58246d24ef97f5a656736c469da066e184cfd52c712accb4b1
+ size 627
checkpoint-1400/trainer_state.json ADDED
@@ -0,0 +1,495 @@
+ {
+ "best_metric": 1.2265406847000122,
+ "best_model_checkpoint": "../llama2-9439-21sept/checkpoint-1400",
+ "epoch": 2.431259044862518,
+ "eval_steps": 200,
+ "global_step": 1400,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.03,
+ "learning_rate": 5.9999999999999995e-05,
+ "loss": 3.4609,
+ "step": 20
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 0.00011999999999999999,
+ "loss": 2.8346,
+ "step": 40
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.00017999999999999998,
+ "loss": 1.7597,
+ "step": 60
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.00023999999999999998,
+ "loss": 1.542,
+ "step": 80
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.0003,
+ "loss": 1.4567,
+ "step": 100
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 0.0002963076923076923,
+ "loss": 1.4081,
+ "step": 120
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.0002926153846153846,
+ "loss": 1.3805,
+ "step": 140
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.0002889230769230769,
+ "loss": 1.3605,
+ "step": 160
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.00028523076923076923,
+ "loss": 1.3329,
+ "step": 180
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 0.0002815384615384615,
+ "loss": 1.3367,
+ "step": 200
+ },
+ {
+ "epoch": 0.35,
+ "eval_loss": 1.3450770378112793,
+ "eval_runtime": 6.306,
+ "eval_samples_per_second": 47.574,
+ "eval_steps_per_second": 1.269,
+ "step": 200
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 0.0002778461538461538,
+ "loss": 1.3152,
+ "step": 220
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 0.0002741538461538461,
+ "loss": 1.312,
+ "step": 240
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 0.00027046153846153843,
+ "loss": 1.2883,
+ "step": 260
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 0.00026676923076923074,
+ "loss": 1.2843,
+ "step": 280
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 0.00026307692307692306,
+ "loss": 1.2609,
+ "step": 300
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 0.00025938461538461537,
+ "loss": 1.2707,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 0.0002556923076923077,
+ "loss": 1.2624,
+ "step": 340
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 0.00025199999999999995,
+ "loss": 1.273,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 0.0002483076923076923,
+ "loss": 1.251,
+ "step": 380
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 0.0002446153846153846,
+ "loss": 1.2648,
+ "step": 400
+ },
+ {
+ "epoch": 0.69,
+ "eval_loss": 1.292348861694336,
+ "eval_runtime": 6.304,
+ "eval_samples_per_second": 47.589,
+ "eval_steps_per_second": 1.269,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 0.0002409230769230769,
+ "loss": 1.265,
+ "step": 420
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 0.0002372307692307692,
+ "loss": 1.2516,
+ "step": 440
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 0.00023353846153846151,
+ "loss": 1.2441,
+ "step": 460
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 0.00022984615384615383,
+ "loss": 1.2204,
+ "step": 480
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 0.00022615384615384614,
+ "loss": 1.2221,
+ "step": 500
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 0.00022246153846153846,
+ "loss": 1.2246,
+ "step": 520
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 0.00021876923076923074,
+ "loss": 1.2227,
+ "step": 540
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 0.00021507692307692306,
+ "loss": 1.2124,
+ "step": 560
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 0.00021138461538461537,
+ "loss": 1.2065,
+ "step": 580
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 0.00020769230769230766,
+ "loss": 1.2106,
+ "step": 600
+ },
+ {
+ "epoch": 1.04,
+ "eval_loss": 1.2669302225112915,
+ "eval_runtime": 6.3128,
+ "eval_samples_per_second": 47.523,
+ "eval_steps_per_second": 1.267,
+ "step": 600
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 0.000204,
+ "loss": 1.2046,
+ "step": 620
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 0.00020030769230769229,
+ "loss": 1.2114,
+ "step": 640
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 0.0001966153846153846,
+ "loss": 1.2122,
+ "step": 660
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 0.00019310769230769227,
+ "loss": 1.1963,
+ "step": 680
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 0.00018941538461538461,
+ "loss": 1.1965,
+ "step": 700
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 0.0001857230769230769,
+ "loss": 1.1937,
+ "step": 720
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 0.00018203076923076921,
+ "loss": 1.186,
+ "step": 740
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 0.00017833846153846153,
+ "loss": 1.1732,
+ "step": 760
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 0.00017464615384615381,
+ "loss": 1.1911,
+ "step": 780
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 0.00017095384615384616,
+ "loss": 1.2153,
+ "step": 800
+ },
+ {
+ "epoch": 1.39,
+ "eval_loss": 1.2513903379440308,
+ "eval_runtime": 6.3069,
+ "eval_samples_per_second": 47.567,
+ "eval_steps_per_second": 1.268,
+ "step": 800
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 0.00016726153846153844,
+ "loss": 1.1991,
+ "step": 820
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 0.00016356923076923073,
+ "loss": 1.1934,
+ "step": 840
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 0.00015987692307692307,
+ "loss": 1.2047,
+ "step": 860
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 0.00015618461538461536,
+ "loss": 1.2204,
+ "step": 880
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 0.0001524923076923077,
+ "loss": 1.1785,
+ "step": 900
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 0.00014879999999999998,
+ "loss": 1.1756,
+ "step": 920
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 0.0001451076923076923,
+ "loss": 1.214,
+ "step": 940
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 0.0001414153846153846,
+ "loss": 1.1794,
+ "step": 960
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 0.00013772307692307693,
+ "loss": 1.1946,
+ "step": 980
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 0.0001340307692307692,
+ "loss": 1.175,
+ "step": 1000
+ },
+ {
+ "epoch": 1.74,
+ "eval_loss": 1.2406948804855347,
+ "eval_runtime": 6.3315,
+ "eval_samples_per_second": 47.382,
+ "eval_steps_per_second": 1.264,
+ "step": 1000
+ },
+ {
+ "epoch": 1.77,
+ "learning_rate": 0.00013033846153846153,
+ "loss": 1.178,
+ "step": 1020
+ },
+ {
+ "epoch": 1.81,
+ "learning_rate": 0.00012664615384615384,
+ "loss": 1.1859,
+ "step": 1040
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 0.00012295384615384615,
+ "loss": 1.1801,
+ "step": 1060
+ },
+ {
+ "epoch": 1.88,
+ "learning_rate": 0.00011926153846153845,
+ "loss": 1.1743,
+ "step": 1080
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 0.00011556923076923076,
+ "loss": 1.1596,
+ "step": 1100
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 0.00011187692307692307,
+ "loss": 1.1495,
+ "step": 1120
+ },
+ {
+ "epoch": 1.98,
+ "learning_rate": 0.00010818461538461537,
+ "loss": 1.1808,
+ "step": 1140
+ },
+ {
+ "epoch": 2.01,
+ "learning_rate": 0.00010449230769230768,
+ "loss": 1.1743,
+ "step": 1160
+ },
+ {
+ "epoch": 2.05,
+ "learning_rate": 0.0001008,
+ "loss": 1.1354,
+ "step": 1180
+ },
+ {
+ "epoch": 2.08,
+ "learning_rate": 9.71076923076923e-05,
+ "loss": 1.1712,
+ "step": 1200
+ },
+ {
+ "epoch": 2.08,
+ "eval_loss": 1.2316911220550537,
+ "eval_runtime": 6.3191,
+ "eval_samples_per_second": 47.475,
+ "eval_steps_per_second": 1.266,
+ "step": 1200
+ },
+ {
+ "epoch": 2.12,
+ "learning_rate": 9.34153846153846e-05,
+ "loss": 1.1709,
+ "step": 1220
+ },
+ {
+ "epoch": 2.15,
+ "learning_rate": 8.972307692307691e-05,
+ "loss": 1.1667,
+ "step": 1240
+ },
+ {
+ "epoch": 2.19,
+ "learning_rate": 8.603076923076923e-05,
+ "loss": 1.1781,
+ "step": 1260
+ },
+ {
+ "epoch": 2.22,
+ "learning_rate": 8.233846153846154e-05,
+ "loss": 1.1628,
+ "step": 1280
+ },
+ {
+ "epoch": 2.26,
+ "learning_rate": 7.864615384615383e-05,
+ "loss": 1.1723,
+ "step": 1300
+ },
+ {
+ "epoch": 2.29,
+ "learning_rate": 7.495384615384615e-05,
+ "loss": 1.1578,
+ "step": 1320
+ },
+ {
+ "epoch": 2.33,
+ "learning_rate": 7.126153846153845e-05,
+ "loss": 1.1399,
+ "step": 1340
+ },
+ {
+ "epoch": 2.36,
+ "learning_rate": 6.756923076923077e-05,
+ "loss": 1.1612,
+ "step": 1360
+ },
+ {
+ "epoch": 2.4,
+ "learning_rate": 6.387692307692307e-05,
+ "loss": 1.1582,
+ "step": 1380
+ },
+ {
+ "epoch": 2.43,
+ "learning_rate": 6.0184615384615375e-05,
+ "loss": 1.1534,
+ "step": 1400
+ },
+ {
+ "epoch": 2.43,
+ "eval_loss": 1.2265406847000122,
+ "eval_runtime": 6.3176,
+ "eval_samples_per_second": 47.486,
+ "eval_steps_per_second": 1.266,
+ "step": 1400
+ }
+ ],
+ "logging_steps": 20,
+ "max_steps": 1725,
+ "num_train_epochs": 3,
+ "save_steps": 200,
+ "total_flos": 3.348909438051287e+18,
+ "trial_name": null,
+ "trial_params": null
+ }
checkpoint-1400/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e5fa4a6be80dea6c8018bb49732568d9b22103b1e3942cf21f3b067cbdff444
+ size 4027
checkpoint-1600/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
checkpoint-1600/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "../chinese-llama-2-13b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 8,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1600/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+ size 443
checkpoint-1600/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:063a994a29578fb2287de3c40d6739c12e280d2c013c64b504c30c49b921d841
+ size 52562757
checkpoint-1600/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf98e784e6b8dc883e2b800a4e11a6bf86ba1127f10f4f1235ba37d2fff2ec1d
+ size 18679
checkpoint-1600/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de58a6082f6549d7efc38756577f571cb6e854db8c6e315cf12c4d5166beb810
+ size 18679
checkpoint-1600/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd5ae166dffc6c6455bac816ad340b2c97433f56b532222b69bf2d7b7f170195
+ size 18679
checkpoint-1600/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2567bbf472792e86322a470e9e62452e879e353cbc25534507c3f539691f3a94
+ size 18679
checkpoint-1600/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64dea77acac01ff60d3c4eb7bf7b6bce6a5b64093fcaa66ea3b7475ce7bf5a18
+ size 18679
checkpoint-1600/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b65eb6f72e0fedc2475341926889dc05fe463519f18fb1a3fcf45b742f89af60
+ size 627
checkpoint-1600/trainer_state.json ADDED
@@ -0,0 +1,563 @@
+ {
+ "best_metric": 1.2222874164581299,
+ "best_model_checkpoint": "../llama2-9439-21sept/checkpoint-1600",
+ "epoch": 2.7785817655571634,
+ "eval_steps": 200,
+ "global_step": 1600,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.03,
+ "learning_rate": 5.9999999999999995e-05,
+ "loss": 3.4609,
+ "step": 20
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 0.00011999999999999999,
+ "loss": 2.8346,
+ "step": 40
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 0.00017999999999999998,
+ "loss": 1.7597,
+ "step": 60
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.00023999999999999998,
+ "loss": 1.542,
+ "step": 80
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 0.0003,
+ "loss": 1.4567,
+ "step": 100
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 0.0002963076923076923,
+ "loss": 1.4081,
+ "step": 120
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 0.0002926153846153846,
+ "loss": 1.3805,
+ "step": 140
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 0.0002889230769230769,
+ "loss": 1.3605,
+ "step": 160
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 0.00028523076923076923,
+ "loss": 1.3329,
+ "step": 180
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 0.0002815384615384615,
+ "loss": 1.3367,
+ "step": 200
+ },
+ {
+ "epoch": 0.35,
+ "eval_loss": 1.3450770378112793,
+ "eval_runtime": 6.306,
+ "eval_samples_per_second": 47.574,
+ "eval_steps_per_second": 1.269,
+ "step": 200
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 0.0002778461538461538,
+ "loss": 1.3152,
+ "step": 220
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 0.0002741538461538461,
+ "loss": 1.312,
+ "step": 240
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 0.00027046153846153843,
+ "loss": 1.2883,
+ "step": 260
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 0.00026676923076923074,
+ "loss": 1.2843,
+ "step": 280
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 0.00026307692307692306,
+ "loss": 1.2609,
+ "step": 300
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 0.00025938461538461537,
+ "loss": 1.2707,
+ "step": 320
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 0.0002556923076923077,
+ "loss": 1.2624,
+ "step": 340
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 0.00025199999999999995,
+ "loss": 1.273,
+ "step": 360
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 0.0002483076923076923,
+ "loss": 1.251,
+ "step": 380
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 0.0002446153846153846,
+ "loss": 1.2648,
+ "step": 400
+ },
+ {
+ "epoch": 0.69,
+ "eval_loss": 1.292348861694336,
+ "eval_runtime": 6.304,
+ "eval_samples_per_second": 47.589,
+ "eval_steps_per_second": 1.269,
+ "step": 400
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 0.0002409230769230769,
+ "loss": 1.265,
+ "step": 420
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 0.0002372307692307692,
+ "loss": 1.2516,
+ "step": 440
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 0.00023353846153846151,
+ "loss": 1.2441,
+ "step": 460
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 0.00022984615384615383,
+ "loss": 1.2204,
+ "step": 480
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 0.00022615384615384614,
+ "loss": 1.2221,
+ "step": 500
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 0.00022246153846153846,
+ "loss": 1.2246,
+ "step": 520
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 0.00021876923076923074,
+ "loss": 1.2227,
+ "step": 540
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 0.00021507692307692306,
+ "loss": 1.2124,
+ "step": 560
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 0.00021138461538461537,
+ "loss": 1.2065,
+ "step": 580
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 0.00020769230769230766,
+ "loss": 1.2106,
+ "step": 600
+ },
+ {
+ "epoch": 1.04,
+ "eval_loss": 1.2669302225112915,
+ "eval_runtime": 6.3128,
+ "eval_samples_per_second": 47.523,
+ "eval_steps_per_second": 1.267,
+ "step": 600
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 0.000204,
+ "loss": 1.2046,
+ "step": 620
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 0.00020030769230769229,
+ "loss": 1.2114,
+ "step": 640
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 0.0001966153846153846,
+ "loss": 1.2122,
+ "step": 660
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 0.00019310769230769227,
+ "loss": 1.1963,
+ "step": 680
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 0.00018941538461538461,
+ "loss": 1.1965,
+ "step": 700
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 0.0001857230769230769,
+ "loss": 1.1937,
+ "step": 720
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 0.00018203076923076921,
+ "loss": 1.186,
+ "step": 740
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 0.00017833846153846153,
+ "loss": 1.1732,
+ "step": 760
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 0.00017464615384615381,
+ "loss": 1.1911,
+ "step": 780
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 0.00017095384615384616,
+ "loss": 1.2153,
+ "step": 800
+ },
+ {
+ "epoch": 1.39,
+ "eval_loss": 1.2513903379440308,
+ "eval_runtime": 6.3069,
+ "eval_samples_per_second": 47.567,
+ "eval_steps_per_second": 1.268,
+ "step": 800
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 0.00016726153846153844,
+ "loss": 1.1991,
+ "step": 820
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 0.00016356923076923073,
+ "loss": 1.1934,
+ "step": 840
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 0.00015987692307692307,
+ "loss": 1.2047,
+ "step": 860
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 0.00015618461538461536,
+ "loss": 1.2204,
+ "step": 880
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 0.0001524923076923077,
+ "loss": 1.1785,
+ "step": 900
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 0.00014879999999999998,
+ "loss": 1.1756,
+ "step": 920
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 0.0001451076923076923,
+ "loss": 1.214,
+ "step": 940
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 0.0001414153846153846,
+ "loss": 1.1794,
+ "step": 960
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 0.00013772307692307693,
+ "loss": 1.1946,
+ "step": 980
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 0.0001340307692307692,
+ "loss": 1.175,
+ "step": 1000
+ },
+ {
+ "epoch": 1.74,
+ "eval_loss": 1.2406948804855347,
+ "eval_runtime": 6.3315,
+ "eval_samples_per_second": 47.382,
+ "eval_steps_per_second": 1.264,
+ "step": 1000
+ },
+ {
+ "epoch": 1.77,
+ "learning_rate": 0.00013033846153846153,
+ "loss": 1.178,
+ "step": 1020
+ },
+ {
+ "epoch": 1.81,
+ "learning_rate": 0.00012664615384615384,
+ "loss": 1.1859,
+ "step": 1040
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 0.00012295384615384615,
+ "loss": 1.1801,
+ "step": 1060
+ },
+ {
+ "epoch": 1.88,
+ "learning_rate": 0.00011926153846153845,
+ "loss": 1.1743,
+ "step": 1080
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 0.00011556923076923076,
+ "loss": 1.1596,
+ "step": 1100
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 0.00011187692307692307,
+ "loss": 1.1495,
+ "step": 1120
+ },
+ {
+ "epoch": 1.98,
+ "learning_rate": 0.00010818461538461537,
+ "loss": 1.1808,
+ "step": 1140
+ },
+ {
+ "epoch": 2.01,
+ "learning_rate": 0.00010449230769230768,
+ "loss": 1.1743,
+ "step": 1160
+ },
+ {
+ "epoch": 2.05,
+ "learning_rate": 0.0001008,
+ "loss": 1.1354,
+ "step": 1180
+ },
+ {
+ "epoch": 2.08,
+ "learning_rate": 9.71076923076923e-05,
+ "loss": 1.1712,
+ "step": 1200
+ },
+ {
+ "epoch": 2.08,
+ "eval_loss": 1.2316911220550537,
+ "eval_runtime": 6.3191,
+ "eval_samples_per_second": 47.475,
+ "eval_steps_per_second": 1.266,
+ "step": 1200
+ },
+ {
+ "epoch": 2.12,
+ "learning_rate": 9.34153846153846e-05,
+ "loss": 1.1709,
+ "step": 1220
+ },
+ {
+ "epoch": 2.15,
+ "learning_rate": 8.972307692307691e-05,
+ "loss": 1.1667,
+ "step": 1240
+ },
+ {
+ "epoch": 2.19,
+ "learning_rate": 8.603076923076923e-05,
+ "loss": 1.1781,
+ "step": 1260
+ },
+ {
+ "epoch": 2.22,
+ "learning_rate": 8.233846153846154e-05,
+ "loss": 1.1628,
+ "step": 1280
+ },
+ {
+ "epoch": 2.26,
+ "learning_rate": 7.864615384615383e-05,
+ "loss": 1.1723,
+ "step": 1300
+ },
+ {
+ "epoch": 2.29,
+ "learning_rate": 7.495384615384615e-05,
+ "loss": 1.1578,
+ "step": 1320
+ },
+ {
+ "epoch": 2.33,
+ "learning_rate": 7.126153846153845e-05,
+ "loss": 1.1399,
+ "step": 1340
+ },
+ {
+ "epoch": 2.36,
+ "learning_rate": 6.756923076923077e-05,
+ "loss": 1.1612,
+ "step": 1360
+ },
+ {
+ "epoch": 2.4,
+ "learning_rate": 6.387692307692307e-05,
+ "loss": 1.1582,
+ "step": 1380
+ },
+ {
+ "epoch": 2.43,
+ "learning_rate": 6.0184615384615375e-05,
+ "loss": 1.1534,
+ "step": 1400
+ },
+ {
+ "epoch": 2.43,
+ "eval_loss": 1.2265406847000122,
+ "eval_runtime": 6.3176,
+ "eval_samples_per_second": 47.486,
+ "eval_steps_per_second": 1.266,
+ "step": 1400
+ },
+ {
+ "epoch": 2.47,
+ "learning_rate": 5.649230769230769e-05,
+ "loss": 1.1626,
+ "step": 1420
+ },
+ {
+ "epoch": 2.5,
+ "learning_rate": 5.279999999999999e-05,
+ "loss": 1.1619,
+ "step": 1440
+ },
+ {
+ "epoch": 2.54,
+ "learning_rate": 4.91076923076923e-05,
+ "loss": 1.168,
+ "step": 1460
+ },
+ {
+ "epoch": 2.57,
+ "learning_rate": 4.541538461538462e-05,
+ "loss": 1.1475,
+ "step": 1480
+ },
+ {
+ "epoch": 2.6,
+ "learning_rate": 4.172307692307692e-05,
+ "loss": 1.1863,
+ "step": 1500
+ },
+ {
+ "epoch": 2.64,
+ "learning_rate": 3.803076923076923e-05,
+ "loss": 1.1452,
+ "step": 1520
+ },
+ {
+ "epoch": 2.67,
+ "learning_rate": 3.433846153846154e-05,
+ "loss": 1.1555,
+ "step": 1540
+ },
+ {
+ "epoch": 2.71,
+ "learning_rate": 3.0646153846153845e-05,
+ "loss": 1.1113,
+ "step": 1560
+ },
+ {
+ "epoch": 2.74,
+ "learning_rate": 2.695384615384615e-05,
+ "loss": 1.1429,
+ "step": 1580
+ },
+ {
+ "epoch": 2.78,
+ "learning_rate": 2.326153846153846e-05,
+ "loss": 1.1609,
+ "step": 1600
+ },
+ {
+ "epoch": 2.78,
+ "eval_loss": 1.2222874164581299,
+ "eval_runtime": 6.3153,
+ "eval_samples_per_second": 47.504,
+ "eval_steps_per_second": 1.267,
+ "step": 1600
+ }
+ ],
+ "logging_steps": 20,
+ "max_steps": 1725,
+ "num_train_epochs": 3,
+ "save_steps": 200,
+ "total_flos": 3.827325072058614e+18,
+ "trial_name": null,
+ "trial_params": null
+ }