Tags: PEFT · Not-For-All-Audiences
kingbri committed · commit 7f8e339 · 1 parent: 6527e25

Add lora files

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50):
  1. adapter_config.json +26 -0
  2. adapter_model.bin +3 -0
  3. checkpoint-100/README.md +21 -0
  4. checkpoint-100/adapter_config.json +26 -0
  5. checkpoint-100/adapter_model.bin +3 -0
  6. checkpoint-100/adapter_model/README.md +21 -0
  7. checkpoint-100/adapter_model/adapter_config.json +26 -0
  8. checkpoint-100/adapter_model/adapter_model.bin +3 -0
  9. checkpoint-100/optimizer.pt +3 -0
  10. checkpoint-100/rng_state_0.pth +3 -0
  11. checkpoint-100/rng_state_1.pth +3 -0
  12. checkpoint-100/rng_state_2.pth +3 -0
  13. checkpoint-100/rng_state_3.pth +3 -0
  14. checkpoint-100/rng_state_4.pth +3 -0
  15. checkpoint-100/rng_state_5.pth +3 -0
  16. checkpoint-100/rng_state_6.pth +3 -0
  17. checkpoint-100/rng_state_7.pth +3 -0
  18. checkpoint-100/scheduler.pt +3 -0
  19. checkpoint-100/trainer_state.json +635 -0
  20. checkpoint-100/training_args.bin +3 -0
  21. checkpoint-150/README.md +21 -0
  22. checkpoint-150/adapter_config.json +26 -0
  23. checkpoint-150/adapter_model.bin +3 -0
  24. checkpoint-150/adapter_model/README.md +21 -0
  25. checkpoint-150/adapter_model/adapter_config.json +26 -0
  26. checkpoint-150/adapter_model/adapter_model.bin +3 -0
  27. checkpoint-150/optimizer.pt +3 -0
  28. checkpoint-150/rng_state_0.pth +3 -0
  29. checkpoint-150/rng_state_1.pth +3 -0
  30. checkpoint-150/rng_state_2.pth +3 -0
  31. checkpoint-150/rng_state_3.pth +3 -0
  32. checkpoint-150/rng_state_4.pth +3 -0
  33. checkpoint-150/rng_state_5.pth +3 -0
  34. checkpoint-150/rng_state_6.pth +3 -0
  35. checkpoint-150/rng_state_7.pth +3 -0
  36. checkpoint-150/scheduler.pt +3 -0
  37. checkpoint-150/trainer_state.json +943 -0
  38. checkpoint-150/training_args.bin +3 -0
  39. checkpoint-200/README.md +21 -0
  40. checkpoint-200/adapter_config.json +26 -0
  41. checkpoint-200/adapter_model.bin +3 -0
  42. checkpoint-200/adapter_model/README.md +21 -0
  43. checkpoint-200/adapter_model/adapter_config.json +26 -0
  44. checkpoint-200/adapter_model/adapter_model.bin +3 -0
  45. checkpoint-200/optimizer.pt +3 -0
  46. checkpoint-200/rng_state_0.pth +3 -0
  47. checkpoint-200/rng_state_1.pth +3 -0
  48. checkpoint-200/rng_state_2.pth +3 -0
  49. checkpoint-200/rng_state_3.pth +3 -0
  50. checkpoint-200/rng_state_4.pth +3 -0
adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+  "bias": "none",
+  "fan_in_fan_out": null,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "k_proj",
+    "up_proj",
+    "o_proj",
+    "gate_proj",
+    "q_proj",
+    "v_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
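As a usage note (not part of the commit): an adapter with this config is loaded by pairing the base model named in `base_model_name_or_path` with the adapter weights. A minimal sketch with the `peft` library, assuming the adapter is published under a placeholder repo id:

```python
# Minimal sketch, assuming the adapter lives at a placeholder repo id
# ("kingbri/pippa-sharegpt-13b-qlora" is illustrative, not confirmed by the diff).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-13b-hf",   # from "base_model_name_or_path"
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-13b-hf")

# PEFT reads adapter_config.json to attach rank-8 LoRA deltas (alpha=16)
# to the seven projection modules listed in "target_modules".
model = PeftModel.from_pretrained(base, "kingbri/pippa-sharegpt-13b-qlora")
model.eval()
```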
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7738c4edc7b66e957a9a9e7474a3be85403c8e4f0c0a506d2806acd4fed40d5
+size 125374989
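The binary entries in this commit are Git LFS pointer files: three lines giving the spec version, the SHA-256 of the real blob, and its byte size. A minimal sketch of reading that metadata (an illustrative helper, not part of the repo):

```python
# Minimal sketch: parse the three-line Git LFS pointer format shown above.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"oid": fields["oid"].removeprefix("sha256:"), "size_bytes": int(fields["size"])}

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:f7738c4edc7b66e957a9a9e7474a3be85403c8e4f0c0a506d2806acd4fed40d5\n"
    "size 125374989\n"
)
print(parse_lfs_pointer(pointer))  # ~120 MiB of adapter weights behind this pointer
```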
checkpoint-100/README.md ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
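A usage note (not in the commit): the bullet list in this README maps one-to-one onto a `transformers` `BitsAndBytesConfig`, so the training-time quantization can be reproduced at load time. A minimal sketch:

```python
# Minimal sketch: the README's quantization bullets as a BitsAndBytesConfig.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # load_in_4bit: True
    bnb_4bit_quant_type="nf4",              # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=True,         # bnb_4bit_use_double_quant: True
    bnb_4bit_compute_dtype=torch.bfloat16,  # bnb_4bit_compute_dtype: bfloat16
    llm_int8_threshold=6.0,                 # llm_int8_threshold: 6.0
)
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-13b-hf", quantization_config=bnb_config, device_map="auto"
)
```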
checkpoint-100/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+  "bias": "none",
+  "fan_in_fan_out": null,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "k_proj",
+    "up_proj",
+    "o_proj",
+    "gate_proj",
+    "q_proj",
+    "v_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
checkpoint-100/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7347e9c013e30092db854c3ac37c2ef4c96d75fcb7d34e5e89d1908990ab6ecd
+size 125374989
checkpoint-100/adapter_model/README.md ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
checkpoint-100/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+  "bias": "none",
+  "fan_in_fan_out": null,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "k_proj",
+    "up_proj",
+    "o_proj",
+    "gate_proj",
+    "q_proj",
+    "v_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
checkpoint-100/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7347e9c013e30092db854c3ac37c2ef4c96d75fcb7d34e5e89d1908990ab6ecd
+size 125374989
checkpoint-100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:058a5ad19149195b58aa034208fbdd62061b7b65b5390ebb1e9511728f639a05
+size 250681597
checkpoint-100/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f705fa4f8554ef13cd80d123eedf6db4d4f919bd202643f96ddc18c3dd814b7
+size 21687
checkpoint-100/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5f5ab19cf68a2202e4f485a33dd80f259f214096cb1aedc35f82497d399c23b
+size 21687
checkpoint-100/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d83e95ec95416e9f35064e113bfb9c4945355d2485e15fc46e5e1537a116797e
+size 21687
checkpoint-100/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fca50d7e35436d374671518fd1c8fb59611821f6ae0d33a22404297046ed918
+size 21687
checkpoint-100/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9390d773fa037a30354bc7b4930c2aea8b97604f719eafdc6fb1fc02270476d
+size 21687
checkpoint-100/rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ee4b285615dbe11b31ff42bf5c128fcdb41ded83ece781479e3a09d0e6f5b92
+size 21687
checkpoint-100/rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc1fd611cb8a8f256e5eddd3ab6031506920951ecc59147c9e8e06d2c999828e
+size 21687
checkpoint-100/rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a240cfccdea4f018cd47f4a7637dad6b0634bd559c84724ec04d368401214c4
+size 21687
checkpoint-100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b49bfe742f6b22f71c53deba157d146b3b5a2f75c884c46630a1c049d5f07826
+size 627
checkpoint-100/trainer_state.json ADDED
@@ -0,0 +1,635 @@
+{
+  "best_metric": 1.541548728942871,
+  "best_model_checkpoint": "./pippa-sharegpt-13b-qlora/checkpoint-100",
+  "epoch": 1.0554089709762533,
+  "eval_steps": 50,
+  "global_step": 100,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {"epoch": 0.01, "learning_rate": 2e-05, "loss": 1.9036, "step": 1},
+    {"epoch": 0.02, "learning_rate": 4e-05, "loss": 1.9209, "step": 2},
+    {"epoch": 0.03, "learning_rate": 6e-05, "loss": 1.9161, "step": 3},
+    {"epoch": 0.04, "learning_rate": 8e-05, "loss": 1.8722, "step": 4},
+    {"epoch": 0.05, "learning_rate": 0.0001, "loss": 1.866, "step": 5},
+    {"epoch": 0.06, "learning_rate": 0.00012, "loss": 1.872, "step": 6},
+    {"epoch": 0.07, "learning_rate": 0.00014, "loss": 1.9138, "step": 7},
+    {"epoch": 0.08, "learning_rate": 0.00016, "loss": 1.8785, "step": 8},
+    {"epoch": 0.09, "learning_rate": 0.00018, "loss": 1.8013, "step": 9},
+    {"epoch": 0.11, "learning_rate": 0.0002, "loss": 1.7859, "step": 10},
+    {"epoch": 0.12, "learning_rate": 0.00019999332998034514, "loss": 1.7805, "step": 11},
+    {"epoch": 0.13, "learning_rate": 0.00019997332081116373, "loss": 1.7716, "step": 12},
+    {"epoch": 0.14, "learning_rate": 0.00019993997516168689, "loss": 1.7453, "step": 13},
+    {"epoch": 0.15, "learning_rate": 0.00019989329748023725, "loss": 1.7306, "step": 14},
+    {"epoch": 0.16, "learning_rate": 0.00019983329399363598, "loss": 1.7535, "step": 15},
+    {"epoch": 0.17, "learning_rate": 0.0001997599727063717, "loss": 1.7811, "step": 16},
+    {"epoch": 0.18, "learning_rate": 0.000199673343399533, "loss": 1.7455, "step": 17},
+    {"epoch": 0.19, "learning_rate": 0.00019957341762950344, "loss": 1.748, "step": 18},
+    {"epoch": 0.2, "learning_rate": 0.0001994602087264201, "loss": 1.723, "step": 19},
+    {"epoch": 0.21, "learning_rate": 0.00019933373179239502, "loss": 1.7539, "step": 20},
+    {"epoch": 0.22, "learning_rate": 0.000199194003699501, "loss": 1.7293, "step": 21},
+    {"epoch": 0.23, "learning_rate": 0.0001990410430875205, "loss": 1.6857, "step": 22},
+    {"epoch": 0.24, "learning_rate": 0.0001988748703614594, "loss": 1.7209, "step": 23},
+    {"epoch": 0.25, "learning_rate": 0.00019869550768882455, "loss": 1.7127, "step": 24},
+    {"epoch": 0.26, "learning_rate": 0.0001985029789966671, "loss": 1.7024, "step": 25},
+    {"epoch": 0.27, "learning_rate": 0.0001982973099683902, "loss": 1.6927, "step": 26},
+    {"epoch": 0.28, "learning_rate": 0.00019807852804032305, "loss": 1.6584, "step": 27},
+    {"epoch": 0.3, "learning_rate": 0.0001978466623980609, "loss": 1.7149, "step": 28},
+    {"epoch": 0.31, "learning_rate": 0.00019760174397257156, "loss": 1.6815, "step": 29},
+    {"epoch": 0.32, "learning_rate": 0.0001973438054360693, "loss": 1.7067, "step": 30},
+    {"epoch": 0.33, "learning_rate": 0.00019707288119765623, "loss": 1.6799, "step": 31},
+    {"epoch": 0.34, "learning_rate": 0.00019678900739873226, "loss": 1.7206, "step": 32},
+    {"epoch": 0.35, "learning_rate": 0.0001964922219081738, "loss": 1.6458, "step": 33},
+    {"epoch": 0.36, "learning_rate": 0.00019618256431728194, "loss": 1.6649, "step": 34},
+    {"epoch": 0.37, "learning_rate": 0.00019586007593450097, "loss": 1.6684, "step": 35},
+    {"epoch": 0.38, "learning_rate": 0.000195524799779908, "loss": 1.7411, "step": 36},
+    {"epoch": 0.39, "learning_rate": 0.00019517678057947384, "loss": 1.6678, "step": 37},
+    {"epoch": 0.4, "learning_rate": 0.0001948160647590966, "loss": 1.6738, "step": 38},
+    {"epoch": 0.41, "learning_rate": 0.00019444270043840852, "loss": 1.6804, "step": 39},
+    {"epoch": 0.42, "learning_rate": 0.00019405673742435678, "loss": 1.6728, "step": 40},
+    {"epoch": 0.43, "learning_rate": 0.00019365822720455916, "loss": 1.6941, "step": 41},
+    {"epoch": 0.44, "learning_rate": 0.00019324722294043558, "loss": 1.6861, "step": 42},
+    {"epoch": 0.45, "learning_rate": 0.00019282377946011652, "loss": 1.6751, "step": 43},
+    {"epoch": 0.46, "learning_rate": 0.0001923879532511287, "loss": 1.7175, "step": 44},
+    {"epoch": 0.47, "learning_rate": 0.00019193980245285966, "loss": 1.6454, "step": 45},
+    {"epoch": 0.49, "learning_rate": 0.0001914793868488021, "loss": 1.6934, "step": 46},
+    {"epoch": 0.5, "learning_rate": 0.0001910067678585786, "loss": 1.6534, "step": 47},
+    {"epoch": 0.51, "learning_rate": 0.00019052200852974819, "loss": 1.6797, "step": 48},
+    {"epoch": 0.52, "learning_rate": 0.00019002517352939598, "loss": 1.6566, "step": 49},
+    {"epoch": 0.53, "learning_rate": 0.00018951632913550626, "loss": 1.657, "step": 50},
+    {"epoch": 0.53, "eval_loss": 1.653587818145752, "eval_runtime": 200.2473, "eval_samples_per_second": 6.672, "eval_steps_per_second": 0.21, "step": 50},
+    {"epoch": 0.54, "learning_rate": 0.0001889955432281212, "loss": 1.6736, "step": 51},
+    {"epoch": 0.55, "learning_rate": 0.00018846288528028555, "loss": 1.6696, "step": 52},
+    {"epoch": 0.56, "learning_rate": 0.00018791842634877898, "loss": 1.6429, "step": 53},
+    {"epoch": 0.57, "learning_rate": 0.00018736223906463696, "loss": 1.608, "step": 54},
+    {"epoch": 0.58, "learning_rate": 0.00018679439762346185, "loss": 1.6245, "step": 55},
+    {"epoch": 0.59, "learning_rate": 0.00018621497777552507, "loss": 1.6199, "step": 56},
+    {"epoch": 0.6, "learning_rate": 0.00018562405681566216, "loss": 1.7028, "step": 57},
+    {"epoch": 0.61, "learning_rate": 0.00018502171357296144, "loss": 1.6372, "step": 58},
+    {"epoch": 0.62, "learning_rate": 0.00018440802840024822, "loss": 1.6198, "step": 59},
+    {"epoch": 0.63, "learning_rate": 0.00018378308316336584, "loss": 1.6061, "step": 60},
+    {"epoch": 0.64, "learning_rate": 0.00018314696123025454, "loss": 1.6051, "step": 61},
+    {"epoch": 0.65, "learning_rate": 0.00018249974745983023, "loss": 1.6257, "step": 62},
+    {"epoch": 0.66, "learning_rate": 0.00018184152819066435, "loss": 1.6295, "step": 63},
+    {"epoch": 0.68, "learning_rate": 0.00018117239122946615, "loss": 1.6382, "step": 64},
+    {"epoch": 0.69, "learning_rate": 0.0001804924258393692, "loss": 1.6032, "step": 65},
+    {"epoch": 0.7, "learning_rate": 0.000179801722728024, "loss": 1.652, "step": 66},
+    {"epoch": 0.71, "learning_rate": 0.00017910037403549693, "loss": 1.6431, "step": 67},
+    {"epoch": 0.72, "learning_rate": 0.00017838847332197938, "loss": 1.6482, "step": 68},
+    {"epoch": 0.73, "learning_rate": 0.00017766611555530636, "loss": 1.6276, "step": 69},
+    {"epoch": 0.74, "learning_rate": 0.00017693339709828792, "loss": 1.611, "step": 70},
+    {"epoch": 0.75, "learning_rate": 0.00017619041569585418, "loss": 1.6309, "step": 71},
+    {"epoch": 0.76, "learning_rate": 0.0001754372704620164, "loss": 1.6316, "step": 72},
+    {"epoch": 0.77, "learning_rate": 0.00017467406186664474, "loss": 1.5779, "step": 73},
+    {"epoch": 0.78, "learning_rate": 0.00017390089172206592, "loss": 1.6015, "step": 74},
+    {"epoch": 0.79, "learning_rate": 0.0001731178631694811, "loss": 1.6133, "step": 75},
+    {"epoch": 0.8, "learning_rate": 0.00017232508066520702, "loss": 1.6048, "step": 76},
+    {"epoch": 0.81, "learning_rate": 0.00017152264996674136, "loss": 1.5792, "step": 77},
+    {"epoch": 0.82, "learning_rate": 0.00017071067811865476, "loss": 1.5756, "step": 78},
+    {"epoch": 0.83, "learning_rate": 0.00016988927343831095, "loss": 1.5497, "step": 79},
+    {"epoch": 0.84, "learning_rate": 0.00016905854550141716, "loss": 1.5604, "step": 80},
+    {"epoch": 0.85, "learning_rate": 0.00016821860512740671, "loss": 1.5722, "step": 81},
+    {"epoch": 0.87, "learning_rate": 0.00016736956436465573, "loss": 1.5943, "step": 82},
+    {"epoch": 0.88, "learning_rate": 0.00016651153647553567, "loss": 1.538, "step": 83},
+    {"epoch": 0.89, "learning_rate": 0.00016564463592130428, "loss": 1.5875, "step": 84},
+    {"epoch": 0.9, "learning_rate": 0.0001647689783468362, "loss": 1.5835, "step": 85},
+    {"epoch": 0.91, "learning_rate": 0.00016388468056519612, "loss": 1.5724, "step": 86},
+    {"epoch": 0.92, "learning_rate": 0.00016299186054205577, "loss": 1.573, "step": 87},
+    {"epoch": 0.93, "learning_rate": 0.00016209063737995715, "loss": 1.543, "step": 88},
+    {"epoch": 0.94, "learning_rate": 0.00016118113130242432, "loss": 1.5438, "step": 89},
+    {"epoch": 0.95, "learning_rate": 0.00016026346363792567, "loss": 1.5711, "step": 90},
+    {"epoch": 0.96, "learning_rate": 0.00015933775680368822, "loss": 1.5542, "step": 91},
+    {"epoch": 0.97, "learning_rate": 0.00015840413428936767, "loss": 1.5209, "step": 92},
+    {"epoch": 0.98, "learning_rate": 0.0001574627206405744, "loss": 1.569, "step": 93},
+    {"epoch": 0.99, "learning_rate": 0.0001565136414422592, "loss": 1.5657, "step": 94},
+    {"epoch": 1.0, "learning_rate": 0.00015555702330196023, "loss": 1.5248, "step": 95},
+    {"epoch": 1.01, "learning_rate": 0.00015459299383291345, "loss": 1.5482, "step": 96},
+    {"epoch": 1.02, "learning_rate": 0.000153621681637029, "loss": 1.5186, "step": 97},
+    {"epoch": 1.03, "learning_rate": 0.0001526432162877356, "loss": 1.5363, "step": 98},
+    {"epoch": 1.04, "learning_rate": 0.00015165772831269547, "loss": 1.5221, "step": 99},
+    {"epoch": 1.06, "learning_rate": 0.00015066534917639195, "loss": 1.5071, "step": 100},
+    {"epoch": 1.06, "eval_loss": 1.541548728942871, "eval_runtime": 200.1636, "eval_samples_per_second": 6.675, "eval_steps_per_second": 0.21, "step": 100}
+  ],
+  "logging_steps": 1,
+  "max_steps": 282,
+  "num_train_epochs": 3,
+  "save_steps": 50,
+  "total_flos": 1.029096231126696e+18,
+  "trial_name": null,
+  "trial_params": null
+}
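The trainer state is plain JSON, so the loss curve and schedule can be inspected directly. The logged learning rates are consistent with 10 linear warmup steps to 2e-4 followed by cosine decay over `max_steps` = 282; that reading is inferred from the numbers, not stated anywhere in the commit. A minimal sketch:

```python
# Minimal sketch: pull losses out of trainer_state.json and sanity-check the
# (inferred) cosine-with-warmup schedule against the logged learning rates.
import json, math

with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print(evals)  # [(50, 1.6535878...), (100, 1.5415487...)]

def lr_at(step, peak=2e-4, warmup=10, total=282):
    # Inferred schedule: linear warmup, then cosine decay to zero.
    if step <= warmup:
        return peak * step / warmup
    progress = (step - warmup) / (total - warmup)
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr_at(11))   # ~0.000199993..., matching step 11 in log_history
print(lr_at(146))  # exactly 1e-4, matching step 146 in checkpoint-150's log
```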
checkpoint-100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8bdde5c34937c67d70adb2cea431eda05b8cf22eb5ebad425d871a634a8fc84
+size 4411
checkpoint-150/README.md ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
checkpoint-150/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+  "bias": "none",
+  "fan_in_fan_out": null,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "k_proj",
+    "up_proj",
+    "o_proj",
+    "gate_proj",
+    "q_proj",
+    "v_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
checkpoint-150/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6282741cfdfa834dd0b8cd367070812ab0cc5a75ea9d41ef6dba922444c9625
+size 125374989
checkpoint-150/adapter_model/README.md ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
checkpoint-150/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+  "bias": "none",
+  "fan_in_fan_out": null,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "k_proj",
+    "up_proj",
+    "o_proj",
+    "gate_proj",
+    "q_proj",
+    "v_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
checkpoint-150/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6282741cfdfa834dd0b8cd367070812ab0cc5a75ea9d41ef6dba922444c9625
+size 125374989
checkpoint-150/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce6dcd2983bc5a32bd302a66647bda032b0572b986f4904bd4543d6d2d43b067
+size 250681597
checkpoint-150/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d209f7a1508a25c574e4c3d69a27ca41cba4cc2058612304668b6f5e97ab3d2
+size 21687
checkpoint-150/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c15ad3c1a375d56daf983bf7705ee84d266d43306e2844bcbb660fc1cc0ca51
+size 21687
checkpoint-150/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e6020aae6bf4cd66952300aaec4db5e3bc2938e89d85342eb6776cbb021072d
+size 21687
checkpoint-150/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40fc11f613f0cc52561363af8adcdefc232c2ece7688652a0073fa4f9ab6e6c0
+size 21687
checkpoint-150/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a03a4a7816676c3ba781d35b7283b034c62c299d4d29b637df3f2c31a25a2fe
+size 21687
checkpoint-150/rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37063ab98ea8f2f61c649feaee1aca9a1cddb2dc025aa359bbb47a7ed22e7955
+size 21687
checkpoint-150/rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6277e18656ac27e03e4b3090eac4beefadf98706b399a7a20e75b54b1c2696e
+size 21687
checkpoint-150/rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da8748808b2d5076a715a9ba94e767ae7a62e26de5bb5b4cb33229d479203300
+size 21687
checkpoint-150/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db732cfc752aa95d11e53afbebc7fa041457f5539315097963802e4d8585d404
+size 627
checkpoint-150/trainer_state.json ADDED
@@ -0,0 +1,943 @@
+{
+  "best_metric": 1.4968100786209106,
+  "best_model_checkpoint": "./pippa-sharegpt-13b-qlora/checkpoint-150",
+  "epoch": 1.58311345646438,
+  "eval_steps": 50,
+  "global_step": 150,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {"epoch": 0.01, "learning_rate": 2e-05, "loss": 1.9036, "step": 1},
+    {"epoch": 0.02, "learning_rate": 4e-05, "loss": 1.9209, "step": 2},
+    {"epoch": 0.03, "learning_rate": 6e-05, "loss": 1.9161, "step": 3},
+    {"epoch": 0.04, "learning_rate": 8e-05, "loss": 1.8722, "step": 4},
+    {"epoch": 0.05, "learning_rate": 0.0001, "loss": 1.866, "step": 5},
+    {"epoch": 0.06, "learning_rate": 0.00012, "loss": 1.872, "step": 6},
+    {"epoch": 0.07, "learning_rate": 0.00014, "loss": 1.9138, "step": 7},
+    {"epoch": 0.08, "learning_rate": 0.00016, "loss": 1.8785, "step": 8},
+    {"epoch": 0.09, "learning_rate": 0.00018, "loss": 1.8013, "step": 9},
+    {"epoch": 0.11, "learning_rate": 0.0002, "loss": 1.7859, "step": 10},
+    {"epoch": 0.12, "learning_rate": 0.00019999332998034514, "loss": 1.7805, "step": 11},
+    {"epoch": 0.13, "learning_rate": 0.00019997332081116373, "loss": 1.7716, "step": 12},
+    {"epoch": 0.14, "learning_rate": 0.00019993997516168689, "loss": 1.7453, "step": 13},
+    {"epoch": 0.15, "learning_rate": 0.00019989329748023725, "loss": 1.7306, "step": 14},
+    {"epoch": 0.16, "learning_rate": 0.00019983329399363598, "loss": 1.7535, "step": 15},
+    {"epoch": 0.17, "learning_rate": 0.0001997599727063717, "loss": 1.7811, "step": 16},
+    {"epoch": 0.18, "learning_rate": 0.000199673343399533, "loss": 1.7455, "step": 17},
+    {"epoch": 0.19, "learning_rate": 0.00019957341762950344, "loss": 1.748, "step": 18},
+    {"epoch": 0.2, "learning_rate": 0.0001994602087264201, "loss": 1.723, "step": 19},
+    {"epoch": 0.21, "learning_rate": 0.00019933373179239502, "loss": 1.7539, "step": 20},
+    {"epoch": 0.22, "learning_rate": 0.000199194003699501, "loss": 1.7293, "step": 21},
+    {"epoch": 0.23, "learning_rate": 0.0001990410430875205, "loss": 1.6857, "step": 22},
+    {"epoch": 0.24, "learning_rate": 0.0001988748703614594, "loss": 1.7209, "step": 23},
+    {"epoch": 0.25, "learning_rate": 0.00019869550768882455, "loss": 1.7127, "step": 24},
+    {"epoch": 0.26, "learning_rate": 0.0001985029789966671, "loss": 1.7024, "step": 25},
+    {"epoch": 0.27, "learning_rate": 0.0001982973099683902, "loss": 1.6927, "step": 26},
+    {"epoch": 0.28, "learning_rate": 0.00019807852804032305, "loss": 1.6584, "step": 27},
+    {"epoch": 0.3, "learning_rate": 0.0001978466623980609, "loss": 1.7149, "step": 28},
+    {"epoch": 0.31, "learning_rate": 0.00019760174397257156, "loss": 1.6815, "step": 29},
+    {"epoch": 0.32, "learning_rate": 0.0001973438054360693, "loss": 1.7067, "step": 30},
+    {"epoch": 0.33, "learning_rate": 0.00019707288119765623, "loss": 1.6799, "step": 31},
+    {"epoch": 0.34, "learning_rate": 0.00019678900739873226, "loss": 1.7206, "step": 32},
+    {"epoch": 0.35, "learning_rate": 0.0001964922219081738, "loss": 1.6458, "step": 33},
+    {"epoch": 0.36, "learning_rate": 0.00019618256431728194, "loss": 1.6649, "step": 34},
+    {"epoch": 0.37, "learning_rate": 0.00019586007593450097, "loss": 1.6684, "step": 35},
+    {"epoch": 0.38, "learning_rate": 0.000195524799779908, "loss": 1.7411, "step": 36},
+    {"epoch": 0.39, "learning_rate": 0.00019517678057947384, "loss": 1.6678, "step": 37},
+    {"epoch": 0.4, "learning_rate": 0.0001948160647590966, "loss": 1.6738, "step": 38},
+    {"epoch": 0.41, "learning_rate": 0.00019444270043840852, "loss": 1.6804, "step": 39},
+    {"epoch": 0.42, "learning_rate": 0.00019405673742435678, "loss": 1.6728, "step": 40},
+    {"epoch": 0.43, "learning_rate": 0.00019365822720455916, "loss": 1.6941, "step": 41},
+    {"epoch": 0.44, "learning_rate": 0.00019324722294043558, "loss": 1.6861, "step": 42},
+    {"epoch": 0.45, "learning_rate": 0.00019282377946011652, "loss": 1.6751, "step": 43},
+    {"epoch": 0.46, "learning_rate": 0.0001923879532511287, "loss": 1.7175, "step": 44},
+    {"epoch": 0.47, "learning_rate": 0.00019193980245285966, "loss": 1.6454, "step": 45},
+    {"epoch": 0.49, "learning_rate": 0.0001914793868488021, "loss": 1.6934, "step": 46},
+    {"epoch": 0.5, "learning_rate": 0.0001910067678585786, "loss": 1.6534, "step": 47},
+    {"epoch": 0.51, "learning_rate": 0.00019052200852974819, "loss": 1.6797, "step": 48},
+    {"epoch": 0.52, "learning_rate": 0.00019002517352939598, "loss": 1.6566, "step": 49},
+    {"epoch": 0.53, "learning_rate": 0.00018951632913550626, "loss": 1.657, "step": 50},
+    {"epoch": 0.53, "eval_loss": 1.653587818145752, "eval_runtime": 200.2473, "eval_samples_per_second": 6.672, "eval_steps_per_second": 0.21, "step": 50},
+    {"epoch": 0.54, "learning_rate": 0.0001889955432281212, "loss": 1.6736, "step": 51},
+    {"epoch": 0.55, "learning_rate": 0.00018846288528028555, "loss": 1.6696, "step": 52},
+    {"epoch": 0.56, "learning_rate": 0.00018791842634877898, "loss": 1.6429, "step": 53},
+    {"epoch": 0.57, "learning_rate": 0.00018736223906463696, "loss": 1.608, "step": 54},
+    {"epoch": 0.58, "learning_rate": 0.00018679439762346185, "loss": 1.6245, "step": 55},
+    {"epoch": 0.59, "learning_rate": 0.00018621497777552507, "loss": 1.6199, "step": 56},
+    {"epoch": 0.6, "learning_rate": 0.00018562405681566216, "loss": 1.7028, "step": 57},
+    {"epoch": 0.61, "learning_rate": 0.00018502171357296144, "loss": 1.6372, "step": 58},
+    {"epoch": 0.62, "learning_rate": 0.00018440802840024822, "loss": 1.6198, "step": 59},
+    {"epoch": 0.63, "learning_rate": 0.00018378308316336584, "loss": 1.6061, "step": 60},
+    {"epoch": 0.64, "learning_rate": 0.00018314696123025454, "loss": 1.6051, "step": 61},
+    {"epoch": 0.65, "learning_rate": 0.00018249974745983023, "loss": 1.6257, "step": 62},
+    {"epoch": 0.66, "learning_rate": 0.00018184152819066435, "loss": 1.6295, "step": 63},
+    {"epoch": 0.68, "learning_rate": 0.00018117239122946615, "loss": 1.6382, "step": 64},
+    {"epoch": 0.69, "learning_rate": 0.0001804924258393692, "loss": 1.6032, "step": 65},
+    {"epoch": 0.7, "learning_rate": 0.000179801722728024, "loss": 1.652, "step": 66},
+    {"epoch": 0.71, "learning_rate": 0.00017910037403549693, "loss": 1.6431, "step": 67},
+    {"epoch": 0.72, "learning_rate": 0.00017838847332197938, "loss": 1.6482, "step": 68},
+    {"epoch": 0.73, "learning_rate": 0.00017766611555530636, "loss": 1.6276, "step": 69},
+    {"epoch": 0.74, "learning_rate": 0.00017693339709828792, "loss": 1.611, "step": 70},
+    {"epoch": 0.75, "learning_rate": 0.00017619041569585418, "loss": 1.6309, "step": 71},
+    {"epoch": 0.76, "learning_rate": 0.0001754372704620164, "loss": 1.6316, "step": 72},
+    {"epoch": 0.77, "learning_rate": 0.00017467406186664474, "loss": 1.5779, "step": 73},
+    {"epoch": 0.78, "learning_rate": 0.00017390089172206592, "loss": 1.6015, "step": 74},
+    {"epoch": 0.79, "learning_rate": 0.0001731178631694811, "loss": 1.6133, "step": 75},
+    {"epoch": 0.8, "learning_rate": 0.00017232508066520702, "loss": 1.6048, "step": 76},
+    {"epoch": 0.81, "learning_rate": 0.00017152264996674136, "loss": 1.5792, "step": 77},
+    {"epoch": 0.82, "learning_rate": 0.00017071067811865476, "loss": 1.5756, "step": 78},
+    {"epoch": 0.83, "learning_rate": 0.00016988927343831095, "loss": 1.5497, "step": 79},
+    {"epoch": 0.84, "learning_rate": 0.00016905854550141716, "loss": 1.5604, "step": 80},
+    {"epoch": 0.85, "learning_rate": 0.00016821860512740671, "loss": 1.5722, "step": 81},
+    {"epoch": 0.87, "learning_rate": 0.00016736956436465573, "loss": 1.5943, "step": 82},
+    {"epoch": 0.88, "learning_rate": 0.00016651153647553567, "loss": 1.538, "step": 83},
+    {"epoch": 0.89, "learning_rate": 0.00016564463592130428, "loss": 1.5875, "step": 84},
+    {"epoch": 0.9, "learning_rate": 0.0001647689783468362, "loss": 1.5835, "step": 85},
+    {"epoch": 0.91, "learning_rate": 0.00016388468056519612, "loss": 1.5724, "step": 86},
+    {"epoch": 0.92, "learning_rate": 0.00016299186054205577, "loss": 1.573, "step": 87},
+    {"epoch": 0.93, "learning_rate": 0.00016209063737995715, "loss": 1.543, "step": 88},
+    {"epoch": 0.94, "learning_rate": 0.00016118113130242432, "loss": 1.5438, "step": 89},
+    {"epoch": 0.95, "learning_rate": 0.00016026346363792567, "loss": 1.5711, "step": 90},
+    {"epoch": 0.96, "learning_rate": 0.00015933775680368822, "loss": 1.5542, "step": 91},
+    {"epoch": 0.97, "learning_rate": 0.00015840413428936767, "loss": 1.5209, "step": 92},
+    {"epoch": 0.98, "learning_rate": 0.0001574627206405744, "loss": 1.569, "step": 93},
+    {"epoch": 0.99, "learning_rate": 0.0001565136414422592, "loss": 1.5657, "step": 94},
+    {"epoch": 1.0, "learning_rate": 0.00015555702330196023, "loss": 1.5248, "step": 95},
+    {"epoch": 1.01, "learning_rate": 0.00015459299383291345, "loss": 1.5482, "step": 96},
+    {"epoch": 1.02, "learning_rate": 0.000153621681637029, "loss": 1.5186, "step": 97},
+    {"epoch": 1.03, "learning_rate": 0.0001526432162877356, "loss": 1.5363, "step": 98},
+    {"epoch": 1.04, "learning_rate": 0.00015165772831269547, "loss": 1.5221, "step": 99},
+    {"epoch": 1.06, "learning_rate": 0.00015066534917639195, "loss": 1.5071, "step": 100},
+    {"epoch": 1.06, "eval_loss": 1.541548728942871, "eval_runtime": 200.1636, "eval_samples_per_second": 6.675, "eval_steps_per_second": 0.21, "step": 100},
+    {"epoch": 1.07, "learning_rate": 0.00014966621126259183, "loss": 1.5179, "step": 101},
+    {"epoch": 1.08, "learning_rate": 0.00014866044785668563, "loss": 1.4923, "step": 102},
+    {"epoch": 1.09, "learning_rate": 0.00014764819312790707, "loss": 1.5733, "step": 103},
+    {"epoch": 1.1, "learning_rate": 0.0001466295821114348, "loss": 1.4586, "step": 104},
+    {"epoch": 1.11, "learning_rate": 0.00014560475069037894, "loss": 1.5379, "step": 105},
+    {"epoch": 1.12, "learning_rate": 0.00014457383557765386, "loss": 1.5059, "step": 106},
+    {"epoch": 1.13, "learning_rate": 0.00014353697429774084, "loss": 1.512, "step": 107},
+    {"epoch": 1.14, "learning_rate": 0.0001424943051683422, "loss": 1.4908, "step": 108},
+    {"epoch": 1.15, "learning_rate": 0.0001414459672819297, "loss": 1.5067, "step": 109},
+    {"epoch": 1.16, "learning_rate": 0.00014039210048718949, "loss": 1.4908, "step": 110},
+    {"epoch": 1.17, "learning_rate": 0.00013933284537036625, "loss": 1.5299, "step": 111},
+    {"epoch": 1.18, "learning_rate": 0.000138268343236509, "loss": 1.4836, "step": 112},
+    {"epoch": 1.19, "learning_rate": 0.00013719873609062077, "loss": 1.5285, "step": 113},
+    {"epoch": 1.2, "learning_rate": 0.00013612416661871533, "loss": 1.5493, "step": 114},
+    {"epoch": 1.21, "learning_rate": 0.0001350447781687826, "loss": 1.4915, "step": 115},
+    {"epoch": 1.22, "learning_rate": 0.00013396071473166613, "loss": 1.5139, "step": 116},
+    {"epoch": 1.23, "learning_rate": 0.00013287212092185464, "loss": 1.4589, "step": 117},
+    {"epoch": 1.25, "learning_rate": 0.00013177914195819016, "loss": 1.4869, "step": 118},
+    {"epoch": 1.26, "learning_rate": 0.00013068192364449618, "loss": 1.5081, "step": 119},
+    {"epoch": 1.27, "learning_rate": 0.00012958061235012706, "loss": 1.5172, "step": 120},
+    {"epoch": 1.28, "learning_rate": 0.0001284753549904423, "loss": 1.5195, "step": 121},
+    {"epoch": 1.29, "learning_rate": 0.0001273662990072083, "loss": 1.4635, "step": 122},
+    {"epoch": 1.3, "learning_rate": 0.00012625359234892907, "loss": 1.5036, "step": 123},
+    {"epoch": 1.31, "learning_rate": 0.0001251373834511103, "loss": 1.5203, "step": 124},
+    {"epoch": 1.32, "learning_rate": 0.00012401782121645766, "loss": 1.5047, "step": 125},
+    {"epoch": 1.33, "learning_rate": 0.0001228950549950134, "loss": 1.5047, "step": 126},
+    {"epoch": 1.34, "learning_rate": 0.00012176923456423284, "loss": 1.4715, "step": 127},
+    {"epoch": 1.35, "learning_rate": 0.00012064051010900397, "loss": 1.4884, "step": 128},
+    {"epoch": 1.36, "learning_rate": 0.00011950903220161285, "loss": 1.4947, "step": 129},
+    {"epoch": 1.37, "learning_rate": 0.00011837495178165706, "loss": 1.4665, "step": 130},
+    {"epoch": 1.38, "learning_rate": 0.00011723842013591044, "loss": 1.4888, "step": 131},
+    {"epoch": 1.39, "learning_rate": 0.00011609958887814129, "loss": 1.52, "step": 132},
+    {"epoch": 1.4, "learning_rate": 0.00011495860992888712, "loss": 1.4305, "step": 133},
+    {"epoch": 1.41, "learning_rate": 0.00011381563549518823, "loss": 1.4865, "step": 134},
+    {"epoch": 1.42, "learning_rate": 0.00011267081805028339, "loss": 1.5155, "step": 135},
+    {"epoch": 1.44, "learning_rate": 0.00011152431031326978, "loss": 1.4952, "step": 136},
+    {"epoch": 1.45, "learning_rate": 0.00011037626522873019, "loss": 1.4765, "step": 137},
+    {"epoch": 1.46, "learning_rate": 0.00010922683594633021, "loss": 1.4729, "step": 138},
+    {"epoch": 1.47, "learning_rate": 0.00010807617580038796, "loss": 1.481, "step": 139},
+    {"epoch": 1.48, "learning_rate": 0.00010692443828941918, "loss": 1.4877, "step": 140},
+    {"epoch": 1.49, "learning_rate": 0.00010577177705566061, "loss": 1.4884, "step": 141},
+    {"epoch": 1.5, "learning_rate": 0.00010461834586457398, "loss": 1.4786, "step": 142},
+    {"epoch": 1.51, "learning_rate": 0.00010346429858433352, "loss": 1.4585, "step": 143},
+    {"epoch": 1.52, "learning_rate": 0.00010230978916530012, "loss": 1.4867, "step": 144},
+    {"epoch": 1.53, "learning_rate": 0.00010115497161948409, "loss": 1.487, "step": 145},
+    {"epoch": 1.54, "learning_rate": 0.0001, "loss": 1.4693, "step": 146},
+    {"epoch": 1.55, "learning_rate": 9.884502838051595e-05, "loss": 1.4391, "step": 147},
+    {"epoch": 1.56, "learning_rate": 9.76902108346999e-05, "loss": 1.493, "step": 148},
+    {"epoch": 1.57, "learning_rate": 9.653570141566653e-05, "loss": 1.5152, "step": 149},
+    {"epoch": 1.58, "learning_rate": 9.538165413542607e-05, "loss": 1.4766, "step": 150},
+    {"epoch": 1.58, "eval_loss": 1.4968100786209106, "eval_runtime": 199.9343, "eval_samples_per_second": 6.682, "eval_steps_per_second": 0.21, "step": 150}
+  ],
+  "logging_steps": 1,
+  "max_steps": 282,
+  "num_train_epochs": 3,
+  "save_steps": 50,
+  "total_flos": 1.546341313689944e+18,
+  "trial_name": null,
+  "trial_params": null
+}
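Since every checkpoint directory carries its own `trainer_state.json` with a running `best_metric` (here the eval loss: 1.6536 at step 50, 1.5415 at step 100, 1.4968 at step 150), the strongest saved adapter can be found with a short scan; a sketch under the assumption that the checkpoint directories sit in the working directory:

```python
# Minimal sketch: locate the checkpoint whose state records the lowest eval loss.
import json
from pathlib import Path

states = {
    p.parent.name: json.loads(p.read_text())
    for p in Path(".").glob("checkpoint-*/trainer_state.json")
}
best = min(states, key=lambda name: states[name]["best_metric"])
print(best, states[best]["best_metric"])  # e.g. checkpoint-150 1.4968100786209106
```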
checkpoint-150/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8bdde5c34937c67d70adb2cea431eda05b8cf22eb5ebad425d871a634a8fc84
+size 4411
checkpoint-200/README.md ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
checkpoint-200/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+  "bias": "none",
+  "fan_in_fan_out": null,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "k_proj",
+    "up_proj",
+    "o_proj",
+    "gate_proj",
+    "q_proj",
+    "v_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
checkpoint-200/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42245ccd0d6a53485c7df59d7bda13e2c242bfa3855a784c1e3edba0d804af5e
+size 125374989
checkpoint-200/adapter_model/README.md ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
checkpoint-200/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+  "bias": "none",
+  "fan_in_fan_out": null,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "k_proj",
+    "up_proj",
+    "o_proj",
+    "gate_proj",
+    "q_proj",
+    "v_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
checkpoint-200/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42245ccd0d6a53485c7df59d7bda13e2c242bfa3855a784c1e3edba0d804af5e
+size 125374989
checkpoint-200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ddd52d3cb770a9644ca58a2924034ca29068888aa68d37df9d534df5a615d4bf
+size 250681597
checkpoint-200/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a75a658d44fe38598390ec155c1a51d8fe99b3431b0a3b96be8d88bb3f3cb1b
+size 21687
checkpoint-200/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:562b30f364e08187db5d3919ecd53271f4d3095698fba5dc692c4ed9cabe8b03
+size 21687
checkpoint-200/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7075cf5206fca95776b844726640b7c2f6f7b89d5fbb4201cc739e018f95e8cb
+size 21687
checkpoint-200/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0907d6758e3713a38066511dc86bd53d6a9524ea0a1bb2032c42fa2f66498ddb
+size 21687
checkpoint-200/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e229d60ec309072e519bf994d2598b033f557d6c3dcfdfbc6f6a9da8fefac82
+size 21687