Ciaranshu committed
Commit cfa17a3
Parent: 1b1d027
README.md CHANGED
@@ -1,3 +1,20 @@
  ---
- license: mit
+ library_name: peft
  ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.4.0
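
These settings map onto `transformers`' `BitsAndBytesConfig`. A minimal sketch of the equivalent config object, assuming the training script constructed it this way (the script itself is not part of this commit):

```python
from transformers import BitsAndBytesConfig

# Mirrors the quantization settings listed in the README diff above.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",            # no effect while load_in_4bit=False
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype="float32",
)
```

Since `load_in_8bit` is True, the `bnb_4bit_*` entries are inert defaults recorded alongside the active 8-bit settings.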
adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-2-13b-chat-hf",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
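
The config describes a rank-8 LoRA adapter (alpha 16, dropout 0.05) over the `q_proj` and `v_proj` attention projections of Llama-2-13b-chat. A minimal loading sketch with `peft`; the adapter path is hypothetical, and access to the gated base model is assumed:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM

# Load the base model in 8-bit, matching the training-time quantization.
base = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-13b-chat-hf",
    load_in_8bit=True,
    device_map="auto",
)

# Attach the LoRA weights; "path/to/adapter" is a hypothetical local clone
# of this repository (the directory holding adapter_config.json).
model = PeftModel.from_pretrained(base, "path/to/adapter")
model.eval()
```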
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf8bd30104269d1d15bb25397a74694ee14867dba2fff2978c4eeb8f7f25b62c
+ size 26271757
checkpoint-200/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.4.0
checkpoint-200/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-2-13b-chat-hf",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-200/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf8bd30104269d1d15bb25397a74694ee14867dba2fff2978c4eeb8f7f25b62c
+ size 26271757
checkpoint-200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4fa912fe4be61e1713b00c004e901e7e51b5d683ad7f844e7795c5c4993359cd
+ size 52522949
checkpoint-200/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e28258cdf4b281734c1141ae96be66bd23c633617ca02676935b05a66609d914
+ size 14575
checkpoint-200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0f645d4f6c47283041486abb9587c0828451cbed381a909bd5063c8be291fe2
+ size 627
checkpoint-200/trainer_state.json ADDED
@@ -0,0 +1,144 @@
+ {
+   "best_metric": 0.6959671974182129,
+   "best_model_checkpoint": "../../models/lora/llama-2-13b-multitask_cot_reflection/checkpoint-200",
+   "epoch": 12.5,
+   "global_step": 200,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.62,
+       "learning_rate": 2.9999999999999997e-05,
+       "loss": 2.1339,
+       "step": 10
+     },
+     {
+       "epoch": 1.25,
+       "learning_rate": 5.9999999999999995e-05,
+       "loss": 2.0943,
+       "step": 20
+     },
+     {
+       "epoch": 1.88,
+       "learning_rate": 8.999999999999999e-05,
+       "loss": 1.8574,
+       "step": 30
+     },
+     {
+       "epoch": 2.5,
+       "learning_rate": 0.00011999999999999999,
+       "loss": 1.4125,
+       "step": 40
+     },
+     {
+       "epoch": 3.12,
+       "learning_rate": 0.00015,
+       "loss": 1.0679,
+       "step": 50
+     },
+     {
+       "epoch": 3.75,
+       "learning_rate": 0.00017999999999999998,
+       "loss": 0.9275,
+       "step": 60
+     },
+     {
+       "epoch": 4.38,
+       "learning_rate": 0.00020999999999999998,
+       "loss": 0.8729,
+       "step": 70
+     },
+     {
+       "epoch": 5.0,
+       "learning_rate": 0.00023999999999999998,
+       "loss": 0.8581,
+       "step": 80
+     },
+     {
+       "epoch": 5.62,
+       "learning_rate": 0.00027,
+       "loss": 0.8374,
+       "step": 90
+     },
+     {
+       "epoch": 6.25,
+       "learning_rate": 0.0003,
+       "loss": 0.7997,
+       "step": 100
+     },
+     {
+       "epoch": 6.88,
+       "learning_rate": 0.0002921052631578947,
+       "loss": 0.7573,
+       "step": 110
+     },
+     {
+       "epoch": 7.5,
+       "learning_rate": 0.0002842105263157894,
+       "loss": 0.7445,
+       "step": 120
+     },
+     {
+       "epoch": 8.12,
+       "learning_rate": 0.0002763157894736842,
+       "loss": 0.7352,
+       "step": 130
+     },
+     {
+       "epoch": 8.75,
+       "learning_rate": 0.0002684210526315789,
+       "loss": 0.7108,
+       "step": 140
+     },
+     {
+       "epoch": 9.38,
+       "learning_rate": 0.0002605263157894737,
+       "loss": 0.7131,
+       "step": 150
+     },
+     {
+       "epoch": 10.0,
+       "learning_rate": 0.00025263157894736836,
+       "loss": 0.7051,
+       "step": 160
+     },
+     {
+       "epoch": 10.62,
+       "learning_rate": 0.00024473684210526314,
+       "loss": 0.708,
+       "step": 170
+     },
+     {
+       "epoch": 11.25,
+       "learning_rate": 0.00023684210526315788,
+       "loss": 0.6888,
+       "step": 180
+     },
+     {
+       "epoch": 11.88,
+       "learning_rate": 0.00022894736842105263,
+       "loss": 0.6695,
+       "step": 190
+     },
+     {
+       "epoch": 12.5,
+       "learning_rate": 0.00022105263157894733,
+       "loss": 0.6579,
+       "step": 200
+     },
+     {
+       "epoch": 12.5,
+       "eval_loss": 0.6959671974182129,
+       "eval_runtime": 1.6927,
+       "eval_samples_per_second": 12.406,
+       "eval_steps_per_second": 1.772,
+       "step": 200
+     }
+   ],
+   "max_steps": 480,
+   "num_train_epochs": 30,
+   "total_flos": 5.103476994686976e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
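
The learning-rate column above is consistent with linear warmup to a peak of 3e-4 over the first 100 steps, followed by linear decay toward zero at `max_steps` = 480. A sketch reproducing the logged values; the peak rate and warmup length are inferred from the log rather than stated anywhere in this commit:

```python
def lr_at(step, peak_lr=3e-4, warmup_steps=100, max_steps=480):
    """Linear warmup followed by linear decay, as the logged values suggest."""
    if step < warmup_steps:
        return peak_lr * step / warmup_steps
    return peak_lr * (max_steps - step) / (max_steps - warmup_steps)

# The value logged at step 110 matches the decay branch.
assert abs(lr_at(110) - 0.0002921052631578947) < 1e-12
```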
checkpoint-200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8d398eeaedbb2196de833e0dff3a857dd2f033714573f3bae1c67ba25a7da99
+ size 4027
checkpoint-400/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.4.0
checkpoint-400/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-2-13b-chat-hf",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-400/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0abe301974566ef6fd33c26ad98c09ce255321fc5dfe242c7db0148880807e8f
+ size 26271757
checkpoint-400/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc9317a7264560cd49b9e0601a7af74042090baec7895d6118705f99ab808013
+ size 52523141
checkpoint-400/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:751622289c8a992ecbb17997c540208f4518d36040f9dab912ea76f5cd90b5ab
+ size 14575
checkpoint-400/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09e9cadf02e5dfc5161a89b06942054146e1c2c5cd62cfeaf37b0207c334b84c
+ size 627
checkpoint-400/trainer_state.json ADDED
@@ -0,0 +1,272 @@
+ {
+   "best_metric": 0.6959671974182129,
+   "best_model_checkpoint": "../../models/lora/llama-2-13b-multitask_cot_reflection/checkpoint-200",
+   "epoch": 25.0,
+   "global_step": 400,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.62,
+       "learning_rate": 2.9999999999999997e-05,
+       "loss": 2.1339,
+       "step": 10
+     },
+     {
+       "epoch": 1.25,
+       "learning_rate": 5.9999999999999995e-05,
+       "loss": 2.0943,
+       "step": 20
+     },
+     {
+       "epoch": 1.88,
+       "learning_rate": 8.999999999999999e-05,
+       "loss": 1.8574,
+       "step": 30
+     },
+     {
+       "epoch": 2.5,
+       "learning_rate": 0.00011999999999999999,
+       "loss": 1.4125,
+       "step": 40
+     },
+     {
+       "epoch": 3.12,
+       "learning_rate": 0.00015,
+       "loss": 1.0679,
+       "step": 50
+     },
+     {
+       "epoch": 3.75,
+       "learning_rate": 0.00017999999999999998,
+       "loss": 0.9275,
+       "step": 60
+     },
+     {
+       "epoch": 4.38,
+       "learning_rate": 0.00020999999999999998,
+       "loss": 0.8729,
+       "step": 70
+     },
+     {
+       "epoch": 5.0,
+       "learning_rate": 0.00023999999999999998,
+       "loss": 0.8581,
+       "step": 80
+     },
+     {
+       "epoch": 5.62,
+       "learning_rate": 0.00027,
+       "loss": 0.8374,
+       "step": 90
+     },
+     {
+       "epoch": 6.25,
+       "learning_rate": 0.0003,
+       "loss": 0.7997,
+       "step": 100
+     },
+     {
+       "epoch": 6.88,
+       "learning_rate": 0.0002921052631578947,
+       "loss": 0.7573,
+       "step": 110
+     },
+     {
+       "epoch": 7.5,
+       "learning_rate": 0.0002842105263157894,
+       "loss": 0.7445,
+       "step": 120
+     },
+     {
+       "epoch": 8.12,
+       "learning_rate": 0.0002763157894736842,
+       "loss": 0.7352,
+       "step": 130
+     },
+     {
+       "epoch": 8.75,
+       "learning_rate": 0.0002684210526315789,
+       "loss": 0.7108,
+       "step": 140
+     },
+     {
+       "epoch": 9.38,
+       "learning_rate": 0.0002605263157894737,
+       "loss": 0.7131,
+       "step": 150
+     },
+     {
+       "epoch": 10.0,
+       "learning_rate": 0.00025263157894736836,
+       "loss": 0.7051,
+       "step": 160
+     },
+     {
+       "epoch": 10.62,
+       "learning_rate": 0.00024473684210526314,
+       "loss": 0.708,
+       "step": 170
+     },
+     {
+       "epoch": 11.25,
+       "learning_rate": 0.00023684210526315788,
+       "loss": 0.6888,
+       "step": 180
+     },
+     {
+       "epoch": 11.88,
+       "learning_rate": 0.00022894736842105263,
+       "loss": 0.6695,
+       "step": 190
+     },
+     {
+       "epoch": 12.5,
+       "learning_rate": 0.00022105263157894733,
+       "loss": 0.6579,
+       "step": 200
+     },
+     {
+       "epoch": 12.5,
+       "eval_loss": 0.6959671974182129,
+       "eval_runtime": 1.6927,
+       "eval_samples_per_second": 12.406,
+       "eval_steps_per_second": 1.772,
+       "step": 200
+     },
+     {
+       "epoch": 13.12,
+       "learning_rate": 0.00021315789473684208,
+       "loss": 0.662,
+       "step": 210
+     },
+     {
+       "epoch": 13.75,
+       "learning_rate": 0.00020526315789473683,
+       "loss": 0.6268,
+       "step": 220
+     },
+     {
+       "epoch": 14.38,
+       "learning_rate": 0.00019736842105263157,
+       "loss": 0.6317,
+       "step": 230
+     },
+     {
+       "epoch": 15.0,
+       "learning_rate": 0.0001894736842105263,
+       "loss": 0.6202,
+       "step": 240
+     },
+     {
+       "epoch": 15.62,
+       "learning_rate": 0.00018157894736842105,
+       "loss": 0.6137,
+       "step": 250
+     },
+     {
+       "epoch": 16.25,
+       "learning_rate": 0.0001736842105263158,
+       "loss": 0.6103,
+       "step": 260
+     },
+     {
+       "epoch": 16.88,
+       "learning_rate": 0.00016578947368421052,
+       "loss": 0.5757,
+       "step": 270
+     },
+     {
+       "epoch": 17.5,
+       "learning_rate": 0.00015789473684210524,
+       "loss": 0.5626,
+       "step": 280
+     },
+     {
+       "epoch": 18.12,
+       "learning_rate": 0.00015,
+       "loss": 0.5677,
+       "step": 290
+     },
+     {
+       "epoch": 18.75,
+       "learning_rate": 0.0001421052631578947,
+       "loss": 0.53,
+       "step": 300
+     },
+     {
+       "epoch": 19.38,
+       "learning_rate": 0.00013421052631578946,
+       "loss": 0.5328,
+       "step": 310
+     },
+     {
+       "epoch": 20.0,
+       "learning_rate": 0.00012631578947368418,
+       "loss": 0.5253,
+       "step": 320
+     },
+     {
+       "epoch": 20.62,
+       "learning_rate": 0.00011842105263157894,
+       "loss": 0.5356,
+       "step": 330
+     },
+     {
+       "epoch": 21.25,
+       "learning_rate": 0.00011052631578947366,
+       "loss": 0.4961,
+       "step": 340
+     },
+     {
+       "epoch": 21.88,
+       "learning_rate": 0.00010263157894736841,
+       "loss": 0.4851,
+       "step": 350
+     },
+     {
+       "epoch": 22.5,
+       "learning_rate": 9.473684210526315e-05,
+       "loss": 0.4795,
+       "step": 360
+     },
+     {
+       "epoch": 23.12,
+       "learning_rate": 8.68421052631579e-05,
+       "loss": 0.4783,
+       "step": 370
+     },
+     {
+       "epoch": 23.75,
+       "learning_rate": 7.894736842105262e-05,
+       "loss": 0.4464,
+       "step": 380
+     },
+     {
+       "epoch": 24.38,
+       "learning_rate": 7.105263157894735e-05,
+       "loss": 0.4348,
+       "step": 390
+     },
+     {
+       "epoch": 25.0,
+       "learning_rate": 6.315789473684209e-05,
+       "loss": 0.4555,
+       "step": 400
+     },
+     {
+       "epoch": 25.0,
+       "eval_loss": 0.7709586024284363,
+       "eval_runtime": 1.6824,
+       "eval_samples_per_second": 12.482,
+       "eval_steps_per_second": 1.783,
+       "step": 400
+     }
+   ],
+   "max_steps": 480,
+   "num_train_epochs": 30,
+   "total_flos": 1.0189844879936717e+18,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-400/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8d398eeaedbb2196de833e0dff3a857dd2f033714573f3bae1c67ba25a7da99
+ size 4027
runs/Sep16_00-28-24_ltl-gpu04/events.out.tfevents.1694820504.ltl-gpu04.368812.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df11efd288af2dd866b022da83b2c86e5266ca7426da1ea27b492b4ec6a1e6eb
+ size 12903