erfanvaredi committed on
Commit 42dd07b
1 parent: b75e54e

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - autotrain
+ - text-generation
+ widget:
+ - text: "I love AutoTrain because "
+ ---
+
+ # Model Trained Using AutoTrain
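Note: the card above is the stock AutoTrain template. A minimal inference sketch for this adapter (the repo id is taken from `training_params.json` later in this commit; `peft`, `transformers`, and `accelerate` are assumed to be installed):

```python
# Hedged sketch: attach this LoRA adapter to its base model and generate text.
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

repo_id = "erfanvaredi/zephyr-7b-customer-support-finetuned6"  # repo_id from training_params.json

model = AutoPeftModelForCausalLM.from_pretrained(repo_id, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(repo_id)

inputs = tokenizer("I love AutoTrain because ", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```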
adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "HuggingFaceH4/zephyr-7b-beta",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
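Note: for readers reproducing this setup, the adapter settings above map to a `peft` `LoraConfig` roughly as follows (a sketch, not the exact code AutoTrain executed):

```python
from peft import LoraConfig

# Mirrors adapter_config.json: rank-16 LoRA on the attention q/v projections of zephyr-7b-beta.
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
```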
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ab9ca3267c6f5c4a98b1363be8fce6c3be51f1bdc80ba1ccae9e3bbd7084b34
+ size 27309386
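Note: the `.bin` and `.pt` entries in this commit are Git LFS pointer files; only the object id and byte size are stored in git, while the ~27 MB adapter weights live in LFS storage. One way to check that a downloaded copy matches the pointer (illustrative sketch):

```python
import hashlib
from pathlib import Path

def sha256_of(path: str) -> str:
    """Stream the file in 1 MiB chunks so large checkpoints need not fit in memory."""
    digest = hashlib.sha256()
    with Path(path).open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "6ab9ca3267c6f5c4a98b1363be8fce6c3be51f1bdc80ba1ccae9e3bbd7084b34"  # oid above
print(sha256_of("adapter_model.bin") == expected)
```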
checkpoint-160/README.md ADDED
@@ -0,0 +1,34 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+ - PEFT 0.5.0
+
+ - PEFT 0.5.0
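Note: the quantization block is listed twice in the checkpoint card, which appears to be an artifact of PEFT appending to the card on each save; the settings themselves are consistent. They correspond to a 4-bit NF4 load with fp16 compute, roughly equivalent to this `transformers` sketch:

```python
import torch
from transformers import BitsAndBytesConfig

# Mirrors the card above: 4-bit NF4 quantization, fp16 compute, no double quantization.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float16,
)
```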
checkpoint-160/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "HuggingFaceH4/zephyr-7b-beta",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-160/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ab9ca3267c6f5c4a98b1363be8fce6c3be51f1bdc80ba1ccae9e3bbd7084b34
+ size 27309386
checkpoint-160/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cdd1484c9b9aac2d9e9bf5b9a131feda40668aa7b4b6021253fd873b5eae72b9
+ size 54633978
checkpoint-160/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d047b51b495ab68ab9d47d7b936e937c3e7e36a1d7459f9719bcf095cc1cf91
+ size 14180
checkpoint-160/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67001f2c9895fce0eb04936e64f454d0502c13ede1e1ac3442f55578296ea383
+ size 1064
checkpoint-160/special_tokens_map.json ADDED
@@ -0,0 +1,35 @@
+ {
+ "additional_special_tokens": [
+ "<unk>",
+ "<s>",
+ "</s>"
+ ],
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
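Note: `pad_token` is set to `</s>`, the EOS token, which is the common workaround for Llama/Mistral-family tokenizers that ship without a dedicated padding token. Reproducing that choice on the base tokenizer would look roughly like this (illustrative sketch):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
tokenizer.pad_token = tokenizer.eos_token  # "</s>", matching special_tokens_map.json
```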
checkpoint-160/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-160/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
checkpoint-160/tokenizer_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<unk>",
+ "<s>",
+ "</s>"
+ ],
+ "bos_token": "<s>",
+ "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": true,
+ "model_max_length": 1024,
+ "pad_token": "</s>",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "truncation_side": "left",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": true
+ }
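Note: the `chat_template` above is the Zephyr format (`<|system|>`, `<|user|>`, `<|assistant|>` turns, each terminated by `</s>`), with `model_max_length` 1024 and left-side truncation. It can be rendered through the standard tokenizer API; a minimal sketch (the repo id is an assumption based on `training_params.json`):

```python
from transformers import AutoTokenizer

repo_id = "erfanvaredi/zephyr-7b-customer-support-finetuned6"  # assumed from training_params.json
tokenizer = AutoTokenizer.from_pretrained(repo_id)

messages = [
    {"role": "system", "content": "You are a helpful customer-support assistant."},
    {"role": "user", "content": "How do I reset my password?"},
]
# Renders the Jinja chat_template defined in tokenizer_config.json.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```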
checkpoint-160/trainer_state.json ADDED
@@ -0,0 +1,979 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 9.19047619047619,
+ "eval_steps": 500,
+ "global_step": 160,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 2.3809523809523808e-06,
+ "loss": 1.4962,
+ "step": 1
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 4.7619047619047615e-06,
+ "loss": 1.5498,
+ "step": 2
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 7.142857142857143e-06,
+ "loss": 1.4765,
+ "step": 3
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 9.523809523809523e-06,
+ "loss": 1.4702,
+ "step": 4
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.1904761904761905e-05,
+ "loss": 1.4907,
+ "step": 5
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.4285714285714285e-05,
+ "loss": 1.4473,
+ "step": 6
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.6666666666666667e-05,
+ "loss": 1.4735,
+ "step": 7
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9047619047619046e-05,
+ "loss": 1.4303,
+ "step": 8
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 2.1428571428571428e-05,
+ "loss": 1.3977,
+ "step": 9
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 2.380952380952381e-05,
+ "loss": 1.4021,
+ "step": 10
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 2.6190476190476192e-05,
+ "loss": 1.3709,
+ "step": 11
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 2.857142857142857e-05,
+ "loss": 1.3842,
+ "step": 12
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 3.095238095238095e-05,
+ "loss": 1.3646,
+ "step": 13
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 3.3333333333333335e-05,
+ "loss": 1.3196,
+ "step": 14
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 3.571428571428572e-05,
+ "loss": 1.2991,
+ "step": 15
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 3.809523809523809e-05,
+ "loss": 1.2953,
+ "step": 16
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 4.047619047619048e-05,
+ "loss": 1.2344,
+ "step": 17
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 4.2857142857142856e-05,
+ "loss": 1.2148,
+ "step": 18
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 4.523809523809524e-05,
+ "loss": 1.2092,
+ "step": 19
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 4.761904761904762e-05,
+ "loss": 1.154,
+ "step": 20
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 5e-05,
+ "loss": 1.1733,
+ "step": 21
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 5.2380952380952384e-05,
+ "loss": 1.1218,
+ "step": 22
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 5.4761904761904766e-05,
+ "loss": 1.1347,
+ "step": 23
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 5.714285714285714e-05,
+ "loss": 1.1216,
+ "step": 24
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 5.9523809523809524e-05,
+ "loss": 1.0923,
+ "step": 25
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 6.19047619047619e-05,
+ "loss": 1.0948,
+ "step": 26
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 6.428571428571429e-05,
+ "loss": 1.0617,
+ "step": 27
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 6.666666666666667e-05,
+ "loss": 1.033,
+ "step": 28
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 6.904761904761905e-05,
+ "loss": 1.0342,
+ "step": 29
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.142857142857143e-05,
+ "loss": 1.0059,
+ "step": 30
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.380952380952382e-05,
+ "loss": 0.9941,
+ "step": 31
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.619047619047618e-05,
+ "loss": 0.9912,
+ "step": 32
+ },
+ {
+ "epoch": 2.01,
+ "learning_rate": 7.857142857142858e-05,
+ "loss": 0.9595,
+ "step": 33
+ },
+ {
+ "epoch": 2.02,
+ "learning_rate": 8.095238095238096e-05,
+ "loss": 0.9719,
+ "step": 34
+ },
+ {
+ "epoch": 2.04,
+ "learning_rate": 8.333333333333334e-05,
+ "loss": 0.9437,
+ "step": 35
+ },
+ {
+ "epoch": 2.05,
+ "learning_rate": 8.571428571428571e-05,
+ "loss": 0.9174,
+ "step": 36
+ },
+ {
+ "epoch": 2.06,
+ "learning_rate": 8.80952380952381e-05,
+ "loss": 0.9494,
+ "step": 37
+ },
+ {
+ "epoch": 2.07,
+ "learning_rate": 9.047619047619048e-05,
+ "loss": 0.8654,
+ "step": 38
+ },
+ {
+ "epoch": 2.08,
+ "learning_rate": 9.285714285714286e-05,
+ "loss": 0.8685,
+ "step": 39
+ },
+ {
+ "epoch": 2.1,
+ "learning_rate": 9.523809523809524e-05,
+ "loss": 0.8946,
+ "step": 40
+ },
+ {
+ "epoch": 2.11,
+ "learning_rate": 9.761904761904762e-05,
+ "loss": 0.875,
+ "step": 41
+ },
+ {
+ "epoch": 2.12,
+ "learning_rate": 0.0001,
+ "loss": 0.827,
+ "step": 42
+ },
+ {
+ "epoch": 2.13,
+ "learning_rate": 0.00010238095238095237,
+ "loss": 0.8373,
+ "step": 43
+ },
+ {
+ "epoch": 2.14,
+ "learning_rate": 0.00010476190476190477,
+ "loss": 0.8291,
+ "step": 44
+ },
+ {
+ "epoch": 2.15,
+ "learning_rate": 0.00010714285714285715,
+ "loss": 0.8238,
+ "step": 45
+ },
+ {
+ "epoch": 2.17,
+ "learning_rate": 0.00010952380952380953,
+ "loss": 0.7971,
+ "step": 46
+ },
+ {
+ "epoch": 2.18,
+ "learning_rate": 0.00011190476190476191,
+ "loss": 0.7925,
+ "step": 47
+ },
+ {
+ "epoch": 2.19,
+ "learning_rate": 0.00011428571428571428,
+ "loss": 0.7865,
+ "step": 48
+ },
+ {
+ "epoch": 3.01,
+ "learning_rate": 0.00011666666666666668,
+ "loss": 0.7714,
+ "step": 49
+ },
+ {
+ "epoch": 3.02,
+ "learning_rate": 0.00011904761904761905,
+ "loss": 0.7508,
+ "step": 50
+ },
+ {
+ "epoch": 3.04,
+ "learning_rate": 0.00012142857142857143,
+ "loss": 0.7757,
+ "step": 51
+ },
+ {
+ "epoch": 3.05,
+ "learning_rate": 0.0001238095238095238,
+ "loss": 0.7479,
+ "step": 52
+ },
+ {
+ "epoch": 3.06,
+ "learning_rate": 0.0001261904761904762,
+ "loss": 0.7393,
+ "step": 53
+ },
+ {
+ "epoch": 3.07,
+ "learning_rate": 0.00012857142857142858,
+ "loss": 0.7341,
+ "step": 54
+ },
+ {
+ "epoch": 3.08,
+ "learning_rate": 0.00013095238095238096,
+ "loss": 0.7271,
+ "step": 55
+ },
+ {
+ "epoch": 3.1,
+ "learning_rate": 0.00013333333333333334,
+ "loss": 0.7134,
+ "step": 56
+ },
+ {
+ "epoch": 3.11,
+ "learning_rate": 0.00013571428571428572,
+ "loss": 0.7295,
+ "step": 57
+ },
+ {
+ "epoch": 3.12,
+ "learning_rate": 0.0001380952380952381,
+ "loss": 0.7143,
+ "step": 58
+ },
+ {
+ "epoch": 3.13,
+ "learning_rate": 0.00014047619047619049,
+ "loss": 0.6903,
+ "step": 59
+ },
+ {
+ "epoch": 3.14,
+ "learning_rate": 0.00014285714285714287,
+ "loss": 0.7002,
+ "step": 60
+ },
+ {
+ "epoch": 3.15,
+ "learning_rate": 0.00014523809523809525,
+ "loss": 0.6684,
+ "step": 61
+ },
+ {
+ "epoch": 3.17,
+ "learning_rate": 0.00014761904761904763,
+ "loss": 0.6841,
+ "step": 62
+ },
+ {
+ "epoch": 3.18,
+ "learning_rate": 0.00015000000000000001,
+ "loss": 0.6641,
+ "step": 63
+ },
+ {
+ "epoch": 3.19,
+ "learning_rate": 0.00015238095238095237,
+ "loss": 0.6571,
+ "step": 64
+ },
+ {
+ "epoch": 4.01,
+ "learning_rate": 0.00015476190476190478,
+ "loss": 0.6287,
+ "step": 65
+ },
+ {
+ "epoch": 4.02,
+ "learning_rate": 0.00015714285714285716,
+ "loss": 0.6545,
+ "step": 66
+ },
+ {
+ "epoch": 4.04,
+ "learning_rate": 0.00015952380952380954,
+ "loss": 0.63,
+ "step": 67
+ },
+ {
+ "epoch": 4.05,
+ "learning_rate": 0.00016190476190476192,
+ "loss": 0.639,
+ "step": 68
+ },
+ {
+ "epoch": 4.06,
+ "learning_rate": 0.00016428571428571428,
+ "loss": 0.6242,
+ "step": 69
+ },
+ {
+ "epoch": 4.07,
+ "learning_rate": 0.0001666666666666667,
+ "loss": 0.6127,
+ "step": 70
+ },
+ {
+ "epoch": 4.08,
+ "learning_rate": 0.00016904761904761904,
+ "loss": 0.5939,
+ "step": 71
+ },
+ {
+ "epoch": 4.1,
+ "learning_rate": 0.00017142857142857143,
+ "loss": 0.6449,
+ "step": 72
+ },
+ {
+ "epoch": 4.11,
+ "learning_rate": 0.00017380952380952383,
+ "loss": 0.6348,
+ "step": 73
+ },
+ {
+ "epoch": 4.12,
+ "learning_rate": 0.0001761904761904762,
+ "loss": 0.6129,
+ "step": 74
+ },
+ {
+ "epoch": 4.13,
+ "learning_rate": 0.0001785714285714286,
+ "loss": 0.587,
+ "step": 75
+ },
+ {
+ "epoch": 4.14,
+ "learning_rate": 0.00018095238095238095,
+ "loss": 0.5818,
+ "step": 76
+ },
+ {
+ "epoch": 4.15,
+ "learning_rate": 0.00018333333333333334,
+ "loss": 0.5803,
+ "step": 77
+ },
+ {
+ "epoch": 4.17,
+ "learning_rate": 0.00018571428571428572,
+ "loss": 0.6319,
+ "step": 78
+ },
+ {
+ "epoch": 4.18,
+ "learning_rate": 0.0001880952380952381,
+ "loss": 0.6171,
+ "step": 79
+ },
+ {
+ "epoch": 4.19,
+ "learning_rate": 0.00019047619047619048,
+ "loss": 0.5772,
+ "step": 80
+ },
+ {
+ "epoch": 5.01,
+ "learning_rate": 0.00019285714285714286,
+ "loss": 0.5688,
+ "step": 81
+ },
+ {
+ "epoch": 5.02,
+ "learning_rate": 0.00019523809523809525,
+ "loss": 0.5648,
+ "step": 82
+ },
+ {
+ "epoch": 5.04,
+ "learning_rate": 0.00019761904761904763,
+ "loss": 0.5557,
+ "step": 83
+ },
+ {
+ "epoch": 5.05,
+ "learning_rate": 0.0002,
+ "loss": 0.5314,
+ "step": 84
+ },
+ {
+ "epoch": 5.06,
+ "learning_rate": 0.0001999991365731819,
+ "loss": 0.5659,
+ "step": 85
+ },
+ {
+ "epoch": 5.07,
+ "learning_rate": 0.0001999965463076377,
+ "loss": 0.5497,
+ "step": 86
+ },
+ {
+ "epoch": 5.08,
+ "learning_rate": 0.0001999922924809750,
+ "loss": 0.5202,
+ "step": 87
+ },
+ {
+ "epoch": 5.1,
+ "learning_rate": 0.00019998618546911056,
+ "loss": 0.5209,
+ "step": 88
+ },
+ {
+ "epoch": 5.11,
+ "learning_rate": 0.0001999784150750442,
+ "loss": 0.5657,
+ "step": 89
+ },
+ {
+ "epoch": 5.12,
+ "learning_rate": 0.00019996891820008164,
+ "loss": 0.5286,
+ "step": 90
+ },
+ {
+ "epoch": 5.13,
+ "learning_rate": 0.0001999576950082201,
+ "loss": 0.5218,
+ "step": 91
+ },
+ {
+ "epoch": 5.14,
+ "learning_rate": 0.00019994474569326757,
+ "loss": 0.5362,
+ "step": 92
+ },
+ {
+ "epoch": 5.15,
+ "learning_rate": 0.00019993007047883988,
+ "loss": 0.5371,
+ "step": 93
+ },
+ {
+ "epoch": 5.17,
+ "learning_rate": 0.00019991366961835642,
+ "loss": 0.5427,
+ "step": 94
+ },
+ {
+ "epoch": 5.18,
+ "learning_rate": 0.00019989554339503612,
+ "loss": 0.5583,
+ "step": 95
+ },
+ {
+ "epoch": 5.19,
+ "learning_rate": 0.00019987569212189224,
+ "loss": 0.5376,
+ "step": 96
+ },
+ {
+ "epoch": 6.01,
+ "learning_rate": 0.0001998541161417273,
+ "loss": 0.5138,
+ "step": 97
+ },
+ {
+ "epoch": 6.02,
+ "learning_rate": 0.00019983081582712685,
+ "loss": 0.5236,
+ "step": 98
+ },
+ {
+ "epoch": 6.04,
+ "learning_rate": 0.0001998057915804532,
+ "loss": 0.4955,
+ "step": 99
+ },
+ {
+ "epoch": 6.05,
+ "learning_rate": 0.0001997790438338385,
+ "loss": 0.4824,
+ "step": 100
+ },
+ {
+ "epoch": 6.06,
+ "learning_rate": 0.00019975057304917718,
+ "loss": 0.464,
+ "step": 101
+ },
+ {
+ "epoch": 6.07,
+ "learning_rate": 0.00019972037971811802,
+ "loss": 0.4967,
+ "step": 102
+ },
+ {
+ "epoch": 6.08,
+ "learning_rate": 0.00019968846436205567,
+ "loss": 0.4571,
+ "step": 103
+ },
+ {
+ "epoch": 6.1,
+ "learning_rate": 0.00019965482753212156,
+ "loss": 0.4994,
+ "step": 104
+ },
+ {
+ "epoch": 6.11,
+ "learning_rate": 0.00019961946980917456,
+ "loss": 0.4789,
+ "step": 105
+ },
+ {
+ "epoch": 6.12,
+ "learning_rate": 0.0001995823918037908,
+ "loss": 0.4653,
+ "step": 106
+ },
+ {
+ "epoch": 6.13,
+ "learning_rate": 0.0001995435941562531,
+ "loss": 0.489,
+ "step": 107
+ },
+ {
+ "epoch": 6.14,
+ "learning_rate": 0.00019950307753654017,
+ "loss": 0.501,
+ "step": 108
+ },
+ {
+ "epoch": 6.15,
+ "learning_rate": 0.00019946084264431459,
+ "loss": 0.462,
+ "step": 109
+ },
+ {
+ "epoch": 6.17,
+ "learning_rate": 0.0001994168902089112,
+ "loss": 0.4511,
+ "step": 110
+ },
+ {
+ "epoch": 6.18,
+ "learning_rate": 0.00019937122098932428,
+ "loss": 0.4744,
+ "step": 111
+ },
+ {
+ "epoch": 6.19,
+ "learning_rate": 0.00019932383577419432,
+ "loss": 0.4991,
+ "step": 112
+ },
+ {
+ "epoch": 7.01,
+ "learning_rate": 0.00019927473538179467,
+ "loss": 0.4277,
+ "step": 113
+ },
+ {
+ "epoch": 7.02,
+ "learning_rate": 0.00019922392066001722,
+ "loss": 0.4083,
+ "step": 114
+ },
+ {
+ "epoch": 7.04,
+ "learning_rate": 0.00019917139248635786,
+ "loss": 0.424,
+ "step": 115
+ },
+ {
+ "epoch": 7.05,
+ "learning_rate": 0.0001991171517679013,
+ "loss": 0.4305,
+ "step": 116
+ },
+ {
+ "epoch": 7.06,
+ "learning_rate": 0.0001990611994413053,
+ "loss": 0.4379,
+ "step": 117
+ },
+ {
+ "epoch": 7.07,
+ "learning_rate": 0.00019900353647278466,
+ "loss": 0.4251,
+ "step": 118
+ },
+ {
+ "epoch": 7.08,
+ "learning_rate": 0.00019894416385809444,
+ "loss": 0.4367,
+ "step": 119
+ },
+ {
+ "epoch": 7.1,
+ "learning_rate": 0.00019888308262251285,
+ "loss": 0.4524,
+ "step": 120
+ },
+ {
+ "epoch": 7.11,
+ "learning_rate": 0.0001988202938208234,
+ "loss": 0.4351,
+ "step": 121
+ },
+ {
+ "epoch": 7.12,
+ "learning_rate": 0.00019875579853729676,
+ "loss": 0.4269,
+ "step": 122
+ },
+ {
+ "epoch": 7.13,
+ "learning_rate": 0.00019868959788567212,
+ "loss": 0.4128,
+ "step": 123
+ },
+ {
+ "epoch": 7.14,
+ "learning_rate": 0.00019862169300913785,
+ "loss": 0.4383,
+ "step": 124
+ },
+ {
+ "epoch": 7.15,
+ "learning_rate": 0.0001985520850803117,
+ "loss": 0.43,
+ "step": 125
+ },
+ {
+ "epoch": 7.17,
+ "learning_rate": 0.00019848077530122083,
+ "loss": 0.4613,
+ "step": 126
+ },
+ {
+ "epoch": 7.18,
+ "learning_rate": 0.00019840776490328066,
+ "loss": 0.4438,
+ "step": 127
+ },
+ {
+ "epoch": 7.19,
+ "learning_rate": 0.00019833305514727395,
+ "loss": 0.4393,
+ "step": 128
+ },
+ {
+ "epoch": 8.01,
+ "learning_rate": 0.00019825664732332884,
+ "loss": 0.4018,
+ "step": 129
+ },
+ {
+ "epoch": 8.02,
+ "learning_rate": 0.0001981785427508966,
+ "loss": 0.3602,
+ "step": 130
+ },
+ {
+ "epoch": 8.04,
+ "learning_rate": 0.00019809874277872886,
+ "loss": 0.3728,
+ "step": 131
+ },
+ {
+ "epoch": 8.05,
+ "learning_rate": 0.00019801724878485438,
+ "loss": 0.3727,
+ "step": 132
+ },
+ {
+ "epoch": 8.06,
+ "learning_rate": 0.00019793406217655517,
+ "loss": 0.3898,
+ "step": 133
+ },
+ {
+ "epoch": 8.07,
+ "learning_rate": 0.00019784918439034216,
+ "loss": 0.3964,
+ "step": 134
+ },
+ {
+ "epoch": 8.08,
+ "learning_rate": 0.00019776261689193048,
+ "loss": 0.3741,
+ "step": 135
+ },
+ {
+ "epoch": 8.1,
+ "learning_rate": 0.00019767436117621413,
+ "loss": 0.3853,
+ "step": 136
+ },
+ {
+ "epoch": 8.11,
+ "learning_rate": 0.00019758441876724017,
+ "loss": 0.3783,
+ "step": 137
+ },
+ {
+ "epoch": 8.12,
+ "learning_rate": 0.00019749279121818235,
+ "loss": 0.3946,
+ "step": 138
+ },
+ {
+ "epoch": 8.13,
+ "learning_rate": 0.00019739948011131438,
+ "loss": 0.382,
+ "step": 139
+ },
+ {
+ "epoch": 8.14,
+ "learning_rate": 0.00019730448705798239,
+ "loss": 0.3858,
+ "step": 140
+ },
+ {
+ "epoch": 8.15,
+ "learning_rate": 0.00019720781369857746,
+ "loss": 0.3769,
+ "step": 141
+ },
+ {
+ "epoch": 8.17,
+ "learning_rate": 0.000197109461702507,
+ "loss": 0.3988,
+ "step": 142
+ },
+ {
+ "epoch": 8.18,
+ "learning_rate": 0.00019700943276816603,
+ "loss": 0.388,
+ "step": 143
+ },
+ {
+ "epoch": 8.19,
+ "learning_rate": 0.0001969077286229078,
+ "loss": 0.3886,
+ "step": 144
+ },
+ {
+ "epoch": 9.01,
+ "learning_rate": 0.00019680435102301412,
+ "loss": 0.3481,
+ "step": 145
+ },
+ {
+ "epoch": 9.02,
+ "learning_rate": 0.00019669930175366472,
+ "loss": 0.3392,
+ "step": 146
+ },
+ {
+ "epoch": 9.04,
+ "learning_rate": 0.00019659258262890683,
+ "loss": 0.3199,
+ "step": 147
+ },
+ {
+ "epoch": 9.05,
+ "learning_rate": 0.00019648419549162348,
+ "loss": 0.3502,
+ "step": 148
+ },
+ {
+ "epoch": 9.06,
+ "learning_rate": 0.00019637414221350196,
+ "loss": 0.3302,
+ "step": 149
+ },
+ {
+ "epoch": 9.07,
+ "learning_rate": 0.0001962624246950012,
+ "loss": 0.3369,
+ "step": 150
+ },
+ {
+ "epoch": 9.08,
+ "learning_rate": 0.00019614904486531934,
+ "loss": 0.3271,
+ "step": 151
+ },
+ {
+ "epoch": 9.1,
+ "learning_rate": 0.00019603400468235998,
+ "loss": 0.331,
+ "step": 152
+ },
+ {
+ "epoch": 9.11,
+ "learning_rate": 0.0001959173061326988,
+ "loss": 0.3368,
+ "step": 153
+ },
+ {
+ "epoch": 9.12,
+ "learning_rate": 0.0001957989512315489,
+ "loss": 0.3282,
+ "step": 154
+ },
+ {
+ "epoch": 9.13,
+ "learning_rate": 0.0001956789420227262,
+ "loss": 0.3251,
+ "step": 155
+ },
+ {
+ "epoch": 9.14,
+ "learning_rate": 0.0001955572805786141,
+ "loss": 0.3312,
+ "step": 156
+ },
+ {
+ "epoch": 9.15,
+ "learning_rate": 0.00019543396900012763,
+ "loss": 0.3673,
+ "step": 157
+ },
+ {
+ "epoch": 9.17,
+ "learning_rate": 0.0001953090094166773,
+ "loss": 0.3287,
+ "step": 158
+ },
+ {
+ "epoch": 9.18,
+ "learning_rate": 0.00019518240398613227,
+ "loss": 0.3374,
+ "step": 159
+ },
+ {
+ "epoch": 9.19,
+ "learning_rate": 0.0001950541548947829,
+ "loss": 0.3209,
+ "step": 160
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 840,
+ "num_train_epochs": 10,
+ "save_steps": 500,
+ "total_flos": 8.39611613970432e+16,
+ "trial_name": null,
+ "trial_params": null
+ }
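Note: `log_history` holds one entry per optimizer step; the loss falls from about 1.50 at step 1 to about 0.32 at step 160, while the learning rate warms up to 2e-4 over the first 84 steps (10% of the planned 840) and then follows the cosine schedule. A small sketch for extracting the loss curve from this file:

```python
import json

with open("checkpoint-160/trainer_state.json") as f:
    state = json.load(f)

# Keep only entries that carry a training loss (robust to any eval entries that may appear).
steps = [entry["step"] for entry in state["log_history"] if "loss" in entry]
losses = [entry["loss"] for entry in state["log_history"] if "loss" in entry]
print(f"{len(steps)} logged steps, first loss {losses[0]:.4f}, last loss {losses[-1]:.4f}")
```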
checkpoint-160/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c1bfd0c042ee06ae7b8ce39a008e4889f8413520dc8fb3d6f4aee8d8165cb05
+ size 4536
runs/Nov12_20-05-09_c2af15b8f33b/events.out.tfevents.1699819510.c2af15b8f33b.28162.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf6886ddcff41cf47a4966cc6e21f8ddd97c865932f417892b373fb1a8220cc1
+ size 29668
special_tokens_map.json ADDED
@@ -0,0 +1,35 @@
+ {
+ "additional_special_tokens": [
+ "<unk>",
+ "<s>",
+ "</s>"
+ ],
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<unk>",
+ "<s>",
+ "</s>"
+ ],
+ "bos_token": "<s>",
+ "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": true,
+ "model_max_length": 1024,
+ "pad_token": "</s>",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "truncation_side": "left",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": true
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c1bfd0c042ee06ae7b8ce39a008e4889f8413520dc8fb3d6f4aee8d8165cb05
+ size 4536
training_params.json ADDED
@@ -0,0 +1 @@
+ {"model": "HuggingFaceH4/zephyr-7b-beta", "data_path": ".", "project_name": "zephyr-7b-customer-support-finetuned6", "train_split": "train", "valid_split": null, "text_column": "text", "rejected_text_column": "rejected", "lr": 0.0002, "epochs": 10, "batch_size": 12, "warmup_ratio": 0.1, "gradient_accumulation": 1, "optimizer": "adamw_torch", "scheduler": "cosine", "weight_decay": 0.0, "max_grad_norm": 1.0, "seed": 42, "add_eos_token": false, "block_size": -1, "use_peft": true, "lora_r": 16, "lora_alpha": 32, "lora_dropout": 0.05, "logging_steps": 1, "evaluation_strategy": "accuracy", "save_total_limit": 1, "save_strategy": "epoch", "auto_find_batch_size": false, "fp16": false, "push_to_hub": true, "use_int8": false, "model_max_length": 1024, "repo_id": "erfanvaredi/zephyr-7b-customer-support-finetuned6", "use_int4": true, "trainer": "sft", "target_modules": "q_proj,v_proj", "merge_adapter": false, "username": null, "use_flash_attention_2": false, "log": "tensorboard", "disable_gradient_checkpointing": false, "model_ref": null, "dpo_beta": 0.1, "prompt_text_column": "prompt"}