Transformers
PyTorch
Inference Endpoints
xiuyul commited on
Commit
debca9a
1 Parent(s): fc72051

Upload folder using huggingface_hub

Browse files
README.md CHANGED
@@ -1,3 +1,60 @@
1
  ---
2
  license: apache-2.0
 
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: apache-2.0
3
+ base_model: state-spaces/mamba-2.8b-slimpj
4
+ datasets:
5
+ - HuggingFaceH4/ultrachat_200k
6
+ model-index:
7
+ - name: mamba-2.8b-ultrachat
8
+ results: []
9
  ---
10
+
11
+ # mamba-2.8b-ultrachat
12
+
13
+ This model is a fine-tuned version of [state-spaces/mamba-2.8b-slimpj](https://huggingface.co/state-spaces/mamba-2.8b-slimpj) on the [HuggingFaceH4/ultrachat_200k](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k) dataset.
14
+ It achieves the following results on the evaluation set:
15
+ - Loss: 1.1858
16
+
17
+ ## Model description
18
+
19
+ More information needed
20
+
21
+ ## Intended uses & limitations
22
+
23
+ More information needed
24
+
25
+ ## Training and evaluation data
26
+
27
+ More information needed
28
+
29
+ ## Training procedure
30
+
31
+ ### Training hyperparameters
32
+
33
+ The following hyperparameters were used during training:
34
+ - learning_rate: 2e-05
35
+ - train_batch_size: 4
36
+ - eval_batch_size: 4
37
+ - seed: 42
38
+ - distributed_type: multi-GPU
39
+ - num_devices: 8
40
+ - gradient_accumulation_steps: 16
41
+ - total_train_batch_size: 512
42
+ - total_eval_batch_size: 32
43
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
44
+ - lr_scheduler_type: cosine
45
+ - num_epochs: 1
46
+
47
+ ### Training results
48
+
49
+ | Training Loss | Epoch | Step | Validation Loss |
50
+ |:-------------:|:-----:|:----:|:---------------:|
51
+ | 2.0106 | 0.0 | 1 | 1.9092 |
52
+ | 1.1783 | 0.62 | 250 | 1.1858 |
53
+
54
+
55
+ ### Framework versions
56
+
57
+ - Transformers 4.35.0
58
+ PyTorch 2.1.1+cu121
59
+ - Datasets 2.14.6
60
+ - Tokenizers 0.14.1
all_results.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.62,
3
+ "eval_loss": 1.185780644416809,
4
+ "eval_runtime": 399.119,
5
+ "eval_samples": 23110,
6
+ "eval_samples_per_second": 57.903,
7
+ "eval_steps_per_second": 1.811,
8
+ "train_loss": 1.2281424560546874,
9
+ "train_runtime": 11653.8648,
10
+ "train_samples": 207865,
11
+ "train_samples_per_second": 17.837,
12
+ "train_steps_per_second": 0.035
13
+ }
eval_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.62,
3
+ "eval_loss": 1.185780644416809,
4
+ "eval_runtime": 399.119,
5
+ "eval_samples": 23110,
6
+ "eval_samples_per_second": 57.903,
7
+ "eval_steps_per_second": 1.811
8
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4af6a2a681942dbb41649be2fb4a027b58c346073f039eb9f128a6920ef84071
3
+ size 5536898154
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<|endoftext|>",
3
+ "eos_token": "<|endoftext|>",
4
+ "pad_token": "<|endoftext|>",
5
+ "unk_token": "<|endoftext|>"
6
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "1": {
13
+ "content": "<|padding|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "50254": {
21
+ "content": " ",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": false
27
+ },
28
+ "50255": {
29
+ "content": " ",
30
+ "lstrip": false,
31
+ "normalized": true,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": false
35
+ },
36
+ "50256": {
37
+ "content": " ",
38
+ "lstrip": false,
39
+ "normalized": true,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": false
43
+ },
44
+ "50257": {
45
+ "content": " ",
46
+ "lstrip": false,
47
+ "normalized": true,
48
+ "rstrip": false,
49
+ "single_word": false,
50
+ "special": false
51
+ },
52
+ "50258": {
53
+ "content": " ",
54
+ "lstrip": false,
55
+ "normalized": true,
56
+ "rstrip": false,
57
+ "single_word": false,
58
+ "special": false
59
+ },
60
+ "50259": {
61
+ "content": " ",
62
+ "lstrip": false,
63
+ "normalized": true,
64
+ "rstrip": false,
65
+ "single_word": false,
66
+ "special": false
67
+ },
68
+ "50260": {
69
+ "content": " ",
70
+ "lstrip": false,
71
+ "normalized": true,
72
+ "rstrip": false,
73
+ "single_word": false,
74
+ "special": false
75
+ },
76
+ "50261": {
77
+ "content": " ",
78
+ "lstrip": false,
79
+ "normalized": true,
80
+ "rstrip": false,
81
+ "single_word": false,
82
+ "special": false
83
+ },
84
+ "50262": {
85
+ "content": " ",
86
+ "lstrip": false,
87
+ "normalized": true,
88
+ "rstrip": false,
89
+ "single_word": false,
90
+ "special": false
91
+ },
92
+ "50263": {
93
+ "content": " ",
94
+ "lstrip": false,
95
+ "normalized": true,
96
+ "rstrip": false,
97
+ "single_word": false,
98
+ "special": false
99
+ },
100
+ "50264": {
101
+ "content": " ",
102
+ "lstrip": false,
103
+ "normalized": true,
104
+ "rstrip": false,
105
+ "single_word": false,
106
+ "special": false
107
+ },
108
+ "50265": {
109
+ "content": " ",
110
+ "lstrip": false,
111
+ "normalized": true,
112
+ "rstrip": false,
113
+ "single_word": false,
114
+ "special": false
115
+ },
116
+ "50266": {
117
+ "content": " ",
118
+ "lstrip": false,
119
+ "normalized": true,
120
+ "rstrip": false,
121
+ "single_word": false,
122
+ "special": false
123
+ },
124
+ "50267": {
125
+ "content": " ",
126
+ "lstrip": false,
127
+ "normalized": true,
128
+ "rstrip": false,
129
+ "single_word": false,
130
+ "special": false
131
+ },
132
+ "50268": {
133
+ "content": " ",
134
+ "lstrip": false,
135
+ "normalized": true,
136
+ "rstrip": false,
137
+ "single_word": false,
138
+ "special": false
139
+ },
140
+ "50269": {
141
+ "content": " ",
142
+ "lstrip": false,
143
+ "normalized": true,
144
+ "rstrip": false,
145
+ "single_word": false,
146
+ "special": false
147
+ },
148
+ "50270": {
149
+ "content": " ",
150
+ "lstrip": false,
151
+ "normalized": true,
152
+ "rstrip": false,
153
+ "single_word": false,
154
+ "special": false
155
+ },
156
+ "50271": {
157
+ "content": " ",
158
+ "lstrip": false,
159
+ "normalized": true,
160
+ "rstrip": false,
161
+ "single_word": false,
162
+ "special": false
163
+ },
164
+ "50272": {
165
+ "content": " ",
166
+ "lstrip": false,
167
+ "normalized": true,
168
+ "rstrip": false,
169
+ "single_word": false,
170
+ "special": false
171
+ },
172
+ "50273": {
173
+ "content": " ",
174
+ "lstrip": false,
175
+ "normalized": true,
176
+ "rstrip": false,
177
+ "single_word": false,
178
+ "special": false
179
+ },
180
+ "50274": {
181
+ "content": " ",
182
+ "lstrip": false,
183
+ "normalized": true,
184
+ "rstrip": false,
185
+ "single_word": false,
186
+ "special": false
187
+ },
188
+ "50275": {
189
+ "content": " ",
190
+ "lstrip": false,
191
+ "normalized": true,
192
+ "rstrip": false,
193
+ "single_word": false,
194
+ "special": false
195
+ },
196
+ "50276": {
197
+ "content": " ",
198
+ "lstrip": false,
199
+ "normalized": true,
200
+ "rstrip": false,
201
+ "single_word": false,
202
+ "special": false
203
+ }
204
+ },
205
+ "bos_token": "<|endoftext|>",
206
+ "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
207
+ "clean_up_tokenization_spaces": true,
208
+ "eos_token": "<|endoftext|>",
209
+ "model_max_length": 1000000000000000019884624838656,
210
+ "pad_token": "<|endoftext|>",
211
+ "tokenizer_class": "GPTNeoXTokenizer",
212
+ "unk_token": "<|endoftext|>"
213
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.62,
3
+ "train_loss": 1.2281424560546874,
4
+ "train_runtime": 11653.8648,
5
+ "train_samples": 207865,
6
+ "train_samples_per_second": 17.837,
7
+ "train_steps_per_second": 0.035
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,350 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.6157635467980296,
5
+ "eval_steps": 500,
6
+ "global_step": 250,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 1.9999700625010444e-05,
14
+ "loss": 2.0106,
15
+ "step": 1
16
+ },
17
+ {
18
+ "epoch": 0.0,
19
+ "eval_loss": 1.9091932773590088,
20
+ "eval_runtime": 396.1093,
21
+ "eval_samples_per_second": 58.342,
22
+ "eval_steps_per_second": 1.825,
23
+ "step": 1
24
+ },
25
+ {
26
+ "epoch": 0.01,
27
+ "learning_rate": 1.999251652147735e-05,
28
+ "loss": 1.7041,
29
+ "step": 5
30
+ },
31
+ {
32
+ "epoch": 0.02,
33
+ "learning_rate": 1.997007728639956e-05,
34
+ "loss": 1.5057,
35
+ "step": 10
36
+ },
37
+ {
38
+ "epoch": 0.04,
39
+ "learning_rate": 1.9932715879473385e-05,
40
+ "loss": 1.3651,
41
+ "step": 15
42
+ },
43
+ {
44
+ "epoch": 0.05,
45
+ "learning_rate": 1.9880488219356086e-05,
46
+ "loss": 1.3209,
47
+ "step": 20
48
+ },
49
+ {
50
+ "epoch": 0.06,
51
+ "learning_rate": 1.981347247496222e-05,
52
+ "loss": 1.2837,
53
+ "step": 25
54
+ },
55
+ {
56
+ "epoch": 0.07,
57
+ "learning_rate": 1.973176894846855e-05,
58
+ "loss": 1.2683,
59
+ "step": 30
60
+ },
61
+ {
62
+ "epoch": 0.09,
63
+ "learning_rate": 1.963549992519223e-05,
64
+ "loss": 1.2609,
65
+ "step": 35
66
+ },
67
+ {
68
+ "epoch": 0.1,
69
+ "learning_rate": 1.9524809490566878e-05,
70
+ "loss": 1.2493,
71
+ "step": 40
72
+ },
73
+ {
74
+ "epoch": 0.11,
75
+ "learning_rate": 1.939986331449053e-05,
76
+ "loss": 1.2467,
77
+ "step": 45
78
+ },
79
+ {
80
+ "epoch": 0.12,
81
+ "learning_rate": 1.926084840336821e-05,
82
+ "loss": 1.238,
83
+ "step": 50
84
+ },
85
+ {
86
+ "epoch": 0.14,
87
+ "learning_rate": 1.910797282022027e-05,
88
+ "loss": 1.2322,
89
+ "step": 55
90
+ },
91
+ {
92
+ "epoch": 0.15,
93
+ "learning_rate": 1.894146537327533e-05,
94
+ "loss": 1.2183,
95
+ "step": 60
96
+ },
97
+ {
98
+ "epoch": 0.16,
99
+ "learning_rate": 1.8761575273514005e-05,
100
+ "loss": 1.2114,
101
+ "step": 65
102
+ },
103
+ {
104
+ "epoch": 0.17,
105
+ "learning_rate": 1.8568571761675893e-05,
106
+ "loss": 1.2217,
107
+ "step": 70
108
+ },
109
+ {
110
+ "epoch": 0.18,
111
+ "learning_rate": 1.8362743705288127e-05,
112
+ "loss": 1.216,
113
+ "step": 75
114
+ },
115
+ {
116
+ "epoch": 0.2,
117
+ "learning_rate": 1.814439916631857e-05,
118
+ "loss": 1.2176,
119
+ "step": 80
120
+ },
121
+ {
122
+ "epoch": 0.21,
123
+ "learning_rate": 1.791386494010081e-05,
124
+ "loss": 1.2022,
125
+ "step": 85
126
+ },
127
+ {
128
+ "epoch": 0.22,
129
+ "learning_rate": 1.7671486066220965e-05,
130
+ "loss": 1.2047,
131
+ "step": 90
132
+ },
133
+ {
134
+ "epoch": 0.23,
135
+ "learning_rate": 1.7417625312098453e-05,
136
+ "loss": 1.219,
137
+ "step": 95
138
+ },
139
+ {
140
+ "epoch": 0.25,
141
+ "learning_rate": 1.7152662630033506e-05,
142
+ "loss": 1.2135,
143
+ "step": 100
144
+ },
145
+ {
146
+ "epoch": 0.26,
147
+ "learning_rate": 1.6876994588534234e-05,
148
+ "loss": 1.2006,
149
+ "step": 105
150
+ },
151
+ {
152
+ "epoch": 0.27,
153
+ "learning_rate": 1.659103377877423e-05,
154
+ "loss": 1.1977,
155
+ "step": 110
156
+ },
157
+ {
158
+ "epoch": 0.28,
159
+ "learning_rate": 1.629520819706912e-05,
160
+ "loss": 1.2003,
161
+ "step": 115
162
+ },
163
+ {
164
+ "epoch": 0.3,
165
+ "learning_rate": 1.598996060429634e-05,
166
+ "loss": 1.2023,
167
+ "step": 120
168
+ },
169
+ {
170
+ "epoch": 0.31,
171
+ "learning_rate": 1.56757478632168e-05,
172
+ "loss": 1.2081,
173
+ "step": 125
174
+ },
175
+ {
176
+ "epoch": 0.32,
177
+ "learning_rate": 1.5353040254690396e-05,
178
+ "loss": 1.1959,
179
+ "step": 130
180
+ },
181
+ {
182
+ "epoch": 0.33,
183
+ "learning_rate": 1.5022320773808612e-05,
184
+ "loss": 1.1897,
185
+ "step": 135
186
+ },
187
+ {
188
+ "epoch": 0.34,
189
+ "learning_rate": 1.4684084406997903e-05,
190
+ "loss": 1.1958,
191
+ "step": 140
192
+ },
193
+ {
194
+ "epoch": 0.36,
195
+ "learning_rate": 1.4338837391175582e-05,
196
+ "loss": 1.1818,
197
+ "step": 145
198
+ },
199
+ {
200
+ "epoch": 0.37,
201
+ "learning_rate": 1.3987096456067236e-05,
202
+ "loss": 1.1937,
203
+ "step": 150
204
+ },
205
+ {
206
+ "epoch": 0.38,
207
+ "learning_rate": 1.3629388050819547e-05,
208
+ "loss": 1.186,
209
+ "step": 155
210
+ },
211
+ {
212
+ "epoch": 0.39,
213
+ "learning_rate": 1.3266247556066122e-05,
214
+ "loss": 1.1851,
215
+ "step": 160
216
+ },
217
+ {
218
+ "epoch": 0.41,
219
+ "learning_rate": 1.2898218482625606e-05,
220
+ "loss": 1.185,
221
+ "step": 165
222
+ },
223
+ {
224
+ "epoch": 0.42,
225
+ "learning_rate": 1.252585165803135e-05,
226
+ "loss": 1.1964,
227
+ "step": 170
228
+ },
229
+ {
230
+ "epoch": 0.43,
231
+ "learning_rate": 1.2149704402110243e-05,
232
+ "loss": 1.1899,
233
+ "step": 175
234
+ },
235
+ {
236
+ "epoch": 0.44,
237
+ "learning_rate": 1.1770339692844484e-05,
238
+ "loss": 1.1901,
239
+ "step": 180
240
+ },
241
+ {
242
+ "epoch": 0.46,
243
+ "learning_rate": 1.1388325323764889e-05,
244
+ "loss": 1.179,
245
+ "step": 185
246
+ },
247
+ {
248
+ "epoch": 0.47,
249
+ "learning_rate": 1.1004233054136726e-05,
250
+ "loss": 1.1816,
251
+ "step": 190
252
+ },
253
+ {
254
+ "epoch": 0.48,
255
+ "learning_rate": 1.0618637753210086e-05,
256
+ "loss": 1.1929,
257
+ "step": 195
258
+ },
259
+ {
260
+ "epoch": 0.49,
261
+ "learning_rate": 1.0232116539815558e-05,
262
+ "loss": 1.1995,
263
+ "step": 200
264
+ },
265
+ {
266
+ "epoch": 0.5,
267
+ "learning_rate": 9.845247918592937e-06,
268
+ "loss": 1.1818,
269
+ "step": 205
270
+ },
271
+ {
272
+ "epoch": 0.52,
273
+ "learning_rate": 9.458610914145826e-06,
274
+ "loss": 1.1844,
275
+ "step": 210
276
+ },
277
+ {
278
+ "epoch": 0.53,
279
+ "learning_rate": 9.072784204417995e-06,
280
+ "loss": 1.1863,
281
+ "step": 215
282
+ },
283
+ {
284
+ "epoch": 0.54,
285
+ "learning_rate": 8.688345254588579e-06,
286
+ "loss": 1.1884,
287
+ "step": 220
288
+ },
289
+ {
290
+ "epoch": 0.55,
291
+ "learning_rate": 8.305869452782446e-06,
292
+ "loss": 1.1918,
293
+ "step": 225
294
+ },
295
+ {
296
+ "epoch": 0.57,
297
+ "learning_rate": 7.92592924888925e-06,
298
+ "loss": 1.1751,
299
+ "step": 230
300
+ },
301
+ {
302
+ "epoch": 0.58,
303
+ "learning_rate": 7.549093297780133e-06,
304
+ "loss": 1.1807,
305
+ "step": 235
306
+ },
307
+ {
308
+ "epoch": 0.59,
309
+ "learning_rate": 7.175925608204428e-06,
310
+ "loss": 1.1807,
311
+ "step": 240
312
+ },
313
+ {
314
+ "epoch": 0.6,
315
+ "learning_rate": 6.806984698640202e-06,
316
+ "loss": 1.1865,
317
+ "step": 245
318
+ },
319
+ {
320
+ "epoch": 0.62,
321
+ "learning_rate": 6.442822761362015e-06,
322
+ "loss": 1.1783,
323
+ "step": 250
324
+ },
325
+ {
326
+ "epoch": 0.62,
327
+ "eval_loss": 1.1858137845993042,
328
+ "eval_runtime": 399.1577,
329
+ "eval_samples_per_second": 57.897,
330
+ "eval_steps_per_second": 1.811,
331
+ "step": 250
332
+ },
333
+ {
334
+ "epoch": 0.62,
335
+ "step": 250,
336
+ "total_flos": 0.0,
337
+ "train_loss": 1.2281424560546874,
338
+ "train_runtime": 11653.8648,
339
+ "train_samples_per_second": 17.837,
340
+ "train_steps_per_second": 0.035
341
+ }
342
+ ],
343
+ "logging_steps": 5,
344
+ "max_steps": 406,
345
+ "num_train_epochs": 1,
346
+ "save_steps": 500,
347
+ "total_flos": 0.0,
348
+ "trial_name": null,
349
+ "trial_params": null
350
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57926bd03e607eb3956669a5f98a9d6d4703fa48a456b72502dd5064a06a087f
3
+ size 5624