JulienRPA committed on
Commit 5e9a9ab
1 Parent(s): 017b9bc

Model save

last-checkpoint/added_tokens.json DELETED
The diff for this file is too large to render. See raw diff
 
last-checkpoint/config.json DELETED
@@ -1,182 +0,0 @@
- {
-   "_commit_hash": "bf0ba286dd1951b2990a0aa57cf72f5caa2c4bfb",
-   "_name_or_path": "JulienRPA/BERT2BERT_pretrained_LC-QuAD_2.0",
-   "architectures": [
-     "EncoderDecoderModel"
-   ],
-   "decoder": {
-     "_name_or_path": "decoder_added_vocab",
-     "add_cross_attention": true,
-     "architectures": [
-       "BertModel"
-     ],
-     "attention_probs_dropout_prob": 0.1,
-     "bad_words_ids": null,
-     "begin_suppress_tokens": null,
-     "bos_token_id": null,
-     "chunk_size_feed_forward": 0,
-     "classifier_dropout": null,
-     "cross_attention_hidden_size": null,
-     "decoder_start_token_id": null,
-     "diversity_penalty": 0.0,
-     "do_sample": false,
-     "early_stopping": false,
-     "encoder_no_repeat_ngram_size": 0,
-     "eos_token_id": null,
-     "exponential_decay_length_penalty": null,
-     "finetuning_task": null,
-     "forced_bos_token_id": null,
-     "forced_eos_token_id": null,
-     "gradient_checkpointing": false,
-     "hidden_act": "gelu",
-     "hidden_dropout_prob": 0.1,
-     "hidden_size": 768,
-     "id2label": {
-       "0": "LABEL_0",
-       "1": "LABEL_1"
-     },
-     "initializer_range": 0.02,
-     "intermediate_size": 3072,
-     "is_decoder": true,
-     "is_encoder_decoder": false,
-     "label2id": {
-       "LABEL_0": 0,
-       "LABEL_1": 1
-     },
-     "layer_norm_eps": 1e-12,
-     "length_penalty": 1.0,
-     "max_length": 20,
-     "max_position_embeddings": 512,
-     "min_length": 0,
-     "model_type": "bert",
-     "no_repeat_ngram_size": 0,
-     "num_attention_heads": 12,
-     "num_beam_groups": 1,
-     "num_beams": 1,
-     "num_hidden_layers": 12,
-     "num_return_sequences": 1,
-     "output_attentions": false,
-     "output_hidden_states": false,
-     "output_scores": false,
-     "pad_token_id": 0,
-     "position_embedding_type": "absolute",
-     "prefix": null,
-     "problem_type": null,
-     "pruned_heads": {},
-     "remove_invalid_values": false,
-     "repetition_penalty": 1.0,
-     "return_dict": true,
-     "return_dict_in_generate": false,
-     "sep_token_id": null,
-     "suppress_tokens": null,
-     "task_specific_params": null,
-     "temperature": 1.0,
-     "tf_legacy_loss": false,
-     "tie_encoder_decoder": false,
-     "tie_word_embeddings": true,
-     "tokenizer_class": null,
-     "top_k": 50,
-     "top_p": 1.0,
-     "torch_dtype": "float32",
-     "torchscript": false,
-     "transformers_version": "4.30.0.dev0",
-     "type_vocab_size": 2,
-     "typical_p": 1.0,
-     "use_bfloat16": false,
-     "use_cache": true,
-     "vocab_size": 34522
-   },
-   "decoder_start_token_id": 101,
-   "early_stopping": true,
-   "encoder": {
-     "_name_or_path": "encoder_added_vocab",
-     "add_cross_attention": false,
-     "architectures": [
-       "BertModel"
-     ],
-     "attention_probs_dropout_prob": 0.1,
-     "bad_words_ids": null,
-     "begin_suppress_tokens": null,
-     "bos_token_id": null,
-     "chunk_size_feed_forward": 0,
-     "classifier_dropout": null,
-     "cross_attention_hidden_size": null,
-     "decoder_start_token_id": null,
-     "diversity_penalty": 0.0,
-     "do_sample": false,
-     "early_stopping": false,
-     "encoder_no_repeat_ngram_size": 0,
-     "eos_token_id": null,
-     "exponential_decay_length_penalty": null,
-     "finetuning_task": null,
-     "forced_bos_token_id": null,
-     "forced_eos_token_id": null,
-     "hidden_act": "gelu",
-     "hidden_dropout_prob": 0.1,
-     "hidden_size": 768,
-     "id2label": {
-       "0": "LABEL_0",
-       "1": "LABEL_1"
-     },
-     "initializer_range": 0.02,
-     "intermediate_size": 3072,
-     "is_decoder": false,
-     "is_encoder_decoder": false,
-     "label2id": {
-       "LABEL_0": 0,
-       "LABEL_1": 1
-     },
-     "layer_norm_eps": 1e-12,
-     "length_penalty": 1.0,
-     "max_length": 20,
-     "max_position_embeddings": 512,
-     "min_length": 0,
-     "model_type": "bert",
-     "no_repeat_ngram_size": 0,
-     "num_attention_heads": 12,
-     "num_beam_groups": 1,
-     "num_beams": 1,
-     "num_hidden_layers": 12,
-     "num_return_sequences": 1,
-     "output_attentions": false,
-     "output_hidden_states": false,
-     "output_scores": false,
-     "pad_token_id": 0,
-     "position_embedding_type": "absolute",
-     "prefix": null,
-     "problem_type": null,
-     "pruned_heads": {},
-     "remove_invalid_values": false,
-     "repetition_penalty": 1.0,
-     "return_dict": true,
-     "return_dict_in_generate": false,
-     "sep_token_id": null,
-     "suppress_tokens": null,
-     "task_specific_params": null,
-     "temperature": 1.0,
-     "tf_legacy_loss": false,
-     "tie_encoder_decoder": false,
-     "tie_word_embeddings": true,
-     "tokenizer_class": null,
-     "top_k": 50,
-     "top_p": 1.0,
-     "torch_dtype": "float32",
-     "torchscript": false,
-     "transformers_version": "4.30.0.dev0",
-     "type_vocab_size": 2,
-     "typical_p": 1.0,
-     "use_bfloat16": false,
-     "use_cache": true,
-     "vocab_size": 34522
-   },
-   "eos_token_id": 102,
-   "is_encoder_decoder": true,
-   "max_length": 256,
-   "min_length": 16,
-   "model_type": "encoder-decoder",
-   "num_beams": 10,
-   "pad_token_id": 0,
-   "torch_dtype": "float32",
-   "transformers_version": null,
-   "vocab_size": 34522
- }
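Note: the nested encoder/decoder layout in the deleted config.json is the standard layout for transformers' EncoderDecoderModel, and the repo id comes from the "_name_or_path" field above. The snippet below is only a minimal sketch of how those values could be inspected after loading the published model; it assumes the JulienRPA/BERT2BERT_pretrained_LC-QuAD_2.0 repo stays available.

# Sketch: load the BERT2BERT encoder-decoder and check the values from the
# deleted last-checkpoint/config.json above.
from transformers import EncoderDecoderModel

repo_id = "JulienRPA/BERT2BERT_pretrained_LC-QuAD_2.0"  # from "_name_or_path" above
model = EncoderDecoderModel.from_pretrained(repo_id)

cfg = model.config
print(cfg.model_type)                 # "encoder-decoder"
print(cfg.decoder_start_token_id)     # 101 ([CLS] in BERT vocabularies)
print(cfg.eos_token_id)               # 102 ([SEP])
print(cfg.encoder.vocab_size, cfg.decoder.vocab_size)  # 34522 / 34522 (extended vocab)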
last-checkpoint/generation_config.json DELETED
@@ -1,10 +0,0 @@
- {
-   "decoder_start_token_id": 101,
-   "early_stopping": true,
-   "eos_token_id": 102,
-   "max_length": 256,
-   "min_length": 16,
-   "num_beams": 10,
-   "pad_token_id": 0,
-   "transformers_version": "4.30.0.dev0"
- }
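For reference, these deleted generation defaults (beam search with 10 beams, 16-256 token outputs) can be reproduced at inference time with the standard GenerationConfig API. The sketch below copies the values from the file above; the example question string is made up.

# Sketch: rebuild the deleted generation defaults and pass them to generate().
from transformers import AutoTokenizer, EncoderDecoderModel, GenerationConfig

repo_id = "JulienRPA/BERT2BERT_pretrained_LC-QuAD_2.0"  # assumed to stay available
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = EncoderDecoderModel.from_pretrained(repo_id)

gen_cfg = GenerationConfig(
    decoder_start_token_id=101,
    early_stopping=True,
    eos_token_id=102,
    max_length=256,
    min_length=16,
    num_beams=10,
    pad_token_id=0,
)

inputs = tokenizer("Who is the author of Le Petit Prince?", return_tensors="pt")  # example input
outputs = model.generate(**inputs, generation_config=gen_cfg)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))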
last-checkpoint/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7c7f2ae7866cbe859e9a83202cb446faa715f7b1a80217fe6e627acfe4157a7a
- size 2023671531
last-checkpoint/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a4cc796916aefe4e38a5f73ee609fcf3cd5f4797936a93022e6f5c7a7bf2e8dd
- size 1014236857
last-checkpoint/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:79f52f64e625b9a5d4c4efe5f98776e33220b02b2d0b1d9ac547192bd484b739
- size 14575
last-checkpoint/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1fe6ae17ba1adb51797dbf26168b5b12ea8351b1902a73d1e47eeffc8523949e
- size 627
last-checkpoint/special_tokens_map.json DELETED
@@ -1,7 +0,0 @@
- {
-   "cls_token": "[CLS]",
-   "mask_token": "[MASK]",
-   "pad_token": "[PAD]",
-   "sep_token": "[SEP]",
-   "unk_token": "[UNK]"
- }
last-checkpoint/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
last-checkpoint/tokenizer_config.json DELETED
@@ -1,15 +0,0 @@
- {
-   "clean_up_tokenization_spaces": true,
-   "cls_token": "[CLS]",
-   "do_basic_tokenize": true,
-   "do_lower_case": false,
-   "mask_token": "[MASK]",
-   "model_max_length": 256,
-   "never_split": null,
-   "pad_token": "[PAD]",
-   "sep_token": "[SEP]",
-   "strip_accents": null,
-   "tokenize_chinese_chars": true,
-   "tokenizer_class": "BertTokenizer",
-   "unk_token": "[UNK]"
- }
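The two deleted tokenizer files describe a cased BertTokenizer with a 256-token limit and the usual BERT special tokens. The sketch below is a hedged example of loading the published tokenizer and confirming those settings; exact attribute availability depends on whether the fast or slow tokenizer class is resolved.

# Sketch: load the tokenizer these deleted checkpoint files describe.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("JulienRPA/BERT2BERT_pretrained_LC-QuAD_2.0")

print(type(tokenizer).__name__)    # BertTokenizerFast (or BertTokenizer)
print(tokenizer.model_max_length)  # 256
print(getattr(tokenizer, "do_lower_case", None))  # False -> cased model
print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.pad_token)  # [CLS] [SEP] [PAD]
print(len(tokenizer))              # should match vocab_size 34522 from config.json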
last-checkpoint/trainer_state.json DELETED
@@ -1,412 +0,0 @@
- {
-   "best_metric": null,
-   "best_model_checkpoint": null,
-   "epoch": 282.05128205128204,
-   "global_step": 11000,
-   "is_hyper_param_search": false,
-   "is_local_process_zero": true,
-   "is_world_process_zero": true,
-   "log_history": [
-     {
-       "epoch": 12.82,
-       "learning_rate": 1.24e-05,
-       "loss": 4.0279,
-       "step": 500
-     },
-     {
-       "epoch": 12.82,
-       "eval_bleu": 49.841,
-       "eval_em": 0.0,
-       "eval_gen_len": 51.6403,
-       "eval_loss": 2.4031472206115723,
-       "eval_rm": 0.0,
-       "eval_runtime": 118.4236,
-       "eval_samples_per_second": 3.521,
-       "eval_steps_per_second": 0.448,
-       "step": 500
-     },
-     {
-       "epoch": 25.64,
-       "learning_rate": 2.4900000000000002e-05,
-       "loss": 1.3442,
-       "step": 1000
-     },
-     {
-       "epoch": 25.64,
-       "eval_bleu": 85.0177,
-       "eval_em": 0.0,
-       "eval_gen_len": 57.9784,
-       "eval_loss": 0.501366138458252,
-       "eval_rm": 0.0,
-       "eval_runtime": 163.8536,
-       "eval_samples_per_second": 2.545,
-       "eval_steps_per_second": 0.323,
-       "step": 1000
-     },
-     {
-       "epoch": 38.46,
-       "learning_rate": 3.74e-05,
-       "loss": 0.2522,
-       "step": 1500
-     },
-     {
-       "epoch": 38.46,
-       "eval_bleu": 94.0714,
-       "eval_em": 0.0168,
-       "eval_gen_len": 57.9137,
-       "eval_loss": 0.3293180763721466,
-       "eval_rm": 0.0216,
-       "eval_runtime": 113.0226,
-       "eval_samples_per_second": 3.69,
-       "eval_steps_per_second": 0.469,
-       "step": 1500
-     },
-     {
-       "epoch": 51.28,
-       "learning_rate": 4.99e-05,
-       "loss": 0.1534,
-       "step": 2000
-     },
-     {
-       "epoch": 51.28,
-       "eval_bleu": 94.4328,
-       "eval_em": 0.0024,
-       "eval_gen_len": 58.9448,
-       "eval_loss": 0.320736825466156,
-       "eval_rm": 0.0072,
-       "eval_runtime": 116.3746,
-       "eval_samples_per_second": 3.583,
-       "eval_steps_per_second": 0.455,
-       "step": 2000
-     },
-     {
-       "epoch": 64.1,
-       "learning_rate": 4.744329896907217e-05,
-       "loss": 0.1305,
-       "step": 2500
-     },
-     {
-       "epoch": 64.1,
-       "eval_bleu": 94.0708,
-       "eval_em": 0.0,
-       "eval_gen_len": 59.6115,
-       "eval_loss": 0.3247060477733612,
-       "eval_rm": 0.0,
-       "eval_runtime": 117.0173,
-       "eval_samples_per_second": 3.564,
-       "eval_steps_per_second": 0.453,
-       "step": 2500
-     },
-     {
-       "epoch": 76.92,
-       "learning_rate": 4.48659793814433e-05,
-       "loss": 0.1226,
-       "step": 3000
-     },
-     {
-       "epoch": 76.92,
-       "eval_bleu": 94.3143,
-       "eval_em": 0.0024,
-       "eval_gen_len": 58.235,
-       "eval_loss": 0.33251264691352844,
-       "eval_rm": 0.0024,
-       "eval_runtime": 119.1624,
-       "eval_samples_per_second": 3.499,
-       "eval_steps_per_second": 0.445,
-       "step": 3000
-     },
-     {
-       "epoch": 89.74,
-       "learning_rate": 4.228865979381443e-05,
-       "loss": 0.1131,
-       "step": 3500
-     },
-     {
-       "epoch": 89.74,
-       "eval_bleu": 94.5678,
-       "eval_em": 0.0048,
-       "eval_gen_len": 59.6811,
-       "eval_loss": 0.3400600850582123,
-       "eval_rm": 0.0144,
-       "eval_runtime": 116.7251,
-       "eval_samples_per_second": 3.572,
-       "eval_steps_per_second": 0.454,
-       "step": 3500
-     },
-     {
-       "epoch": 102.56,
-       "learning_rate": 3.971134020618557e-05,
-       "loss": 0.1053,
-       "step": 4000
-     },
-     {
-       "epoch": 102.56,
-       "eval_bleu": 94.4738,
-       "eval_em": 0.0168,
-       "eval_gen_len": 59.0288,
-       "eval_loss": 0.3373829424381256,
-       "eval_rm": 0.0552,
-       "eval_runtime": 118.4954,
-       "eval_samples_per_second": 3.519,
-       "eval_steps_per_second": 0.447,
-       "step": 4000
-     },
-     {
-       "epoch": 115.38,
-       "learning_rate": 3.71340206185567e-05,
-       "loss": 0.0999,
-       "step": 4500
-     },
-     {
-       "epoch": 115.38,
-       "eval_bleu": 94.6291,
-       "eval_em": 0.0336,
-       "eval_gen_len": 58.6283,
-       "eval_loss": 0.3437003791332245,
-       "eval_rm": 0.0624,
-       "eval_runtime": 119.5949,
-       "eval_samples_per_second": 3.487,
-       "eval_steps_per_second": 0.443,
-       "step": 4500
-     },
-     {
-       "epoch": 128.21,
-       "learning_rate": 3.455670103092783e-05,
-       "loss": 0.0941,
-       "step": 5000
-     },
-     {
-       "epoch": 128.21,
-       "eval_bleu": 94.7896,
-       "eval_em": 0.0695,
-       "eval_gen_len": 58.4149,
-       "eval_loss": 0.351246178150177,
-       "eval_rm": 0.1271,
-       "eval_runtime": 121.3634,
-       "eval_samples_per_second": 3.436,
-       "eval_steps_per_second": 0.437,
-       "step": 5000
-     },
-     {
-       "epoch": 141.03,
-       "learning_rate": 3.197938144329897e-05,
-       "loss": 0.0904,
-       "step": 5500
-     },
-     {
-       "epoch": 141.03,
-       "eval_bleu": 94.4101,
-       "eval_em": 0.0719,
-       "eval_gen_len": 58.2518,
-       "eval_loss": 0.34235823154449463,
-       "eval_rm": 0.1439,
-       "eval_runtime": 118.818,
-       "eval_samples_per_second": 3.51,
-       "eval_steps_per_second": 0.446,
-       "step": 5500
-     },
-     {
-       "epoch": 153.85,
-       "learning_rate": 2.9402061855670106e-05,
-       "loss": 0.0833,
-       "step": 6000
-     },
-     {
-       "epoch": 153.85,
-       "eval_bleu": 94.7141,
-       "eval_em": 0.0887,
-       "eval_gen_len": 59.0312,
-       "eval_loss": 0.3461511433124542,
-       "eval_rm": 0.1775,
-       "eval_runtime": 116.2495,
-       "eval_samples_per_second": 3.587,
-       "eval_steps_per_second": 0.456,
-       "step": 6000
-     },
-     {
-       "epoch": 166.67,
-       "learning_rate": 2.6824742268041237e-05,
-       "loss": 0.0772,
-       "step": 6500
-     },
-     {
-       "epoch": 166.67,
-       "eval_bleu": 94.6758,
-       "eval_em": 0.0911,
-       "eval_gen_len": 59.0767,
-       "eval_loss": 0.34671926498413086,
-       "eval_rm": 0.2062,
-       "eval_runtime": 116.1647,
-       "eval_samples_per_second": 3.59,
-       "eval_steps_per_second": 0.456,
-       "step": 6500
-     },
-     {
-       "epoch": 179.49,
-       "learning_rate": 2.4247422680412372e-05,
-       "loss": 0.0722,
-       "step": 7000
-     },
-     {
-       "epoch": 179.49,
-       "eval_bleu": 94.5698,
-       "eval_em": 0.1055,
-       "eval_gen_len": 58.1415,
-       "eval_loss": 0.3461613953113556,
-       "eval_rm": 0.2398,
-       "eval_runtime": 119.2771,
-       "eval_samples_per_second": 3.496,
-       "eval_steps_per_second": 0.444,
-       "step": 7000
-     },
-     {
-       "epoch": 192.31,
-       "learning_rate": 2.1670103092783507e-05,
-       "loss": 0.0669,
-       "step": 7500
-     },
-     {
-       "epoch": 192.31,
-       "eval_bleu": 95.0365,
-       "eval_em": 0.1223,
-       "eval_gen_len": 58.7794,
-       "eval_loss": 0.35367459058761597,
-       "eval_rm": 0.2782,
-       "eval_runtime": 115.018,
-       "eval_samples_per_second": 3.626,
-       "eval_steps_per_second": 0.461,
-       "step": 7500
-     },
-     {
-       "epoch": 205.13,
-       "learning_rate": 1.9092783505154642e-05,
-       "loss": 0.062,
-       "step": 8000
-     },
-     {
-       "epoch": 205.13,
-       "eval_bleu": 94.8694,
-       "eval_em": 0.1247,
-       "eval_gen_len": 58.211,
-       "eval_loss": 0.35051023960113525,
-       "eval_rm": 0.2686,
-       "eval_runtime": 113.7476,
-       "eval_samples_per_second": 3.666,
-       "eval_steps_per_second": 0.466,
-       "step": 8000
-     },
-     {
-       "epoch": 217.95,
-       "learning_rate": 1.6515463917525774e-05,
-       "loss": 0.0576,
-       "step": 8500
-     },
-     {
-       "epoch": 217.95,
-       "eval_bleu": 94.8168,
-       "eval_em": 0.1271,
-       "eval_gen_len": 59.0791,
-       "eval_loss": 0.3510896563529968,
-       "eval_rm": 0.2926,
-       "eval_runtime": 117.1223,
-       "eval_samples_per_second": 3.56,
-       "eval_steps_per_second": 0.453,
-       "step": 8500
-     },
-     {
-       "epoch": 230.77,
-       "learning_rate": 1.3938144329896907e-05,
-       "loss": 0.0539,
-       "step": 9000
-     },
-     {
-       "epoch": 230.77,
-       "eval_bleu": 95.1935,
-       "eval_em": 0.1367,
-       "eval_gen_len": 58.6787,
-       "eval_loss": 0.34899094700813293,
-       "eval_rm": 0.3046,
-       "eval_runtime": 117.1796,
-       "eval_samples_per_second": 3.559,
-       "eval_steps_per_second": 0.452,
-       "step": 9000
-     },
-     {
-       "epoch": 243.59,
-       "learning_rate": 1.1360824742268042e-05,
-       "loss": 0.0502,
-       "step": 9500
-     },
-     {
-       "epoch": 243.59,
-       "eval_bleu": 95.1882,
-       "eval_em": 0.1319,
-       "eval_gen_len": 58.5228,
-       "eval_loss": 0.3490062654018402,
-       "eval_rm": 0.3141,
-       "eval_runtime": 118.559,
-       "eval_samples_per_second": 3.517,
-       "eval_steps_per_second": 0.447,
-       "step": 9500
-     },
-     {
-       "epoch": 256.41,
-       "learning_rate": 8.783505154639175e-06,
-       "loss": 0.0473,
-       "step": 10000
-     },
-     {
-       "epoch": 256.41,
-       "eval_bleu": 95.1198,
-       "eval_em": 0.1319,
-       "eval_gen_len": 58.4245,
-       "eval_loss": 0.3504057824611664,
-       "eval_rm": 0.307,
-       "eval_runtime": 118.462,
-       "eval_samples_per_second": 3.52,
-       "eval_steps_per_second": 0.447,
-       "step": 10000
-     },
-     {
-       "epoch": 269.23,
-       "learning_rate": 6.206185567010309e-06,
-       "loss": 0.045,
-       "step": 10500
-     },
-     {
-       "epoch": 269.23,
-       "eval_bleu": 95.047,
-       "eval_em": 0.1343,
-       "eval_gen_len": 58.3213,
-       "eval_loss": 0.35046613216400146,
-       "eval_rm": 0.307,
-       "eval_runtime": 118.1147,
-       "eval_samples_per_second": 3.53,
-       "eval_steps_per_second": 0.449,
-       "step": 10500
-     },
-     {
-       "epoch": 282.05,
-       "learning_rate": 3.6288659793814435e-06,
-       "loss": 0.0429,
-       "step": 11000
-     },
-     {
-       "epoch": 282.05,
-       "eval_bleu": 95.2397,
-       "eval_em": 0.1391,
-       "eval_gen_len": 58.7242,
-       "eval_loss": 0.3522409200668335,
-       "eval_rm": 0.3046,
-       "eval_runtime": 119.4326,
-       "eval_samples_per_second": 3.492,
-       "eval_steps_per_second": 0.444,
-       "step": 11000
-     }
-   ],
-   "max_steps": 11700,
-   "num_train_epochs": 300,
-   "total_flos": 8893000475652480.0,
-   "trial_name": null,
-   "trial_params": null
- }
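The deleted trainer state shows eval_bleu climbing from about 49.8 at step 500 to 95.24 at step 11,000 and exact match (eval_em) from 0.0 to about 0.14 over roughly 282 epochs. The sketch below summarises a local copy of such a file with nothing but the standard library; the path "last-checkpoint/trainer_state.json" is a hypothetical local path.

# Sketch: pull the evaluation curve out of a local trainer_state.json.
import json

with open("last-checkpoint/trainer_state.json") as f:  # hypothetical local copy
    state = json.load(f)

eval_points = [e for e in state["log_history"] if "eval_bleu" in e]
for e in eval_points:
    print(f'step {e["step"]:>5}: BLEU {e["eval_bleu"]:.2f}, EM {e["eval_em"]:.4f}, loss {e["eval_loss"]:.4f}')

best = max(eval_points, key=lambda e: e["eval_bleu"])
print("best BLEU", best["eval_bleu"], "at step", best["step"])  # 95.2397 at step 11000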
last-checkpoint/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:48f4bc00b89cc61426524602270e52394137beccf4e05eb3ce2d552fad490150
- size 4155
last-checkpoint/vocab.txt DELETED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a4cc796916aefe4e38a5f73ee609fcf3cd5f4797936a93022e6f5c7a7bf2e8dd
+ oid sha256:2f46e800aeafb8b6440b06f8f2e84661b4334433ced607cb9e23decc05f37075
  size 1014236857
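Only the LFS pointer changed here: pytorch_model.bin now resolves to a different object with the same byte size, so the weights were overwritten. A minimal sketch for checking a downloaded copy against the new pointer follows; the local filename is an assumption, and the expected digest is the "+ oid sha256:..." value above.

# Sketch: verify a downloaded pytorch_model.bin against the new LFS oid.
import hashlib

EXPECTED = "2f46e800aeafb8b6440b06f8f2e84661b4334433ced607cb9e23decc05f37075"

sha = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:  # assumed local download path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert sha.hexdigest() == EXPECTED, "weights differ from the committed LFS object"
print("sha256 matches:", sha.hexdigest())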
runs/Jun02_07-11-36_8c8a3ec9844a/events.out.tfevents.1685690790.8c8a3ec9844a.3155.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:310ed5f8c9724b666625f94c5a1a09d80670fd013a61cbed7bb7887362ed9450
- size 9829
+ oid sha256:ffae3aaa44884deb84b8b2ee28fd2d82b48640a15cf8ee5f99d7c29194cf4dbe
+ size 10802
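The updated TensorBoard event file can be read back without launching TensorBoard. Below is a hedged sketch using tensorboard's EventAccumulator; the path is the repo-relative one above, and tag names such as "eval/bleu" are assumptions about how Trainer named its scalars, so list the tags first.

# Sketch: read scalars from the updated event file with TensorBoard's reader.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

path = "runs/Jun02_07-11-36_8c8a3ec9844a/events.out.tfevents.1685690790.8c8a3ec9844a.3155.0"
acc = EventAccumulator(path)
acc.Reload()

print(acc.Tags()["scalars"])            # discover the actual scalar tag names first
for event in acc.Scalars("eval/bleu"):  # assumed tag; substitute one printed above
    print(event.step, event.value)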