madlag commited on
Commit
716b039
1 Parent(s): 4aff22a

Initial revision.

Browse files
config.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "bert-large-uncased",
3
+ "architectures": [
4
+ "BertForQuestionAnswering"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "gradient_checkpointing": false,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.1,
10
+ "hidden_size": 1024,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 4096,
13
+ "layer_norm_eps": 1e-12,
14
+ "max_position_embeddings": 512,
15
+ "model_type": "bert",
16
+ "num_attention_heads": 16,
17
+ "num_hidden_layers": 24,
18
+ "pad_token_id": 0,
19
+ "position_embedding_type": "absolute",
20
+ "transformers_version": "4.5.0.dev0",
21
+ "type_vocab_size": 2,
22
+ "use_cache": true,
23
+ "vocab_size": 30522
24
+ }
eval_results.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"exact": 78.6321906847469, "f1": 81.5816656803201, "total": 11873, "HasAns_exact": 73.73481781376518, "HasAns_f1": 79.64222615088413, "HasAns_total": 5928, "NoAns_exact": 83.51555929352396, "NoAns_f1": 83.51555929352396, "NoAns_total": 5945, "best_exact": 78.6321906847469, "best_exact_thresh": 0.0, "best_f1": 81.58166568032006, "best_f1_thresh": 0.0, "epoch": 1.59}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2403aa6b558c922393a8a0d0f58920bfdd9036cb631d992e9d3c2191200aa117
3
+ size 1336547639
run.sh ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ python run_qa.py \
2
+ --model_name_or_path bert-large-uncased \
3
+ --dataset_name squad_v2 \
4
+ --do_train \
5
+ --do_eval \
6
+ --save_steps 2500 \
7
+ --eval_steps 2500 \
8
+ --evaluation_strategy steps \
9
+ --per_device_train_batch_size 12 \
10
+ --learning_rate 3e-5 \
11
+ --num_train_epochs 2 \
12
+ --max_seq_length 384 \
13
+ --doc_stride 128 \
14
+ --output_dir bert-large-uncased-squadv2 \
15
+ --version_2_with_negative 1
scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5848985f52ec5922c5fc3e341345b65668fd7e6496fab9e090deab4a4bc50567
3
+ size 623
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-large-uncased"}
trainer_state.json ADDED
@@ -0,0 +1,345 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.5938069216757742,
5
+ "global_step": 17500,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.05,
12
+ "learning_rate": 2.9316939890710385e-05,
13
+ "loss": 2.0652,
14
+ "step": 500
15
+ },
16
+ {
17
+ "epoch": 0.09,
18
+ "learning_rate": 2.8633879781420765e-05,
19
+ "loss": 1.4682,
20
+ "step": 1000
21
+ },
22
+ {
23
+ "epoch": 0.14,
24
+ "learning_rate": 2.795081967213115e-05,
25
+ "loss": 1.2878,
26
+ "step": 1500
27
+ },
28
+ {
29
+ "epoch": 0.18,
30
+ "learning_rate": 2.7267759562841533e-05,
31
+ "loss": 1.2088,
32
+ "step": 2000
33
+ },
34
+ {
35
+ "epoch": 0.23,
36
+ "learning_rate": 2.6584699453551913e-05,
37
+ "loss": 1.1539,
38
+ "step": 2500
39
+ },
40
+ {
41
+ "HasAns_exact": 59.78407557354926,
42
+ "HasAns_f1": 65.49896652234426,
43
+ "HasAns_total": 5928,
44
+ "NoAns_exact": 80.87468460891506,
45
+ "NoAns_f1": 80.87468460891506,
46
+ "NoAns_total": 5945,
47
+ "best_exact": 70.34447907015918,
48
+ "best_exact_thresh": 0.0,
49
+ "best_f1": 73.19783319670329,
50
+ "best_f1_thresh": 0.0,
51
+ "epoch": 0.23,
52
+ "exact": 70.34447907015918,
53
+ "f1": 73.19783319670331,
54
+ "step": 2500,
55
+ "total": 11873
56
+ },
57
+ {
58
+ "epoch": 0.27,
59
+ "learning_rate": 2.5901639344262294e-05,
60
+ "loss": 1.1802,
61
+ "step": 3000
62
+ },
63
+ {
64
+ "epoch": 0.32,
65
+ "learning_rate": 2.5218579234972678e-05,
66
+ "loss": 1.0921,
67
+ "step": 3500
68
+ },
69
+ {
70
+ "epoch": 0.36,
71
+ "learning_rate": 2.453551912568306e-05,
72
+ "loss": 1.047,
73
+ "step": 4000
74
+ },
75
+ {
76
+ "epoch": 0.41,
77
+ "learning_rate": 2.3852459016393442e-05,
78
+ "loss": 1.0404,
79
+ "step": 4500
80
+ },
81
+ {
82
+ "epoch": 0.46,
83
+ "learning_rate": 2.3169398907103826e-05,
84
+ "loss": 1.0074,
85
+ "step": 5000
86
+ },
87
+ {
88
+ "HasAns_exact": 65.9412955465587,
89
+ "HasAns_f1": 71.69124855898053,
90
+ "HasAns_total": 5928,
91
+ "NoAns_exact": 83.11185870479395,
92
+ "NoAns_f1": 83.11185870479395,
93
+ "NoAns_total": 5945,
94
+ "best_exact": 74.53886970437127,
95
+ "best_exact_thresh": 0.0,
96
+ "best_f1": 77.40972976144505,
97
+ "best_f1_thresh": 0.0,
98
+ "epoch": 0.46,
99
+ "exact": 74.53886970437127,
100
+ "f1": 77.40972976144509,
101
+ "step": 5000,
102
+ "total": 11873
103
+ },
104
+ {
105
+ "epoch": 0.5,
106
+ "learning_rate": 2.248633879781421e-05,
107
+ "loss": 1.017,
108
+ "step": 5500
109
+ },
110
+ {
111
+ "epoch": 0.55,
112
+ "learning_rate": 2.180327868852459e-05,
113
+ "loss": 0.9725,
114
+ "step": 6000
115
+ },
116
+ {
117
+ "epoch": 0.59,
118
+ "learning_rate": 2.1120218579234974e-05,
119
+ "loss": 0.9779,
120
+ "step": 6500
121
+ },
122
+ {
123
+ "epoch": 0.64,
124
+ "learning_rate": 2.0437158469945358e-05,
125
+ "loss": 0.9617,
126
+ "step": 7000
127
+ },
128
+ {
129
+ "epoch": 0.68,
130
+ "learning_rate": 1.975409836065574e-05,
131
+ "loss": 0.929,
132
+ "step": 7500
133
+ },
134
+ {
135
+ "HasAns_exact": 72.33468286099865,
136
+ "HasAns_f1": 78.59956513847068,
137
+ "HasAns_total": 5928,
138
+ "NoAns_exact": 79.22624053826745,
139
+ "NoAns_f1": 79.22624053826745,
140
+ "NoAns_total": 5945,
141
+ "best_exact": 75.78539543502063,
142
+ "best_exact_thresh": 0.0,
143
+ "best_f1": 78.9133514815846,
144
+ "best_f1_thresh": 0.0,
145
+ "epoch": 0.68,
146
+ "exact": 75.78539543502063,
147
+ "f1": 78.9133514815847,
148
+ "step": 7500,
149
+ "total": 11873
150
+ },
151
+ {
152
+ "epoch": 0.73,
153
+ "learning_rate": 1.907103825136612e-05,
154
+ "loss": 0.9407,
155
+ "step": 8000
156
+ },
157
+ {
158
+ "epoch": 0.77,
159
+ "learning_rate": 1.8387978142076503e-05,
160
+ "loss": 0.921,
161
+ "step": 8500
162
+ },
163
+ {
164
+ "epoch": 0.82,
165
+ "learning_rate": 1.7704918032786887e-05,
166
+ "loss": 0.9025,
167
+ "step": 9000
168
+ },
169
+ {
170
+ "epoch": 0.87,
171
+ "learning_rate": 1.7021857923497267e-05,
172
+ "loss": 0.8724,
173
+ "step": 9500
174
+ },
175
+ {
176
+ "epoch": 0.91,
177
+ "learning_rate": 1.633879781420765e-05,
178
+ "loss": 0.8794,
179
+ "step": 10000
180
+ },
181
+ {
182
+ "HasAns_exact": 75.55668016194332,
183
+ "HasAns_f1": 81.90355530149,
184
+ "HasAns_total": 5928,
185
+ "NoAns_exact": 75.02102607232969,
186
+ "NoAns_f1": 75.02102607232969,
187
+ "NoAns_total": 5945,
188
+ "best_exact": 75.2884696369915,
189
+ "best_exact_thresh": 0.0,
190
+ "best_f1": 78.45736341507896,
191
+ "best_f1_thresh": 0.0,
192
+ "epoch": 0.91,
193
+ "exact": 75.2884696369915,
194
+ "f1": 78.45736341507894,
195
+ "step": 10000,
196
+ "total": 11873
197
+ },
198
+ {
199
+ "epoch": 0.96,
200
+ "learning_rate": 1.5655737704918035e-05,
201
+ "loss": 0.8788,
202
+ "step": 10500
203
+ },
204
+ {
205
+ "epoch": 1.0,
206
+ "learning_rate": 1.4972677595628415e-05,
207
+ "loss": 0.8614,
208
+ "step": 11000
209
+ },
210
+ {
211
+ "epoch": 1.05,
212
+ "learning_rate": 1.4289617486338798e-05,
213
+ "loss": 0.5678,
214
+ "step": 11500
215
+ },
216
+ {
217
+ "epoch": 1.09,
218
+ "learning_rate": 1.3606557377049181e-05,
219
+ "loss": 0.5483,
220
+ "step": 12000
221
+ },
222
+ {
223
+ "epoch": 1.14,
224
+ "learning_rate": 1.2923497267759564e-05,
225
+ "loss": 0.5724,
226
+ "step": 12500
227
+ },
228
+ {
229
+ "HasAns_exact": 73.582995951417,
230
+ "HasAns_f1": 80.21421656448703,
231
+ "HasAns_total": 5928,
232
+ "NoAns_exact": 80.31959629941127,
233
+ "NoAns_f1": 80.31959629941127,
234
+ "NoAns_total": 5945,
235
+ "best_exact": 76.95611892529269,
236
+ "best_exact_thresh": 0.0,
237
+ "best_f1": 80.26698187436004,
238
+ "best_f1_thresh": 0.0,
239
+ "epoch": 1.14,
240
+ "exact": 76.95611892529269,
241
+ "f1": 80.26698187436024,
242
+ "step": 12500,
243
+ "total": 11873
244
+ },
245
+ {
246
+ "epoch": 1.18,
247
+ "learning_rate": 1.2240437158469946e-05,
248
+ "loss": 0.5641,
249
+ "step": 13000
250
+ },
251
+ {
252
+ "epoch": 1.23,
253
+ "learning_rate": 1.1557377049180328e-05,
254
+ "loss": 0.5712,
255
+ "step": 13500
256
+ },
257
+ {
258
+ "epoch": 1.28,
259
+ "learning_rate": 1.087431693989071e-05,
260
+ "loss": 0.5751,
261
+ "step": 14000
262
+ },
263
+ {
264
+ "epoch": 1.32,
265
+ "learning_rate": 1.0191256830601094e-05,
266
+ "loss": 0.5325,
267
+ "step": 14500
268
+ },
269
+ {
270
+ "epoch": 1.37,
271
+ "learning_rate": 9.508196721311476e-06,
272
+ "loss": 0.5416,
273
+ "step": 15000
274
+ },
275
+ {
276
+ "HasAns_exact": 72.94197031039137,
277
+ "HasAns_f1": 79.45948359952195,
278
+ "HasAns_total": 5928,
279
+ "NoAns_exact": 83.06139613120268,
280
+ "NoAns_f1": 83.06139613120268,
281
+ "NoAns_total": 5945,
282
+ "best_exact": 78.00892781942221,
283
+ "best_exact_thresh": 0.0,
284
+ "best_f1": 81.26301851073573,
285
+ "best_f1_thresh": 0.0,
286
+ "epoch": 1.37,
287
+ "exact": 78.00892781942221,
288
+ "f1": 81.2630185107358,
289
+ "step": 15000,
290
+ "total": 11873
291
+ },
292
+ {
293
+ "epoch": 1.41,
294
+ "learning_rate": 8.825136612021857e-06,
295
+ "loss": 0.5361,
296
+ "step": 15500
297
+ },
298
+ {
299
+ "epoch": 1.46,
300
+ "learning_rate": 8.14207650273224e-06,
301
+ "loss": 0.5417,
302
+ "step": 16000
303
+ },
304
+ {
305
+ "epoch": 1.5,
306
+ "learning_rate": 7.459016393442623e-06,
307
+ "loss": 0.5478,
308
+ "step": 16500
309
+ },
310
+ {
311
+ "epoch": 1.55,
312
+ "learning_rate": 6.775956284153006e-06,
313
+ "loss": 0.5157,
314
+ "step": 17000
315
+ },
316
+ {
317
+ "epoch": 1.59,
318
+ "learning_rate": 6.092896174863388e-06,
319
+ "loss": 0.5517,
320
+ "step": 17500
321
+ },
322
+ {
323
+ "HasAns_exact": 73.73481781376518,
324
+ "HasAns_f1": 79.64222615088413,
325
+ "HasAns_total": 5928,
326
+ "NoAns_exact": 83.51555929352396,
327
+ "NoAns_f1": 83.51555929352396,
328
+ "NoAns_total": 5945,
329
+ "best_exact": 78.6321906847469,
330
+ "best_exact_thresh": 0.0,
331
+ "best_f1": 81.58166568032006,
332
+ "best_f1_thresh": 0.0,
333
+ "epoch": 1.59,
334
+ "exact": 78.6321906847469,
335
+ "f1": 81.5816656803201,
336
+ "step": 17500,
337
+ "total": 11873
338
+ }
339
+ ],
340
+ "max_steps": 21960,
341
+ "num_train_epochs": 2,
342
+ "total_flos": 1.616435859777915e+17,
343
+ "trial_name": null,
344
+ "trial_params": null
345
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11c9922bff70152d6882db2c0e1bcf88b1d47aa6153da091a6a0230a18497005
3
+ size 2351
vocab.txt ADDED
The diff for this file is too large to render. See raw diff