ChrisZeng committed
Commit a5570bd
1 Parent(s): 55f895f

Model save

checkpoint-1630/config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "_name_or_path": "ynie/electra-large-discriminator-snli_mnli_fever_anli_R1_R2_R3-nli",
+   "architectures": [
+     "ElectraForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "embedding_size": 1024,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "entailment",
+     "1": "neutral",
+     "2": "contradiction"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "contradiction": 2,
+     "entailment": 0,
+     "neutral": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "electra",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "multi_label_classification",
+   "summary_activation": "gelu",
+   "summary_last_dropout": 0.1,
+   "summary_type": "first",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.16.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
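This config pins the classification head: ElectraForSequenceClassification with three NLI labels and problem_type set to multi_label_classification, so each of entailment/neutral/contradiction is scored independently with a sigmoid rather than a softmax. A minimal loading sketch, assuming a local clone in which the checkpoint-1630 directory exists (the path and example sentence pair are illustrative, not from the commit):

```python
# Minimal sketch (not part of this commit): load the checkpoint locally.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-1630")
model = AutoModelForSequenceClassification.from_pretrained("checkpoint-1630")
model.eval()

inputs = tokenizer("A man is sleeping.", "A person is awake.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# problem_type is "multi_label_classification", so score each label with a
# sigmoid instead of softmaxing over the three classes.
scores = torch.sigmoid(logits)[0]
for i, score in enumerate(scores):
    print(model.config.id2label[i], float(score))
```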
checkpoint-1630/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f10ac8aa6914a68e479931a864f18ba10f4c2c3365275928cdcaec362c14888
+ size 2681490814
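The large binaries in this commit are stored through Git LFS, so what is checked in is a three-line pointer file (spec version, sha256 oid, byte size) like the one above. A hedged sketch of reading those fields back; the path is an assumption about a local clone, and every *.pt/*.bin pointer below follows the same "key value" format:

```python
# Parse a Git LFS pointer file into its key/value fields.
def parse_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("checkpoint-1630/optimizer.pt")
print(ptr["oid"], int(ptr["size"]))  # sha256:1f10ac8a... 2681490814
```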
checkpoint-1630/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3f456ad6fff0711c705ef4a02fde64a2461f640a2ebaf6d8128cf037da6c194
+ size 1340743917
checkpoint-1630/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19de93fa7ce45dde7d1a60c4c97b9e4ec495777424b4e2a14fb06079d8444157
+ size 14503
checkpoint-1630/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7e72f4673e938bbb6afc7231af6207ee922a3271ce6da4b803c942382702de9
+ size 559
checkpoint-1630/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c924e180775c2b50e4679ec9d5d5d33fb3eed968a176cbb01236411034beab21
+ size 623
checkpoint-1630/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
checkpoint-1630/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1630/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "normalization": true, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "google/electra-large-discriminator", "tokenizer_class": "ElectraTokenizer"}
checkpoint-1630/trainer_state.json ADDED
@@ -0,0 +1,176 @@
+ {
+   "best_metric": 0.2981628179550171,
+   "best_model_checkpoint": "outputs/electra-nli-efl-tweeteval/checkpoint-978",
+   "epoch": 9.997323135755257,
+   "global_step": 1630,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 9.499999999999999e-07,
+       "loss": 0.4384,
+       "step": 163
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.7444061962134251,
+       "eval_f1": 0.7308261375858633,
+       "eval_loss": 0.39615127444267273,
+       "eval_runtime": 9.0599,
+       "eval_samples_per_second": 256.514,
+       "eval_steps_per_second": 32.119,
+       "step": 163
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 9e-07,
+       "loss": 0.3447,
+       "step": 326
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.76592082616179,
+       "eval_f1": 0.7552159046464709,
+       "eval_loss": 0.3409559428691864,
+       "eval_runtime": 9.4378,
+       "eval_samples_per_second": 246.244,
+       "eval_steps_per_second": 30.833,
+       "step": 326
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 8.499999999999999e-07,
+       "loss": 0.3057,
+       "step": 489
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.7749569707401033,
+       "eval_f1": 0.768808341108185,
+       "eval_loss": 0.32338443398475647,
+       "eval_runtime": 9.0418,
+       "eval_samples_per_second": 257.028,
+       "eval_steps_per_second": 32.184,
+       "step": 489
+     },
+     {
+       "epoch": 4.0,
+       "learning_rate": 8e-07,
+       "loss": 0.287,
+       "step": 652
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.7857142857142857,
+       "eval_f1": 0.7778970154753132,
+       "eval_loss": 0.3068828284740448,
+       "eval_runtime": 9.3662,
+       "eval_samples_per_second": 248.128,
+       "eval_steps_per_second": 31.069,
+       "step": 652
+     },
+     {
+       "epoch": 5.0,
+       "learning_rate": 7.5e-07,
+       "loss": 0.2742,
+       "step": 815
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.7887263339070568,
+       "eval_f1": 0.7821763089027973,
+       "eval_loss": 0.30303624272346497,
+       "eval_runtime": 8.908,
+       "eval_samples_per_second": 260.89,
+       "eval_steps_per_second": 32.667,
+       "step": 815
+     },
+     {
+       "epoch": 6.0,
+       "learning_rate": 7e-07,
+       "loss": 0.2676,
+       "step": 978
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.7938898450946644,
+       "eval_f1": 0.7850614050415754,
+       "eval_loss": 0.2981628179550171,
+       "eval_runtime": 9.0189,
+       "eval_samples_per_second": 257.681,
+       "eval_steps_per_second": 32.266,
+       "step": 978
+     },
+     {
+       "epoch": 7.0,
+       "learning_rate": 6.5e-07,
+       "loss": 0.2585,
+       "step": 1141
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 0.7908777969018933,
+       "eval_f1": 0.7821955847641968,
+       "eval_loss": 0.3001907467842102,
+       "eval_runtime": 8.8872,
+       "eval_samples_per_second": 261.499,
+       "eval_steps_per_second": 32.744,
+       "step": 1141
+     },
+     {
+       "epoch": 8.0,
+       "learning_rate": 6e-07,
+       "loss": 0.2526,
+       "step": 1304
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 0.7943201376936316,
+       "eval_f1": 0.7876461988304093,
+       "eval_loss": 0.30516260862350464,
+       "eval_runtime": 9.0042,
+       "eval_samples_per_second": 258.103,
+       "eval_steps_per_second": 32.318,
+       "step": 1304
+     },
+     {
+       "epoch": 9.0,
+       "learning_rate": 5.5e-07,
+       "loss": 0.2479,
+       "step": 1467
+     },
+     {
+       "epoch": 9.0,
+       "eval_accuracy": 0.7938898450946644,
+       "eval_f1": 0.784665589216992,
+       "eval_loss": 0.2996860444545746,
+       "eval_runtime": 9.1157,
+       "eval_samples_per_second": 254.944,
+       "eval_steps_per_second": 31.923,
+       "step": 1467
+     },
+     {
+       "epoch": 10.0,
+       "learning_rate": 5e-07,
+       "loss": 0.2451,
+       "step": 1630
+     },
+     {
+       "epoch": 10.0,
+       "eval_accuracy": 0.7956110154905336,
+       "eval_f1": 0.787314593871832,
+       "eval_loss": 0.3013566732406616,
+       "eval_runtime": 9.3541,
+       "eval_samples_per_second": 248.446,
+       "eval_steps_per_second": 31.109,
+       "step": 1630
+     }
+   ],
+   "max_steps": 3260,
+   "num_train_epochs": 20,
+   "total_flos": 3.0441815576382144e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
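trainer_state.json carries the per-epoch log: training loss and a linearly decaying learning rate at each 163-step epoch boundary, plus eval metrics. Note that best_model_checkpoint still points at checkpoint-978, since eval_loss bottomed out at epoch 6 (0.2982) and has drifted slightly upward since. A short sketch of recovering that from the log; the local path is an assumption:

```python
# Find the best evaluation step recorded in trainer_state.json.
import json

with open("checkpoint-1630/trainer_state.json") as f:
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_loss" in e]
best = min(evals, key=lambda e: e["eval_loss"])
print(best["step"], best["eval_loss"])   # 978 0.2981628179550171
assert best["eval_loss"] == state["best_metric"]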
checkpoint-1630/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50dde5eeea306f54118173d342686475ad9209b6c2cac103f7b114d5f582dc36
+ size 3119
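training_args.bin is the pickled TrainingArguments object the Trainer saves alongside each checkpoint; it can be restored with torch.load for inspection. A hedged sketch, assuming a local clone with LFS files pulled; transformers must be importable for unpickling, and recent torch versions may require weights_only=False because this is a full pickle:

```python
# Inspect the saved TrainingArguments (path is an assumption).
import torch

args = torch.load("checkpoint-1630/training_args.bin")
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```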
checkpoint-1630/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1793/config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "_name_or_path": "ynie/electra-large-discriminator-snli_mnli_fever_anli_R1_R2_R3-nli",
+   "architectures": [
+     "ElectraForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "embedding_size": 1024,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "entailment",
+     "1": "neutral",
+     "2": "contradiction"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "contradiction": 2,
+     "entailment": 0,
+     "neutral": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "electra",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "multi_label_classification",
+   "summary_activation": "gelu",
+   "summary_last_dropout": 0.1,
+   "summary_type": "first",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.16.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
checkpoint-1793/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af8228fd6cf58783605e60520c21c70b9445fed6a90eeec2376e01fa43a9008c
+ size 2681490814
checkpoint-1793/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d4dbd4b1eb604850accc73f71776c97a1e07665ffd69d89f3c5e7d30e3f1f7c
+ size 1340743917
checkpoint-1793/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:726164d2bc62c68696d3d456b8b1f6aeca340f4529265e518db9792e9a355cd8
+ size 14503
checkpoint-1793/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:102acc952d72cdec1a2ca0aba7dd1acd73037d69b054e4a8eb81a3b60b73c6b6
+ size 559
checkpoint-1793/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:caf14e34d5b3abd5a96ccc41d7e0ae86ff875f6a745203e1f8c2429070129f3e
+ size 623
checkpoint-1793/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
checkpoint-1793/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1793/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "normalization": true, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "google/electra-large-discriminator", "tokenizer_class": "ElectraTokenizer"}
checkpoint-1793/trainer_state.json ADDED
@@ -0,0 +1,192 @@
+ {
+   "best_metric": 0.2981628179550171,
+   "best_model_checkpoint": "outputs/electra-nli-efl-tweeteval/checkpoint-978",
+   "epoch": 10.997323135755257,
+   "global_step": 1793,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 9.499999999999999e-07,
+       "loss": 0.4384,
+       "step": 163
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.7444061962134251,
+       "eval_f1": 0.7308261375858633,
+       "eval_loss": 0.39615127444267273,
+       "eval_runtime": 9.0599,
+       "eval_samples_per_second": 256.514,
+       "eval_steps_per_second": 32.119,
+       "step": 163
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 9e-07,
+       "loss": 0.3447,
+       "step": 326
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.76592082616179,
+       "eval_f1": 0.7552159046464709,
+       "eval_loss": 0.3409559428691864,
+       "eval_runtime": 9.4378,
+       "eval_samples_per_second": 246.244,
+       "eval_steps_per_second": 30.833,
+       "step": 326
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 8.499999999999999e-07,
+       "loss": 0.3057,
+       "step": 489
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.7749569707401033,
+       "eval_f1": 0.768808341108185,
+       "eval_loss": 0.32338443398475647,
+       "eval_runtime": 9.0418,
+       "eval_samples_per_second": 257.028,
+       "eval_steps_per_second": 32.184,
+       "step": 489
+     },
+     {
+       "epoch": 4.0,
+       "learning_rate": 8e-07,
+       "loss": 0.287,
+       "step": 652
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.7857142857142857,
+       "eval_f1": 0.7778970154753132,
+       "eval_loss": 0.3068828284740448,
+       "eval_runtime": 9.3662,
+       "eval_samples_per_second": 248.128,
+       "eval_steps_per_second": 31.069,
+       "step": 652
+     },
+     {
+       "epoch": 5.0,
+       "learning_rate": 7.5e-07,
+       "loss": 0.2742,
+       "step": 815
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.7887263339070568,
+       "eval_f1": 0.7821763089027973,
+       "eval_loss": 0.30303624272346497,
+       "eval_runtime": 8.908,
+       "eval_samples_per_second": 260.89,
+       "eval_steps_per_second": 32.667,
+       "step": 815
+     },
+     {
+       "epoch": 6.0,
+       "learning_rate": 7e-07,
+       "loss": 0.2676,
+       "step": 978
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.7938898450946644,
+       "eval_f1": 0.7850614050415754,
+       "eval_loss": 0.2981628179550171,
+       "eval_runtime": 9.0189,
+       "eval_samples_per_second": 257.681,
+       "eval_steps_per_second": 32.266,
+       "step": 978
+     },
+     {
+       "epoch": 7.0,
+       "learning_rate": 6.5e-07,
+       "loss": 0.2585,
+       "step": 1141
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 0.7908777969018933,
+       "eval_f1": 0.7821955847641968,
+       "eval_loss": 0.3001907467842102,
+       "eval_runtime": 8.8872,
+       "eval_samples_per_second": 261.499,
+       "eval_steps_per_second": 32.744,
+       "step": 1141
+     },
+     {
+       "epoch": 8.0,
+       "learning_rate": 6e-07,
+       "loss": 0.2526,
+       "step": 1304
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 0.7943201376936316,
+       "eval_f1": 0.7876461988304093,
+       "eval_loss": 0.30516260862350464,
+       "eval_runtime": 9.0042,
+       "eval_samples_per_second": 258.103,
+       "eval_steps_per_second": 32.318,
+       "step": 1304
+     },
+     {
+       "epoch": 9.0,
+       "learning_rate": 5.5e-07,
+       "loss": 0.2479,
+       "step": 1467
+     },
+     {
+       "epoch": 9.0,
+       "eval_accuracy": 0.7938898450946644,
+       "eval_f1": 0.784665589216992,
+       "eval_loss": 0.2996860444545746,
+       "eval_runtime": 9.1157,
+       "eval_samples_per_second": 254.944,
+       "eval_steps_per_second": 31.923,
+       "step": 1467
+     },
+     {
+       "epoch": 10.0,
+       "learning_rate": 5e-07,
+       "loss": 0.2451,
+       "step": 1630
+     },
+     {
+       "epoch": 10.0,
+       "eval_accuracy": 0.7956110154905336,
+       "eval_f1": 0.787314593871832,
+       "eval_loss": 0.3013566732406616,
+       "eval_runtime": 9.3541,
+       "eval_samples_per_second": 248.446,
+       "eval_steps_per_second": 31.109,
+       "step": 1630
+     },
+     {
+       "epoch": 11.0,
+       "learning_rate": 4.5e-07,
+       "loss": 0.2397,
+       "step": 1793
+     },
+     {
+       "epoch": 11.0,
+       "eval_accuracy": 0.7943201376936316,
+       "eval_f1": 0.7872381827932275,
+       "eval_loss": 0.30036163330078125,
+       "eval_runtime": 9.1941,
+       "eval_samples_per_second": 252.771,
+       "eval_steps_per_second": 31.651,
+       "step": 1793
+     }
+   ],
+   "max_steps": 3260,
+   "num_train_epochs": 20,
+   "total_flos": 3.348318385671187e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-1793/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50dde5eeea306f54118173d342686475ad9209b6c2cac103f7b114d5f582dc36
+ size 3119
checkpoint-1793/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
config.json CHANGED
@@ -28,7 +28,6 @@
    "num_hidden_layers": 24,
    "pad_token_id": 0,
    "position_embedding_type": "absolute",
-   "problem_type": "multi_label_classification",
    "summary_activation": "gelu",
    "summary_last_dropout": 0.1,
    "summary_type": "first",
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7633807fb9e36e6b1a07c28414b0d317873a57fcff768c36a39a20807d0f3b4f
+ oid sha256:1017cd7d6d785c901c6c1b5a7c8a543da183235f0b9932da80f8f3cd0484598b
  size 1340743917
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:50dde5eeea306f54118173d342686475ad9209b6c2cac103f7b114d5f582dc36
+ oid sha256:aa5b3cdd065caa2d04215020fd56dc58b2f03c96229986e222c4af6a4f7d97b7
  size 3119