mengzhouxia committed on
Commit
94d8da8
1 Parent(s): 49c7d37

first commit

Files changed (4)
  1. config.json +191 -0
  2. pytorch_model.bin +3 -0
  3. tokenizer_config.json +1 -0
  4. vocab.txt +0 -0
config.json ADDED
@@ -0,0 +1,191 @@
+ {
+   "architectures": [
+     "NewBertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "decompose_qk": false,
+   "decompose_vo": false,
+   "do_distill": true,
+   "do_emb_distill": false,
+   "do_layer_distill": true,
+   "do_mha_distill": false,
+   "do_mha_layer_distill": false,
+   "finetuning_task": "qnli",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_attentions": true,
+   "output_hidden_states": true,
+   "pad_token_id": 0,
+   "pruned_heads": {
+     "0": [
+       1,
+       2,
+       3,
+       4,
+       5,
+       6,
+       7,
+       9,
+       10,
+       11
+     ],
+     "1": [
+       0,
+       2,
+       3,
+       5,
+       6,
+       7,
+       8,
+       9,
+       10
+     ],
+     "2": [
+       1,
+       2,
+       3,
+       4,
+       5,
+       7,
+       8,
+       9,
+       10,
+       11
+     ],
+     "3": [
+       1,
+       2,
+       3,
+       4,
+       5,
+       6,
+       7,
+       8,
+       10,
+       11
+     ],
+     "4": [
+       0,
+       1,
+       2,
+       3,
+       4,
+       5,
+       6,
+       7,
+       8,
+       9,
+       10,
+       11
+     ],
+     "5": [
+       0,
+       1,
+       2,
+       3,
+       4,
+       5,
+       6,
+       7,
+       8,
+       9,
+       10,
+       11
+     ],
+     "6": [
+       0,
+       1,
+       2,
+       3,
+       4,
+       5,
+       6,
+       7,
+       8,
+       9,
+       10,
+       11
+     ],
+     "7": [
+       0,
+       1,
+       2,
+       3,
+       4,
+       5,
+       6,
+       7,
+       8,
+       9,
+       11
+     ],
+     "8": [
+       0,
+       1,
+       2,
+       3,
+       4,
+       5,
+       6,
+       7,
+       8,
+       9,
+       10,
+       11
+     ],
+     "9": [
+       0,
+       1,
+       2,
+       3,
+       4,
+       5,
+       6,
+       7,
+       9,
+       10
+     ],
+     "10": [
+       0,
+       1,
+       2,
+       3,
+       4,
+       5,
+       6,
+       7,
+       8,
+       9,
+       10,
+       11
+     ],
+     "11": [
+       0,
+       1,
+       2,
+       3,
+       4,
+       5,
+       6,
+       7,
+       8,
+       9,
+       10,
+       11
+     ]
+   },
+   "qk_denominator": "ori",
+   "sephidden_pruned": false,
+   "transform_embedding": false,
+   "type_vocab_size": 2,
+   "vocab_size": 30522
+ }
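The `pruned_heads` map above records, per encoder layer, which of the 12 attention heads were removed: layers 4, 5, 6, 8, 10 and 11 lose all 12 heads, while the remaining layers keep one to three each. Below is a minimal sketch of reproducing the partial part of that pattern on a stock `BertForSequenceClassification`; note that `NewBertForSequenceClassification` is a custom class from the authors' codebase, not part of the `transformers` library, and stock BERT modules may not support removing every head in a layer (presumably one reason the custom class exists), so the fully pruned layers are skipped here.

```python
# A minimal sketch, assuming a stock BERT checkpoint as a stand-in for the
# authors' custom NewBertForSequenceClassification class.
from transformers import BertForSequenceClassification

model = BertForSequenceClassification.from_pretrained(
    "bert-base-uncased", num_labels=2
)

# Partially pruned layers from the pruned_heads map in config.json above.
# Layers 4, 5, 6, 8, 10 and 11 (all 12 heads removed) are omitted, since
# stock BERT may not handle an attention block with zero heads.
heads_to_prune = {
    0: [1, 2, 3, 4, 5, 6, 7, 9, 10, 11],     # keeps heads 0 and 8
    1: [0, 2, 3, 5, 6, 7, 8, 9, 10],         # keeps heads 1, 4 and 11
    2: [1, 2, 3, 4, 5, 7, 8, 9, 10, 11],     # keeps heads 0 and 6
    3: [1, 2, 3, 4, 5, 6, 7, 8, 10, 11],     # keeps heads 0 and 9
    7: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11],   # keeps head 10
    9: [0, 1, 2, 3, 4, 5, 6, 7, 9, 10],      # keeps heads 8 and 11
}
model.prune_heads(heads_to_prune)  # also recorded in model.config.pruned_heads
```

When a checkpoint's config.json already carries a `pruned_heads` entry, as this one does, `transformers` applies the pruning while initializing the model, so for a matching checkpoint no manual `prune_heads` call should be needed.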
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7d4cb376aa063497d6eb6a880d3fd9caecb49b42238165e2b35b2386ac3817a
+ size 116249035
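The three lines above are a Git LFS pointer, not the weights themselves: the ~116 MB binary is stored out of band and resolved at checkout. A minimal sketch for checking a downloaded copy against the recorded oid (the local file path is an assumption):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so the 116 MB binary is never fully in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# "pytorch_model.bin" is assumed to be the locally downloaded file.
expected = "c7d4cb376aa063497d6eb6a880d3fd9caecb49b42238165e2b35b2386ac3817a"
assert sha256_of("pytorch_model.bin") == expected
```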
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "model_max_length": 512}
vocab.txt ADDED
The diff for this file is too large to render.