mengzhouxia committed
Commit 4313fcd
1 Parent(s): c597518

first commit

Files changed (4)
  1. config.json +109 -0
  2. pytorch_model.bin +3 -0
  3. tokenizer_config.json +1 -0
  4. vocab.txt +0 -0
config.json ADDED
@@ -0,0 +1,109 @@
+ {
+   "_name_or_path": "/checkpoint/mengzhouxia/space2/out/squad/layerdistillv3_prunehidden/squad_l0_headint_nosvd_layerpuning_layerdistillv3_prunehidden_seed57_distilltemp2_pretrain6000_warpup12000_ts0.6_cealpha0.5_20epochs/best",
+   "architectures": [
+     "NewBertForQuestionAnswering"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "decompose_qk": false,
+   "decompose_vo": false,
+   "do_distill": true,
+   "do_emb_distill": false,
+   "do_layer_distill": true,
+   "do_mha_distill": false,
+   "do_mha_layer_distill": false,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_attentions": true,
+   "output_hidden_states": true,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "pruned_heads": {
+     "0": [
+       2,
+       5
+     ],
+     "1": [
+       0,
+       2,
+       3
+     ],
+     "2": [
+       8,
+       11,
+       4,
+       7
+     ],
+     "3": [
+       2,
+       4
+     ],
+     "4": [
+       8
+     ],
+     "5": [
+       1,
+       2,
+       11
+     ],
+     "6": [
+       2,
+       3
+     ],
+     "7": [
+       11,
+       3,
+       6,
+       7
+     ],
+     "8": [
+       0,
+       4
+     ],
+     "9": [
+       0,
+       1,
+       2,
+       3,
+       4,
+       5,
+       7,
+       9,
+       10,
+       11
+     ],
+     "10": [
+       1,
+       4,
+       5,
+       6,
+       7,
+       8
+     ],
+     "11": [
+       0,
+       3,
+       5,
+       7,
+       8,
+       9,
+       10,
+       11
+     ]
+   },
+   "qk_denominator": "ori",
+   "sephidden_pruned": false,
+   "transform_embedding": false,
+   "transformers_version": "4.3.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
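For anyone browsing this commit, a minimal loading sketch, assuming the repo is cloned to a hypothetical local path `./model`: `NewBertForQuestionAnswering` is a custom class from the authors' own pruning codebase, so stock `transformers` cannot instantiate the model class itself, but the config parses with plain `BertConfig`, which keeps unknown keys such as `do_layer_distill` as attributes and converts the `pruned_heads` layer indices to integers.

```python
from transformers import BertConfig

# Hypothetical local path: a clone of this repo after this commit.
config = BertConfig.from_pretrained("./model")

# pruned_heads maps layer index -> list of attention heads removed from
# that layer; with num_attention_heads = 12, layer 9 keeps only 2 heads.
remaining = {layer: config.num_attention_heads - len(heads)
             for layer, heads in config.pruned_heads.items()}
print(remaining)                # e.g. {0: 10, 1: 9, ..., 9: 2, 10: 6, 11: 4}
print(config.do_layer_distill)  # custom key, surfaced as an attribute -> True
```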
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e00dac6494d271b619117d62c4e34609b693565efc17ebc478bdbfd04cb667dd
+ size 235176183
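This is a Git LFS pointer, not the weights themselves: the `oid` line records the SHA-256 of the real ~235 MB `pytorch_model.bin`, which `git lfs pull` materializes in its place. A small verification sketch against that hash:

```python
import hashlib

# SHA-256 recorded in the LFS pointer above.
EXPECTED = "e00dac6494d271b619117d62c4e34609b693565efc17ebc478bdbfd04cb667dd"

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:            # file fetched by `git lfs pull`
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "weights do not match the LFS oid"
```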
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "name_or_path": "bert-base-uncased"}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff