Commit da78c26 by Vimos Tan
1 Parent(s): a58ba86

Add chengyubert_2stage_stage1_wwm_ext

Files changed (4):
  1. bert_config.json +19 -0
  2. config.json +20 -0
  3. pytorch_model.bin +3 -0
  4. vocab.txt +0 -0
bert_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "attention_probs_dropout_prob": 0.1,
+   "directionality": "bidi",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "max_position_embeddings": 512,
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "type_vocab_size": 2,
+   "vocab_size": 21128
+ }
config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "model_type": "bert",
+   "attention_probs_dropout_prob": 0.1,
+   "directionality": "bidi",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "max_position_embeddings": 512,
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "type_vocab_size": 2,
+   "vocab_size": 21128
+ }
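Since config.json declares "model_type": "bert", the checkpoint can be opened with the stock BERT classes from transformers. A minimal sketch, assuming a local clone of this repository (the directory path and example sentence are illustrative; any extra task-specific weights the ChengyuBERT training code may have stored in pytorch_model.bin would simply be skipped with a warning):

```python
# Minimal sketch: load this checkpoint with Hugging Face transformers.
# Assumes the repo has been cloned locally; adjust the path as needed.
from transformers import BertConfig, BertModel, BertTokenizer

model_dir = "./chengyubert_2stage_stage1_wwm_ext"  # assumed local path

# config.json carries "model_type": "bert", so the standard classes apply.
config = BertConfig.from_pretrained(model_dir)
tokenizer = BertTokenizer.from_pretrained(model_dir)  # reads vocab.txt (21,128 entries)
model = BertModel.from_pretrained(model_dir)          # reads pytorch_model.bin

inputs = tokenizer("温故而知新", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, seq_len, 768), matching hidden_size above
```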
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06d55c7f8f63b6cc02f9750788b0a8e74e553b0a5214e3dc6bb4b2a47c80e363
+ size 520954843
vocab.txt ADDED
The diff for this file is too large to render. See raw diff