{
  "architectures": [
    "BertForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 256,
  "initializer_range": 0.02,
  "intermediate_size": 1024,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 4,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "type_vocab_size": 2,
  "vocab_size": 21128
}