bert-large-cased / config.json
{
  "architectures": [
    "BertForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "directionality": "bidi",
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "pad_token_id": 0,
  "pooler_fc_size": 768,
  "pooler_num_attention_heads": 12,
  "pooler_num_fc_layers": 3,
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
  "transformers_version": "4.6.0.dev0",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 28996
}
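
For reference, a minimal sketch of loading this config with the Hugging Face transformers library, assuming transformers is installed and the Hub model ID "bert-large-cased" is reachable; the printed values should match the fields above:

# Load the bert-large-cased config from the Hugging Face Hub
# (or instantiate it from a local config.json path instead).
from transformers import BertConfig

config = BertConfig.from_pretrained("bert-large-cased")

print(config.hidden_size)          # 1024
print(config.num_hidden_layers)    # 24
print(config.num_attention_heads)  # 16
print(config.vocab_size)           # 28996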