GBERTQnA / config.json
{
  "architectures": [
    "BertForQuestionAnswering"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "eos_token_ids": 0,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "output_past": true,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "type_vocab_size": 2,
  "vocab_size": 31102
}
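A minimal usage sketch for this config, assuming the config.json (together with the model weights and tokenizer files) sits in a local directory named "GBERTQnA"; if the model is hosted on the Hugging Face Hub, substitute its actual repository ID.

# Sketch: load the BERT-large question-answering config and model with
# Hugging Face transformers. "GBERTQnA" below is an assumed local path,
# not a confirmed hub ID.
from transformers import AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer

config = AutoConfig.from_pretrained("GBERTQnA")
# Should print: bert 1024 24 16, matching the config above.
print(config.model_type, config.hidden_size, config.num_hidden_layers, config.num_attention_heads)

model = AutoModelForQuestionAnswering.from_pretrained("GBERTQnA", config=config)
tokenizer = AutoTokenizer.from_pretrained("GBERTQnA")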