albert-base-v2-mnli / config.json
{
  "_name_or_path": "albert-base-v2",
  "architectures": [
    "AlbertForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0,
  "bos_token_id": 2,
  "classifier_dropout_prob": 0.1,
  "down_scale_factor": 1,
  "embedding_size": 128,
  "eos_token_id": 3,
  "finetuning_task": "mnli",
  "gap_size": 0,
  "hidden_act": "gelu_new",
  "hidden_dropout_prob": 0,
  "hidden_size": 768,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2"
  },
  "initializer_range": 0.02,
  "inner_group_num": 1,
  "intermediate_size": 3072,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "albert",
  "net_structure_type": 0,
  "num_attention_heads": 12,
  "num_hidden_groups": 1,
  "num_hidden_layers": 12,
  "num_memory_blocks": 0,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "transformers_version": "4.6.1",
  "type_vocab_size": 2,
  "vocab_size": 30000
}