wptoux committed
Commit 5adec38 • 1 Parent(s): fa175f6

fix tokenizer https://github.com/huggingface/transformers/issues/9850#issue-795118067

Files changed (3)
  1. config.json +5 -33
  2. pytorch_model.bin +2 -2
  3. tokenizer_config.json +1 -1
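
The change pins the tokenizer in config.json ("tokenizer_class": "BertTokenizer") and fills out tokenizer_config.json, so AutoTokenizer no longer falls back to the SentencePiece-based AlbertTokenizer that ALBERT checkpoints default to; this checkpoint uses a BERT-style WordPiece vocabulary (vocab_size 21128) instead. A minimal loading sketch, assuming network access to the Hub, the repo id from the config (wptoux/albert-chinese-large-qa), and a transformers release recent enough to honour config.tokenizer_class:

# Minimal sketch (assumptions noted above; not repo content).
from transformers import AutoModelForQuestionAnswering, AutoTokenizer

model_id = "wptoux/albert-chinese-large-qa"

# With "tokenizer_class": "BertTokenizer" in config.json, AutoTokenizer now
# instantiates a WordPiece-based BERT tokenizer instead of attempting to load
# an AlbertTokenizer, which would expect a SentencePiece vocabulary rather
# than the BERT-style vocab this checkpoint uses.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForQuestionAnswering.from_pretrained(model_id)

print(type(tokenizer).__name__)  # BertTokenizer (or BertTokenizerFast)
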
config.json CHANGED
@@ -1,64 +1,36 @@
  {
+ "_name_or_path": "wptoux/albert-chinese-large-qa",
  "_num_labels": 2,
  "architectures": [
  "AlbertForQuestionAnswering"
  ],
  "attention_probs_dropout_prob": 0,
- "bad_words_ids": null,
  "bos_token_id": 2,
  "classifier_dropout_prob": 0.1,
- "decoder_start_token_id": null,
- "do_sample": false,
  "down_scale_factor": 1,
- "early_stopping": false,
  "embedding_size": 128,
  "eos_token_id": 3,
- "finetuning_task": null,
  "gap_size": 0,
  "hidden_act": "relu",
  "hidden_dropout_prob": 0,
  "hidden_size": 1024,
- "id2label": {
- "0": "LABEL_0",
- "1": "LABEL_1"
- },
  "initializer_range": 0.02,
  "inner_group_num": 1,
  "intermediate_size": 4096,
- "is_decoder": false,
- "is_encoder_decoder": false,
- "label2id": {
- "LABEL_0": 0,
- "LABEL_1": 1
- },
  "layer_norm_eps": 1e-12,
  "layers_to_keep": [],
- "length_penalty": 1.0,
- "max_length": 20,
  "max_position_embeddings": 512,
- "min_length": 0,
  "model_type": "albert",
  "net_structure_type": 0,
- "no_repeat_ngram_size": 0,
  "num_attention_heads": 16,
- "num_beams": 1,
  "num_hidden_groups": 1,
  "num_hidden_layers": 24,
  "num_memory_blocks": 0,
- "num_return_sequences": 1,
- "output_attentions": false,
- "output_hidden_states": false,
  "output_past": true,
  "pad_token_id": 0,
- "prefix": null,
- "pruned_heads": {},
- "repetition_penalty": 1.0,
- "task_specific_params": null,
- "temperature": 1.0,
- "top_k": 50,
- "top_p": 1.0,
- "torchscript": false,
+ "position_embedding_type": "absolute",
+ "transformers_version": "4.2.2",
  "type_vocab_size": 2,
- "use_bfloat16": false,
- "vocab_size": 21128
+ "vocab_size": 21128,
+ "tokenizer_class": "BertTokenizer"
  }
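
Since the config declares AlbertForQuestionAnswering, the checkpoint can be exercised end to end with the question-answering pipeline. A hedged sketch; the Chinese question/context pair is illustrative only and not part of the repo:

# Illustrative only: extractive QA with this checkpoint via the pipeline API.
from transformers import pipeline

qa = pipeline("question-answering", model="wptoux/albert-chinese-large-qa")

result = qa(
    question="中国的首都是哪里？",  # "What is the capital of China?"
    context="北京是中国的首都。",    # "Beijing is the capital of China."
)
print(result["answer"], result["score"])  # likely answer span: "北京" (not guaranteed)
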
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fe3b10626af99f4155b87a594b183fde039aff0a9a22c8ee8a507d3bb033f6ae
- size 66208204
+ oid sha256:7ce291dd15f30272c28b1452326e50b1a921ddd763191693a9f2f4506adf163d
+ size 62018265
tokenizer_config.json CHANGED
@@ -1 +1 @@
- {"do_lower_case": false}
+ {"do_lower_case": false, "do_basic_tokenize": true, "never_split": null, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": "/root/.cache/huggingface/transformers/1f536158f09966b7ddca1f2c06264b78535edb420d115380141a23361d136d78.dd8bd9bfd3664b530ea4e645105f557769387b3da9f79bdb55ed556bdd80611d", "tokenizer_file": null, "name_or_path": "wptoux/albert-chinese-large-qa"}