hsiehpinghan committed
Commit d525992
1 Parent(s): e57b5ed

Add model and tokenizer files

config.json CHANGED
@@ -1,28 +1,33 @@
 {
-  "_name_or_path": "camembert-base",
+  "_name_or_path": "voidful/albert_chinese_tiny",
   "architectures": [
-    "CamembertForMaskedLM"
+    "AlbertForMaskedLM"
   ],
-  "attention_probs_dropout_prob": 0.1,
-  "bos_token_id": 5,
-  "classifier_dropout": null,
-  "eos_token_id": 6,
+  "attention_probs_dropout_prob": 0.0,
+  "bos_token_id": 2,
+  "classifier_dropout_prob": 0.1,
+  "down_scale_factor": 1,
+  "embedding_size": 128,
+  "eos_token_id": 3,
+  "gap_size": 0,
   "hidden_act": "gelu",
-  "hidden_dropout_prob": 0.1,
-  "hidden_size": 768,
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 312,
   "initializer_range": 0.02,
-  "intermediate_size": 3072,
-  "layer_norm_eps": 1e-05,
-  "max_position_embeddings": 514,
-  "model_type": "camembert",
+  "inner_group_num": 1,
+  "intermediate_size": 1248,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "albert",
+  "net_structure_type": 0,
   "num_attention_heads": 12,
-  "num_hidden_layers": 12,
-  "output_past": true,
-  "pad_token_id": 1,
+  "num_hidden_groups": 1,
+  "num_hidden_layers": 4,
+  "num_memory_blocks": 0,
+  "pad_token_id": 0,
   "position_embedding_type": "absolute",
   "torch_dtype": "float32",
   "transformers_version": "4.18.0",
-  "type_vocab_size": 1,
-  "use_cache": true,
-  "vocab_size": 32005
+  "type_vocab_size": 2,
+  "vocab_size": 21128
 }
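
The new config can be loaded and inspected with transformers. A minimal sketch, assuming "path/to/this/repo" stands in for a local clone or the Hub id of this repository (the path is a placeholder, not confirmed by the diff):

# Minimal sketch; "path/to/this/repo" is a placeholder.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("path/to/this/repo")
print(config.model_type)         # "albert"
print(config.hidden_size)        # 312
print(config.embedding_size)     # 128 (factorized embeddings, smaller than hidden_size)
print(config.num_hidden_layers)  # 4, shared across num_hidden_groups = 1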
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:49bcca0a8b0e23a9d13e5cc64b16ad539f5316ec5f50e56d2062960592481075
-size 442691115
+oid sha256:ea6fdff436dcdd23221febab548ccbf74e8a2cca394bd993d5289d9fc6dd9d95
+size 16190158
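
This file is a Git LFS pointer: oid is the SHA-256 digest of the actual weights file and size is its byte count. A minimal sketch for verifying a downloaded copy against the pointer (the local filename is an assumption):

# Minimal sketch: verify a downloaded pytorch_model.bin against the LFS
# pointer above; the local path is an assumption.
import hashlib, os

path = "pytorch_model.bin"
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == "ea6fdff436dcdd23221febab548ccbf74e8a2cca394bd993d5289d9fc6dd9d95"
assert os.path.getsize(path) == 16190158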
special_tokens_map.json CHANGED
@@ -1 +1 @@
-{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}, "additional_special_tokens": ["<s>NOTUSED", "</s>NOTUSED"]}
+{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "additional_special_tokens": ["<s>NOTUSED", "</s>NOTUSED"], "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "camembert-base", "tokenizer_class": "CamembertTokenizer"}
+{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "do_basic_tokenize": true, "never_split": null, "special_tokens_map_file": null, "name_or_path": "voidful/albert_chinese_tiny", "tokenizer_class": "BertTokenizer"}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff