yacht committed
Commit 5be93b0
1 parent: 7e76d40

update tokenizer with fast version
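
This commit adds a fast (Rust-backed) serialization of the tokenizer. As a hypothetical sketch of how such a tokenizer.json is typically produced with Hugging Face transformers (the actual conversion script is not part of this commit):

```python
from transformers import BertTokenizerFast

# Hypothetical reconstruction of the conversion step. The input path mirrors
# "name_or_path" from the new tokenizer_config.json below, and from_slow=True
# matches its "from_slow": true entry: it forces conversion from the slow,
# vocab-based BERT tokenizer to the fast implementation.
tokenizer = BertTokenizerFast.from_pretrained("tmp/tokenizers/th/", from_slow=True)

# save_pretrained() writes tokenizer.json (the fast serialization) alongside
# tokenizer_config.json and special_tokens_map.json.
tokenizer.save_pretrained("tmp/tokenizers/th/")
```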

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
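
The new rule routes tokenizer.json through Git LFS, so the file added below is stored as a pointer rather than checked in directly; this is the line a command like `git lfs track "tokenizer.json"` would append.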
added_tokens.json CHANGED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json CHANGED
@@ -1 +1,19 @@
-{"bos_token": "[BOS]", "eos_token": "[EOS]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "additional_special_tokens": ["[UNC]", "[LEN]", "[LPP]", "[BEST2010]", "[LST20]", "[TNHC]", "[VISTEC-TPTH2021]", "[WS160]"]}
+{
+  "additional_special_tokens": [
+    "[UNC]",
+    "[LEN]",
+    "[LPP]",
+    "[BEST2010]",
+    "[LST20]",
+    "[TNHC]",
+    "[VISTEC-TPTH2021]",
+    "[WS160]"
+  ],
+  "bos_token": "[BOS]",
+  "cls_token": "[CLS]",
+  "eos_token": "[EOS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
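
Note that the token set itself is unchanged here: the file is merely re-serialized from a single line into pretty-printed JSON with sorted keys, consistent with how save_pretrained writes these files.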
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50ae015e8a33253054d75c1de67a95f78284233344b9ca628b84acf260fc68be
+size 33896546
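
This is a standard Git LFS pointer, not the tokenizer itself: the actual tokenizer.json (33,896,546 bytes, about 34 MB) lives in LFS storage and is addressed by the SHA-256 object ID above.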
tokenizer_config.json CHANGED
@@ -1 +1,16 @@
-{"do_lower_case": false, "do_basic_tokenize": true, "never_split": null, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": null, "tokenizer_file": "data/ptm/bert-base-multilingual-cased/tokenizer.json", "name_or_path": "data/ptm/bert-base-multilingual-cased", "tokenizer_class": "BertTokenizer"}
+{
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "from_slow": true,
+  "mask_token": "[MASK]",
+  "name_or_path": "tmp/tokenizers/th/",
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "special_tokens_map_file": null,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
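
With tokenizer.json in place, loading this checkpoint should pick up the fast tokenizer automatically. A minimal sketch, assuming a local checkout at a hypothetical path (the repository id is not shown on this page):

```python
from transformers import AutoTokenizer

# Hypothetical local checkout of this repository.
tok = AutoTokenizer.from_pretrained("path/to/checkout")

# With tokenizer.json present, AutoTokenizer resolves the configured
# "BertTokenizer" class to its fast (Rust-backed) counterpart by default.
print(tok.is_fast)                    # True
print(tok.additional_special_tokens)  # ['[UNC]', '[LEN]', '[LPP]', '[BEST2010]', ...]
```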