vpelloin committed on
Commit fed665d
1 Parent(s): 2acc69e

Upload tokenizer

Files changed (3)
  1. special_tokens_map.json +20 -1
  2. tokenizer_config.json +28 -1
  3. vocab.json +0 -0
special_tokens_map.json CHANGED
@@ -1 +1,20 @@
- {"bos_token": "<s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "</s>", "mask_token": "<special1>", "additional_special_tokens": ["<special0>", "<special1>", "<special2>", "<special3>", "<special4>", "<special5>", "<special6>", "<special7>", "<special8>", "<special9>"]}
+ {
+   "additional_special_tokens": [
+     "<special0>",
+     "<special1>",
+     "<special2>",
+     "<special3>",
+     "<special4>",
+     "<special5>",
+     "<special6>",
+     "<special7>",
+     "<special8>",
+     "<special9>"
+   ],
+   "bos_token": "<s>",
+   "cls_token": "</s>",
+   "mask_token": "<special1>",
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer_config.json CHANGED
@@ -1 +1,28 @@
- {"unk_token": "<unk>", "bos_token": "<s>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "</s>", "mask_token": "<special1>", "additional_special_tokens": ["<special0>", "<special1>", "<special2>", "<special3>", "<special4>", "<special5>", "<special6>", "<special7>", "<special8>", "<special9>"], "lang2id": null, "id2lang": null, "do_lowercase_and_remove_accent": true, "do_lower_case": true, "keep_accents": true, "model_max_length": 512, "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "/export/home/lium/vpelloin/git/ghannay_slu_MEDIA_Eval/BERT/FineTune_FlauBERT/12-03-22_18-48/flaubert/flaubert_base_uncased/checkpoint-best/asr-ref", "tokenizer_class": "FlaubertTokenizer"}
+ {
+   "additional_special_tokens": [
+     "<special0>",
+     "<special1>",
+     "<special2>",
+     "<special3>",
+     "<special4>",
+     "<special5>",
+     "<special6>",
+     "<special7>",
+     "<special8>",
+     "<special9>"
+   ],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "</s>",
+   "do_lower_case": true,
+   "id2lang": null,
+   "keep_accents": true,
+   "lang2id": null,
+   "mask_token": "<special1>",
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "FlaubertTokenizer",
+   "tokenizer_file": null,
+   "unk_token": "<unk>"
+ }
vocab.json CHANGED
The diff for this file is too large to render. See raw diff
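
The two JSON files above declare the special tokens and options that a FlaubertTokenizer picks up when the repository is loaded with transformers, and vocab.json supplies the BPE vocabulary they refer to. A minimal sketch of loading and inspecting these values, assuming the files live in a Hub repository; "vpelloin/<repo-id>" is a placeholder, not the actual repository path:

    from transformers import AutoTokenizer

    # Placeholder repo id; replace with the actual repository path.
    tokenizer = AutoTokenizer.from_pretrained("vpelloin/<repo-id>")

    # Values declared in special_tokens_map.json / tokenizer_config.json
    print(tokenizer.cls_token, tokenizer.sep_token)   # '</s>' '</s>'
    print(tokenizer.mask_token)                       # '<special1>'
    print(tokenizer.additional_special_tokens)        # ['<special0>', ..., '<special9>']
    print(tokenizer.model_max_length)                 # 512

    # do_lower_case / keep_accents control preprocessing before BPE lookup in vocab.json
    print(tokenizer.tokenize("Réserver une chambre à Paris"))

Note that cls_token and sep_token both map to "</s>", which is the usual XLM/FlauBERT convention rather than a mistake in the config.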