add tokenizer
- added_tokens.json +1 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
added_tokens.json ADDED
@@ -0,0 +1 @@
+{"<s>": 50, "</s>": 51}
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
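
tokenizer_config.json sets tokenizer_class to Wav2Vec2CTCTokenizer, so the files in this commit can be loaded directly with the transformers library. A minimal sketch, assuming the four files sit in the current working directory (matching the name_or_path of "./" recorded in the config):

```python
from transformers import Wav2Vec2CTCTokenizer

# Load vocab.json, tokenizer_config.json, special_tokens_map.json and
# added_tokens.json from the current directory (assumption: files are local).
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

print(tokenizer.vocab_size)    # 50 base entries from vocab.json
print(len(tokenizer))          # 52 once <s>/</s> from added_tokens.json are counted
print(tokenizer.pad_token_id)  # 49 ("[PAD]", which Wav2Vec2 uses as the CTC blank)
```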
vocab.json ADDED
@@ -0,0 +1 @@
+{"m": 0, "u": 1, "y": 2, "s": 3, "â": 4, "g": 5, "h": 6, "w": 7, "ù": 8, "q": 9, "é": 10, ")": 11, "v": 12, "k": 13, "«": 14, "'": 15, "è": 16, "f": 17, "ü": 18, "t": 19, "b": 20, "p": 21, "o": 22, "e": 23, "r": 24, "z": 25, "ñ": 26, "à": 27, "d": 28, "’": 29, "ç": 30, "l": 31, "a": 32, "ô": 33, "ê": 34, "i": 36, "j": 37, "»": 38, "ʼ": 39, "x": 40, "½": 41, "û": 42, "(": 43, "…": 44, "/": 45, "n": 46, "c": 47, "|": 35, "[UNK]": 48, "[PAD]": 49}
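
The vocabulary is character level: each key is a single character, with "|" (id 35) acting as the word delimiter that stands in for spaces, and [UNK]/[PAD] closing out ids 48 and 49. A short usage sketch (the input string is a made-up example; note that decode() collapses repeated tokens CTC-style unless group_tokens=False is passed):

```python
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

# Encoding replaces spaces with the "|" delimiter, then looks up characters.
ids = tokenizer("hello world").input_ids
print(ids)  # [6, 23, 31, 31, 22, 35, 7, 22, 24, 31, 28] given this vocab

# decode() groups repeated tokens by default (CTC collapse), which would
# turn "hello" into "helo"; group_tokens=False round-trips plain text.
print(tokenizer.decode(ids, group_tokens=False))  # "hello world"
```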