add tokenizer
- added_tokens.json +1 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
added_tokens.json
ADDED
@@ -0,0 +1 @@
+{"<s>": 47, "</s>": 48}
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
vocab.json
ADDED
@@ -0,0 +1 @@
+{"¡": 1, "h": 2, "…": 3, "y": 4, "u": 5, "á": 6, "í": 7, "¿": 8, "j": 9, "b": 10, "m": 11, "−": 12, "_": 13, "i": 14, "w": 15, "p": 16, "v": 17, "a": 18, "k": 19, "ñ": 20, "»": 21, "e": 22, "s": 23, "«": 24, "’": 25, "q": 26, "o": 27, "é": 28, "t": 29, "d": 30, "f": 31, "g": 32, "x": 33, "ü": 34, "c": 35, "—": 36, "l": 37, "'": 38, "r": 39, "n": 40, "z": 41, "ú": 42, "–": 43, "ó": 44, "|": 0, "[UNK]": 45, "[PAD]": 46}
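Taken together, these four files define a character-level CTC tokenizer (tokenizer_class "Wav2Vec2CTCTokenizer" in tokenizer_config.json, with "|" at id 0 as the word delimiter). A minimal usage sketch, not part of the commit itself, assuming the four files above are saved together in a local directory referred to here as "./":

# Load the tokenizer from the directory containing the four JSON files above.
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

# Encoding is character-level: each letter maps to one vocab id, and spaces
# are mapped to the word delimiter "|" (id 0), then back to spaces on decode.
ids = tokenizer("hola mundo").input_ids
print(ids)                    # one id per character, id 0 for the space
print(tokenizer.decode(ids))  # "hola mundo"

One caveat: by default decode() applies CTC-style grouping (group_tokens=True), which collapses adjacent repeated ids, so words with doubled letters only round-trip cleanly when decoding with group_tokens=False.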