add tokenizer
- added_tokens.json +1 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
added_tokens.json
ADDED
@@ -0,0 +1 @@
+{"<s>": 65, "</s>": 66}
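The two added tokens extend the base vocabulary directly: vocab.json below holds ids 0-64, so <s> and </s> take the next two slots. A minimal check of that, assuming the four JSON files from this commit sit in the working directory:

import json

# Assumes vocab.json and added_tokens.json are in the working directory.
with open("vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)
with open("added_tokens.json", encoding="utf-8") as f:
    added = json.load(f)

# The added ids (65, 66) continue right after the 65-entry base vocabulary.
assert max(vocab.values()) + 1 == min(added.values())
print(len(vocab) + len(added))  # 67 ids in total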
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
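This map names the special tokens the tokenizer exposes; listing <s> and </s> under additional_special_tokens keeps them from being split into single characters. A sketch of loading the files and inspecting those attributes, assuming they are saved in "./":

from transformers import Wav2Vec2CTCTokenizer

# Assumes the four JSON files from this commit are saved in "./".
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")
print(tokenizer.bos_token, tokenizer.eos_token)  # <s> </s>
print(tokenizer.unk_token, tokenizer.pad_token)  # [UNK] [PAD]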
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
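Because tokenizer_config.json records tokenizer_class, AutoTokenizer can resolve the right implementation without a model config.json. A sketch under the same assumption that the files live in "./":

from transformers import AutoTokenizer

# tokenizer_class in tokenizer_config.json tells AutoTokenizer which
# implementation to instantiate (no model config.json needed).
tokenizer = AutoTokenizer.from_pretrained("./")
print(type(tokenizer).__name__)        # Wav2Vec2CTCTokenizer
print(tokenizer.word_delimiter_token)  # |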
vocab.json
ADDED
@@ -0,0 +1 @@
+{"s": 0, "щ": 1, "м": 2, "'": 3, ")": 4, "й": 5, "r": 6, "x": 7, "t": 8, "»": 9, "(": 10, "n": 11, "з": 12, "o": 13, "e": 14, "я": 16, "g": 17, "ч": 18, "p": 19, "к": 20, "‑": 21, "ь": 22, "a": 23, "ю": 24, "у": 25, "н": 26, "…": 27, "т": 28, "ц": 29, "k": 30, "и": 31, "ё": 32, "ш": 33, "b": 34, "г": 35, "−": 36, "п": 37, "l": 38, "h": 39, "m": 40, "д": 41, "х": 42, "—": 43, "«": 44, "c": 45, "ф": 46, "б": 47, "р": 48, "f": 49, "ж": 50, "а": 51, "о": 52, "i": 53, "э": 54, "ы": 55, "е": 56, "ъ": 57, "л": 58, "z": 59, "в": 60, "с": 61, "–": 62, "|": 15, "[UNK]": 63, "[PAD]": 64}
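The vocabulary is character-level, as usual for Wav2Vec2 CTC models: Cyrillic and Latin letters plus punctuation, with "|" (id 15) standing in for the space between words, [UNK] at 63 and [PAD] at 64 ([PAD] typically doubles as the CTC blank in Wav2Vec2). A round-trip sketch, again assuming the files are in "./":

from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

# Each character maps to its vocab.json id; spaces become "|" (id 15).
ids = tokenizer("привет мир").input_ids
print(ids)

# decode() maps ids back to characters and turns "|" back into spaces.
# Note it also collapses repeated consecutive ids by default (CTC-style grouping).
print(tokenizer.decode(ids))  # привет мир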