add tokenizer
- added_tokens.json +1 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
added_tokens.json
ADDED
@@ -0,0 +1 @@
+{"<s>": 46, "</s>": 47}
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
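tokenizer_config.json names the class as `Wav2Vec2CTCTokenizer`, and the four JSON files in this commit are the standard on-disk layout that `from_pretrained` reads. A minimal loading sketch, assuming the repository root is the working directory and 🤗 Transformers is installed:

```python
from transformers import Wav2Vec2CTCTokenizer

# Reads vocab.json, tokenizer_config.json, special_tokens_map.json,
# and added_tokens.json from the current directory.
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

print(tokenizer.unk_token, tokenizer.pad_token)  # [UNK] [PAD]
print(tokenizer.word_delimiter_token)            # |
```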
vocab.json
ADDED
@@ -0,0 +1 @@
+{"o": 0, "î": 1, "v": 2, "g": 3, "y": 5, "é": 6, "ü": 7, "x": 8, "z": 9, "b": 10, "d": 11, "ë": 12, "j": 13, "s": 14, "ı": 15, "k": 16, "m": 17, "l": 18, "p": 19, "…": 20, "ğ": 21, "w": 22, "ö": 23, "â": 24, "q": 25, "(": 26, "c": 27, "r": 28, "a": 29, ")": 30, "û": 31, "u": 32, "n": 33, "’": 34, "'": 35, "ş": 36, "ç": 37, "e": 38, "i": 39, "t": 40, "̇": 41, "f": 42, "h": 43, "|": 4, "[UNK]": 44, "[PAD]": 45}
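vocab.json defines the character-level vocabulary (mostly the Turkish alphabet, ids 0–45), with `|` at id 4 serving as the word delimiter, so plain text round-trips through encoding and CTC-style decoding. A short usage sketch with hypothetical example text, continuing from the tokenizer loaded above:

```python
# Each character maps to one id; spaces become the "|" delimiter (id 4).
ids = tokenizer("merhaba dünya").input_ids
print(ids)  # [17, 38, 28, 43, 29, 10, 29, 4, 11, 7, 33, 5, 29]

# decode() collapses repeated ids CTC-style and maps "|" back to spaces.
print(tokenizer.decode(ids))  # merhaba dünya
```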