add tokenizer
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
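
These fields mirror the keyword arguments of Wav2Vec2CTCTokenizer in the transformers library. A minimal sketch of building the tokenizer directly from vocab.json with the same settings (the local file path is an assumption, not part of the commit):

from transformers import Wav2Vec2CTCTokenizer

# Build the tokenizer from the vocabulary file, passing the same special
# tokens and word delimiter that tokenizer_config.json declares.
tokenizer = Wav2Vec2CTCTokenizer(
    "vocab.json",
    bos_token="<s>",
    eos_token="</s>",
    unk_token="[UNK]",
    pad_token="[PAD]",
    word_delimiter_token="|",
    do_lower_case=False,
)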
vocab.json
ADDED
@@ -0,0 +1 @@
+{"c": 0, "j": 1, "'": 2, "v": 3, "ü": 4, "q": 5, "h": 6, "s": 7, "y": 8, "z": 9, "d": 10, "̇": 11, "ö": 12, "n": 13, "l": 14, "o": 15, "m": 16, "e": 17, "ş": 18, "f": 19, "w": 20, "r": 21, "â": 22, "t": 23, "ë": 24, "a": 25, "g": 26, "î": 27, "k": 28, "ğ": 29, "i": 31, "p": 32, "ı": 33, "x": 34, "u": 35, "b": 36, "ç": 37, "|": 30, "[UNK]": 38, "[PAD]": 39}
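
Once all three files sit in one directory (or are pushed to a model repository), the tokenizer can be loaded with from_pretrained and used for a character-level round trip. A minimal sketch, assuming a local directory path and a Turkish sample string:

from transformers import Wav2Vec2CTCTokenizer

# Load from the directory that holds vocab.json, tokenizer_config.json
# and special_tokens_map.json; a repo id on the Hub works the same way.
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

# Spaces map to the word delimiter "|", every other character to its id
# from vocab.json; characters outside the vocabulary become [UNK].
ids = tokenizer("merhaba dünya").input_ids
print(ids)
print(tokenizer.decode(ids))  # "merhaba dünya"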