add tokenizer
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
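
These settings mirror the constructor arguments of transformers' Wav2Vec2CTCTokenizer, so the tokenizer can also be built straight from the vocabulary file with this config. A minimal sketch (the local "./vocab.json" path is an assumption):

from transformers import Wav2Vec2CTCTokenizer

# Instantiate directly from the vocabulary file using the settings
# from tokenizer_config.json ("./vocab.json" is an assumed local path).
tokenizer = Wav2Vec2CTCTokenizer(
    vocab_file="./vocab.json",
    bos_token="<s>",
    eos_token="</s>",
    unk_token="[UNK]",
    pad_token="[PAD]",
    do_lower_case=False,
    word_delimiter_token="|",
)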
vocab.json ADDED
@@ -0,0 +1 @@
+{"A": 0, "E": 1, "p": 2, "J": 3, "o": 4, "'": 5, "z": 6, "u": 7, "g": 8, "W": 9, "C": 10, "n": 11, "-": 12, "i": 13, "B": 14, "a": 15, ".": 16, "K": 17, ",": 18, "O": 19, "l": 20, "H": 21, "m": 22, "T": 23, "b": 24, "y": 25, "j": 26, "c": 27, "G": 28, "\"": 29, "V": 30, "r": 31, "Q": 32, "I": 33, "!": 34, "Y": 35, ":": 36, "x": 37, "U": 38, "N": 39, "h": 40, "f": 41, "P": 42, "M": 43, "D": 44, "t": 45, "w": 46, "e": 47, "F": 48, ";": 49, "k": 50, "s": 52, "X": 53, "S": 54, "L": 55, "v": 56, "q": 57, "R": 58, "d": 59, "?": 60, "|": 51, "[UNK]": 61, "[PAD]": 62}
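
Together, the three files are enough for transformers to reload this tokenizer. A minimal round-trip sketch, assuming they sit in a local directory "./tokenizer" (the path is an assumption):

from transformers import Wav2Vec2CTCTokenizer

# Load from a local directory holding the three files above
# (the "./tokenizer" path is an assumed local checkout).
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./tokenizer")

# Each character maps to an id from vocab.json; spaces are encoded
# as the word delimiter token "|".
ids = tokenizer("Hello world").input_ids

# group_tokens=False skips CTC-style merging of repeated characters,
# which would otherwise collapse the double "l" in "Hello".
print(tokenizer.decode(ids, group_tokens=False))  # Hello world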