add tokenizer
Browse files
- added_tokens.json +1 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
added_tokens.json
ADDED
@@ -0,0 +1 @@
1 + {"<s>": 78, "</s>": 79}
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
1 + {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
1 + {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "।", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
vocab.json
ADDED
@@ -0,0 +1 @@
1 + {" ": 0, "/": 1, "a": 2, "e": 3, "g": 4, "l": 5, "o": 6, "p": 7, "y": 8, "।": 9, "॥": 10, "ঁ": 11, "ং": 12, "ঃ": 13, "অ": 14, "আ": 15, "ই": 16, "ঈ": 17, "উ": 18, "ঊ": 19, "ঋ": 20, "এ": 21, "ঐ": 22, "ও": 23, "ঔ": 24, "ক": 25, "খ": 26, "গ": 27, "ঘ": 28, "ঙ": 29, "চ": 30, "ছ": 31, "জ": 32, "ঝ": 33, "ঞ": 34, "ট": 35, "ঠ": 36, "ড": 37, "ঢ": 38, "ণ": 39, "ত": 40, "থ": 41, "দ": 42, "ধ": 43, "ন": 44, "প": 45, "ফ": 46, "ব": 47, "ভ": 48, "ম": 49, "য": 50, "র": 51, "ল": 52, "শ": 53, "ষ": 54, "স": 55, "হ": 56, "়": 57, "া": 58, "ি": 59, "ী": 60, "ু": 61, "ূ": 62, "ৃ": 63, "ে": 64, "ৈ": 65, "ো": 66, "ৌ": 67, "্": 68, "ৎ": 69, "ড়": 70, "য়": 71, "ৰ": 72, "—": 73, "’": 74, "‚": 75, "[UNK]": 76, "[PAD]": 77}