add tokenizer
- .gitignore +1 -0
- added_tokens.json +1 -0
- runs/Dec03_07-23-25_64d6258b628c/1670052229.769173/events.out.tfevents.1670052229.64d6258b628c.101.1 +3 -0
- runs/Dec03_07-23-25_64d6258b628c/1670052919.6945987/events.out.tfevents.1670052919.64d6258b628c.101.2 +3 -0
- runs/Dec03_07-23-25_64d6258b628c/events.out.tfevents.1670052229.64d6258b628c.101.0 +3 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+checkpoint-*/
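Note: this single ignore rule keeps the Trainer's intermediate checkpoint-*/ directories out of version control, so only the final tokenizer files below are committed.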
added_tokens.json
ADDED
@@ -0,0 +1 @@
+{"<s>": 62, "</s>": 63}
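The two added tokens pick up where the base vocabulary stops: vocab.json (below) assigns ids 0-61, so <s> and </s> become 62 and 63. A minimal sketch of that invariant, assuming the files from this commit sit in the working directory:

    import json

    # Ids in added_tokens.json continue directly after the base vocabulary.
    with open("vocab.json") as f:
        vocab = json.load(f)
    with open("added_tokens.json") as f:
        added = json.load(f)

    assert max(vocab.values()) == 61          # base vocab covers ids 0-61
    assert added == {"<s>": 62, "</s>": 63}   # added tokens take 62 and 63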
runs/Dec03_07-23-25_64d6258b628c/1670052229.769173/events.out.tfevents.1670052229.64d6258b628c.101.1
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f096506b62a96a77ce29c935bd84af14e289eb275482c8d399045b5eeb675ee5
+size 4576
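The three events.out.tfevents.* files are TensorBoard logs tracked with Git LFS, so the repository stores only a three-line pointer (spec version, sha256 oid, size in bytes) rather than the binary log itself. A small sketch that reads such a pointer back, using this commit's first log as the path:

    # Parse a Git LFS pointer file into its key/value lines.
    path = ("runs/Dec03_07-23-25_64d6258b628c/1670052229.769173/"
            "events.out.tfevents.1670052229.64d6258b628c.101.1")

    with open(path) as f:
        pointer = dict(line.strip().split(" ", 1) for line in f if line.strip())

    print(pointer["oid"])   # sha256:f096506b...
    print(pointer["size"])  # 4576 (bytes)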
runs/Dec03_07-23-25_64d6258b628c/1670052919.6945987/events.out.tfevents.1670052919.64d6258b628c.101.2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d19e91a1a6275e7eaed53bd6460f851e4b3602a448a9e1e91ef232d50181f12a
+size 4576
runs/Dec03_07-23-25_64d6258b628c/events.out.tfevents.1670052229.64d6258b628c.101.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:372de74253ed6c5cf12280873bf238ceaa3286c003ccbd3174b2340d26fceebf
+size 8422
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
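The map binds the four token roles to concrete strings: <s>/</s> for bos/eos (also listed under additional_special_tokens with their normalization flags) and [UNK]/[PAD] for unknown and padding; in wav2vec 2.0 CTC fine-tuning the pad token conventionally doubles as the CTC blank. A sketch cross-checking these roles against the vocabulary, assuming the committed files are local:

    import json

    with open("special_tokens_map.json") as f:
        smap = json.load(f)
    with open("vocab.json") as f:
        vocab = json.load(f)

    # [UNK] and [PAD] sit at the top of the base vocabulary.
    print(vocab[smap["unk_token"]])  # 60
    print(vocab[smap["pad_token"]])  # 61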
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
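Because tokenizer_config.json records tokenizer_class, the directory can be loaded directly with transformers (AutoTokenizer should resolve to the same class). A hedged loading sketch, assuming transformers is installed and "./" points at these files:

    from transformers import Wav2Vec2CTCTokenizer

    # Load from the files in this commit; "./" matches the recorded name_or_path.
    tok = Wav2Vec2CTCTokenizer.from_pretrained("./")

    print(tok.word_delimiter_token)      # "|" stands in for spaces in CTC labels
    print(tok.bos_token, tok.eos_token)  # <s> </s>
    print(tok.pad_token_id)              # 61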
vocab.json
ADDED
@@ -0,0 +1 @@
+{"ँ": 1, "ं": 2, "ः": 3, "अ": 4, "आ": 5, "इ": 6, "ई": 7, "उ": 8, "ऊ": 9, "ए": 10, "ऐ": 11, "ऑ": 12, "ओ": 13, "औ": 14, "क": 15, "ख": 16, "ग": 17, "घ": 18, "च": 19, "छ": 20, "ज": 21, "झ": 22, "ट": 23, "ठ": 24, "ड": 25, "ढ": 26, "ण": 27, "त": 28, "थ": 29, "द": 30, "ध": 31, "न": 32, "प": 33, "फ": 34, "ब": 35, "भ": 36, "म": 37, "य": 38, "र": 39, "ल": 40, "व": 41, "श": 42, "ष": 43, "स": 44, "ह": 45, "़": 46, "ा": 47, "ि": 48, "ी": 49, "ु": 50, "ू": 51, "ृ": 52, "े": 53, "ै": 54, "ॉ": 55, "ो": 56, "ौ": 57, "्": 58, "ड़": 59, "|": 0, "[UNK]": 60, "[PAD]": 61}
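The vocabulary is character-level Devanagari (including the nukta letter ड़): each symbol is one CTC label, "|" at id 0 marks word boundaries, and [UNK]/[PAD] close out the 62 base entries. A short round-trip sketch with the tokenizer loaded as above:

    from transformers import Wav2Vec2CTCTokenizer

    tok = Wav2Vec2CTCTokenizer.from_pretrained("./")

    # Encode one Hindi word character by character, then decode it back.
    ids = tok("नमस्ते")["input_ids"]
    print(ids)              # [32, 37, 44, 58, 28, 53] per vocab.json above
    print(tok.decode(ids))  # "नमस्ते"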