Umong committed on
Commit
9f8f456
1 Parent(s): 7dd49cb

Upload lm-boosted decoder

Browse files
alphabet.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"labels": ["</s>", "", "<s>", "\u2047", " ", "\u0981", "\u0982", "\u0983", "\u0985", "\u0986", "\u0987", "\u0988", "\u0989", "\u098a", "\u098b", "\u098f", "\u0990", "\u0993", "\u0994", "\u0995", "\u0996", "\u0997", "\u0998", "\u0999", "\u099a", "\u099b", "\u099c", "\u099d", "\u099e", "\u099f", "\u09a0", "\u09a1", "\u09a2", "\u09a3", "\u09a4", "\u09a5", "\u09a6", "\u09a7", "\u09a8", "\u09aa", "\u09ab", "\u09ac", "\u09ad", "\u09ae", "\u09af", "\u09b0", "\u09b2", "\u09b6", "\u09b7", "\u09b8", "\u09b9", "\u09be", "\u09bf", "\u09c0", "\u09c1", "\u09c2", "\u09c3", "\u09c7", "\u09c8", "\u09cb", "\u09cc", "\u09cd", "\u09ce", "\u09dc", "\u09dd", "\u09df"], "is_bpe": false}
language_model/5gram.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea2fa769c2be7ef26f1b35cfd6f7d29684cc781eb7f0d469ddbccd7358136cd9
3
+ size 301267451
language_model/attrs.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
language_model/unigrams.txt ADDED
File without changes
preprocessor_config.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_normalize": true,
3
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
4
+ "feature_size": 1,
5
+ "padding_side": "right",
6
+ "padding_value": 0.0,
7
+ "processor_class": "Wav2Vec2ProcessorWithLM",
8
+ "return_attention_mask": true,
9
+ "sampling_rate": 16000
10
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "eos_token": "</s>",
4
+ "pad_token": "<pad>",
5
+ "unk_token": "<unk>"
6
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "clean_up_tokenization_spaces": true,
4
+ "do_lower_case": false,
5
+ "eos_token": "</s>",
6
+ "model_max_length": 1000000000000000019884624838656,
7
+ "pad_token": "<pad>",
8
+ "processor_class": "Wav2Vec2ProcessorWithLM",
9
+ "replace_word_delimiter_char": " ",
10
+ "target_lang": null,
11
+ "tokenizer_class": "Wav2Vec2CTCTokenizer",
12
+ "unk_token": "<unk>",
13
+ "word_delimiter_token": "|"
14
+ }
vocab.json ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "</s>": 65,
3
+ "<pad>": 63,
4
+ "<s>": 64,
5
+ "<unk>": 62,
6
+ "|": 0,
7
+ "ঁ": 1,
8
+ "ং": 2,
9
+ "ঃ": 3,
10
+ "অ": 4,
11
+ "আ": 5,
12
+ "ই": 6,
13
+ "ঈ": 7,
14
+ "উ": 8,
15
+ "ঊ": 9,
16
+ "ঋ": 10,
17
+ "এ": 11,
18
+ "ঐ": 12,
19
+ "ও": 13,
20
+ "ঔ": 14,
21
+ "ক": 15,
22
+ "খ": 16,
23
+ "গ": 17,
24
+ "ঘ": 18,
25
+ "ঙ": 19,
26
+ "চ": 20,
27
+ "ছ": 21,
28
+ "জ": 22,
29
+ "ঝ": 23,
30
+ "ঞ": 24,
31
+ "ট": 25,
32
+ "ঠ": 26,
33
+ "ড": 27,
34
+ "ঢ": 28,
35
+ "ণ": 29,
36
+ "ত": 30,
37
+ "থ": 31,
38
+ "দ": 32,
39
+ "ধ": 33,
40
+ "ন": 34,
41
+ "প": 35,
42
+ "ফ": 36,
43
+ "ব": 37,
44
+ "ভ": 38,
45
+ "ম": 39,
46
+ "য": 40,
47
+ "র": 41,
48
+ "ল": 42,
49
+ "শ": 43,
50
+ "ষ": 44,
51
+ "স": 45,
52
+ "হ": 46,
53
+ "া": 47,
54
+ "ি": 48,
55
+ "ী": 49,
56
+ "ু": 50,
57
+ "ূ": 51,
58
+ "ৃ": 52,
59
+ "ে": 53,
60
+ "ৈ": 54,
61
+ "ো": 55,
62
+ "ৌ": 56,
63
+ "্": 57,
64
+ "ৎ": 58,
65
+ "ড়": 59,
66
+ "ঢ়": 60,
67
+ "য়": 61
68
+ }