Upload lm-boosted decoder
- .gitattributes +1 -0
- added_tokens.json +4 -0
- alphabet.json +1 -0
- language_model/5gram_correct.arpa +3 -0
- language_model/attrs.json +1 -0
- language_model/unigrams.txt +0 -0
- preprocessor_config.json +1 -0
- special_tokens_map.json +16 -0
- tokenizer_config.json +3 -0
.gitattributes
CHANGED
@@ -29,3 +29,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+language_model/5gram_correct.arpa filter=lfs diff=lfs merge=lfs -text
added_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+  "</s>": 52,
+  "<s>": 51
+}
alphabet.json
ADDED
@@ -0,0 +1 @@
+{"labels": ["\u315c", "\u3152", "\u3141", "\u3160", "\u3143", "\u314c", "\u3158", "\u313c", "\u3138", "\u314b", "\u3136", "\u3135", "\u314a", "\u3148", "\u3144", "\u3149", "\u315e", "\u3151", "\u315b", "\u3131", " ", "\u3154", "\u3161", "\u3163", "\u314e", "\u3155", "\u313b", "\u3162", "\u3157", "\u3133", "\u314d", "\u3134", "\u3146", "\u3153", "\u3139", "\u315a", "\u315f", "\u3145", "\u3132", "\u3142", "\u3140", "\u3147", "\u313a", "\u3137", "\u315d", "\u3159", "\u3150", "\u314f", "\u3156", "\u2047", "", "<s>", "</s>"], "is_bpe": false}
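The \u31xx escapes above all sit in the Hangul Compatibility Jamo block (U+3131 to U+3163), so the CTC vocabulary is jamo-level rather than syllable-level; the remaining entries are the space, "\u2047" (pyctcdecode's "⁇" unknown marker), the empty CTC-blank string, and the boundary tokens. A minimal sketch to make the labels human-readable, assuming the file path of a local checkout:

import json

# Round-trip alphabet.json so the escaped code points render as jamo.
with open("alphabet.json", encoding="utf-8") as f:  # local path assumed
    labels = json.load(f)["labels"]

print(labels[:5])  # ['ㅜ', 'ㅒ', 'ㅁ', 'ㅠ', 'ㅃ']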
language_model/5gram_correct.arpa
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90dd0d7198b1734fd70755114a3530823f3689bf91815b164208f51a3d6e7541
+size 17749133
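These three lines are a Git LFS pointer; the ~17 MB ARPA file itself lives in LFS storage, which is why .gitattributes gains a matching filter rule above. The "_correct" suffix follows a common convention for KenLM-built ARPA files patched to declare an explicit "</s>" end-of-sentence unigram, which lmplz omits but LM beam-search decoders expect. A hedged sketch of that patch, assuming the input and output file names:

# Assumed provenance of 5gram_correct.arpa: mirror the "<s>" unigram as
# "</s>" and bump the declared 1-gram count by one to keep the header valid.
with open("5gram.arpa", "r") as read_file, open("5gram_correct.arpa", "w") as write_file:
    has_added_eos = False
    for line in read_file:
        if not has_added_eos and "ngram 1=" in line:
            count = line.strip().split("=")[-1]
            write_file.write(line.replace(f"{count}", f"{int(count) + 1}"))
        elif not has_added_eos and "<s>" in line:
            write_file.write(line)
            write_file.write(line.replace("<s>", "</s>"))
            has_added_eos = True
        else:
            write_file.write(line)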
language_model/attrs.json
ADDED
@@ -0,0 +1 @@
+{"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
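These are the decoder's default beam-search parameters: alpha scales the language-model weight, beta is the word-insertion bonus, unk_score_offset penalises out-of-vocabulary words, and score_boundary tells pyctcdecode to score utterance boundaries with "<s>"/"</s>". A sketch of overriding them at runtime, with the repo id taken from tokenizer_config.json below:

from transformers import Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained(
    "NX2411/wav2vec2-large-xlsr-korean-demo-with-LM"
)
# attrs.json seeds these defaults; reset_params is pyctcdecode's documented
# way to retune them after loading.
processor.decoder.reset_params(
    alpha=0.5,               # language-model weight
    beta=1.5,                # word-insertion bonus
    unk_score_offset=-10.0,  # penalty for out-of-vocabulary words
    lm_score_boundary=True,  # score "<s>"/"</s>" at utterance boundaries
)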
language_model/unigrams.txt
ADDED
The diff for this file is too large to render.
preprocessor_config.json
CHANGED
@@ -4,6 +4,7 @@
   "feature_size": 1,
   "padding_side": "right",
   "padding_value": 0.0,
+  "processor_class": "Wav2Vec2ProcessorWithLM",
   "return_attention_mask": false,
   "sampling_rate": 16000
 }
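With processor_class set, AutoProcessor resolves this repo to a Wav2Vec2ProcessorWithLM, whose batch_decode runs pyctcdecode's beam search against the 5-gram instead of plain argmax CTC. A minimal end-to-end sketch, assuming a 16 kHz mono sample.wav and that librosa and torch are installed (neither is part of this commit):

import librosa
import torch
from transformers import AutoModelForCTC, Wav2Vec2ProcessorWithLM

model_id = "NX2411/wav2vec2-large-xlsr-korean-demo-with-LM"
model = AutoModelForCTC.from_pretrained(model_id)
processor = Wav2Vec2ProcessorWithLM.from_pretrained(model_id)

speech, _ = librosa.load("sample.wav", sr=16000)  # matches sampling_rate above
inputs = processor(speech, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# batch_decode feeds the logits to the LM-boosted beam search rather than
# taking a per-frame argmax.
transcription = processor.batch_decode(logits.numpy()).text
print(transcription[0])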
special_tokens_map.json
CHANGED
@@ -1,4 +1,20 @@
 {
+  "additional_special_tokens": [
+    {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false
+    }
+  ],
   "bos_token": "<s>",
   "eos_token": "</s>",
   "pad_token": "[PAD]",
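Registering "<s>" and "</s>" as additional special tokens keeps them atomic in the character-level CTC tokenizer, consistent with the ids declared in added_tokens.json. A quick check (assumed usage):

from transformers import Wav2Vec2CTCTokenizer

tok = Wav2Vec2CTCTokenizer.from_pretrained(
    "NX2411/wav2vec2-large-xlsr-korean-demo-with-LM"
)
# The boundary tokens map to single ids rather than being split per character.
print(tok.convert_tokens_to_ids(["<s>", "</s>"]))  # expected: [51, 52]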
tokenizer_config.json
CHANGED
@@ -2,8 +2,11 @@
   "bos_token": "<s>",
   "do_lower_case": false,
   "eos_token": "</s>",
+  "name_or_path": "NX2411/wav2vec2-large-xlsr-korean-demo-with-LM",
   "pad_token": "[PAD]",
+  "processor_class": "Wav2Vec2ProcessorWithLM",
   "replace_word_delimiter_char": " ",
+  "special_tokens_map_file": "/root/.cache/huggingface/transformers/51a567f056bb60dc09ae81a627c2dd36ec1f24f8621a9a8da06d686b836c65f8.fea372b8528a479b7415f13ca4e27a2f5f3782cbb3f15b4d19bb3cbe734e8137",
   "tokenizer_class": "Wav2Vec2CTCTokenizer",
   "unk_token": "[UNK]",
   "word_delimiter_token": "|"