ntinouldinho committed
Commit: a98105d
Parent(s): 39dd005

Upload lm-boosted decoder
Files changed:
- added_tokens.json +1 -0
- alphabet.json +1 -0
- language_model/4gram.bin +3 -0
- language_model/attrs.json +1 -0
- language_model/unigrams.txt +0 -0
- preprocessor_config.json +10 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
added_tokens.json
ADDED
@@ -0,0 +1 @@
+{}
alphabet.json
ADDED
@@ -0,0 +1 @@
+{"labels": ["\u03c8", "\u03c1", "\u03c6", "\u03c0", "m", "\u03bf", "t", "\u03cb", "\u03c3", "\u03ca", "g", "\u00b4", "e", "\u03cd", "\u0390", "\u2019", "a", "\u03c4", "\u03b4", "\u03b6", "r", "\u03bb", "\u03b8", "\u00b7", "\u00bb", "\u00ab", "\u03b2", "\u03ba", "\u03bd", "n", " ", "\u03c7", "\u03b7", "\u03b3", "\u03ac", "\u03c9", "\u03ad", "\u03cc", "\u03bc", "\u03c2", "o", "\u03be", "\u03ae", "\u03ce", "\u03b5", "\u03b9", "h", "\u03b1", "\u03af", "'", "v", "\u03c5", "\u0301", "\u2047", "", "<s>", "</s>"], "is_bpe": false}
language_model/4gram.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:566399edd87128b6508e8d7186a74653483a96d83a73a8975df24a277b85908b
+size 847162872
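The n-gram model is stored through Git LFS, so the diff only shows the pointer file; the object it resolves to is an ~847 MB KenLM 4-gram binary. As a sketch (assuming a local clone with the LFS object pulled, so the path below is the real binary rather than the pointer), the model can be queried directly with the kenlm Python bindings:

```python
import kenlm  # pip install kenlm

# Sketch only: assumes language_model/4gram.bin is the fetched LFS object.
lm = kenlm.Model("language_model/4gram.bin")
print(lm.order)                                      # expected: 4
print(lm.score("καλημέρα σας", bos=True, eos=True))  # total log10 probability
```

Higher (less negative) scores mark word sequences the 4-gram model considers more likely, which is the signal the beam-search decoder uses to rerank CTC hypotheses.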
language_model/attrs.json
ADDED
@@ -0,0 +1 @@
+{"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
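These are the saved decoder attributes: alpha weights the language-model score, beta is the word-insertion bonus, unk_score_offset penalizes out-of-vocabulary words, and score_boundary controls whether sentence-boundary context is scored. A minimal sketch of rebuilding the beam-search decoder by hand from these files with pyctcdecode (paths assumed relative to a local clone; in practice Wav2Vec2ProcessorWithLM.from_pretrained does this for you):

```python
import json
from pyctcdecode import build_ctcdecoder

# Sketch only: rebuild the decoder from the files added in this commit.
labels = json.load(open("alphabet.json", encoding="utf-8"))["labels"]
attrs = json.load(open("language_model/attrs.json", encoding="utf-8"))
unigrams = [w.strip() for w in open("language_model/unigrams.txt", encoding="utf-8")]

decoder = build_ctcdecoder(
    labels,
    kenlm_model_path="language_model/4gram.bin",
    unigrams=unigrams,
    alpha=attrs["alpha"],                        # LM weight
    beta=attrs["beta"],                          # word-insertion bonus
    unk_score_offset=attrs["unk_score_offset"],  # OOV penalty
    lm_score_boundary=attrs["score_boundary"],   # score sentence boundaries
)
```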
language_model/unigrams.txt
ADDED
The diff for this file is too large to render; see the raw file.
preprocessor_config.json
ADDED
@@ -0,0 +1,10 @@
+{
+  "do_normalize": true,
+  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+  "feature_size": 1,
+  "padding_side": "right",
+  "padding_value": 0.0,
+  "processor_class": "Wav2Vec2ProcessorWithLM",
+  "return_attention_mask": true,
+  "sampling_rate": 16000
+}
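With processor_class set to Wav2Vec2ProcessorWithLM, the feature extractor, tokenizer, and LM-boosted decoder all load through a single from_pretrained call. A minimal inference sketch: the repo id is a placeholder, the audio is random noise standing in for 16 kHz Greek speech, and the acoustic-model weights are assumed to live in the same repo.

```python
import torch
from transformers import AutoModelForCTC, Wav2Vec2ProcessorWithLM

REPO_ID = "path/to/this/repo"  # placeholder for the actual Hub id
processor = Wav2Vec2ProcessorWithLM.from_pretrained(REPO_ID)
model = AutoModelForCTC.from_pretrained(REPO_ID)

waveform = torch.randn(16000)  # 1 s of fake audio at the 16 kHz rate set above
inputs = processor(waveform.numpy(), sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # (batch, time, vocab) CTC logits

# batch_decode runs pyctcdecode's LM-boosted beam search over the logits.
print(processor.batch_decode(logits.numpy()).text)
```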
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": null, "eos_token": null, "unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": null, "eos_token": null, "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "special_tokens_map_file": "/root/.cache/huggingface/transformers/7583eec6f0fc5d29c5d19e109f1737ce0da6c2c4c27426590e235ea7a79499f8.a21d51735cf8667bcd610f057e88548d5d6a381401f6b4501a8bc6c1a9dc8498", "name_or_path": "lighteternal/wav2vec2-large-xlsr-53-greek", "tokenizer_class": "Wav2Vec2CTCTokenizer", "processor_class": "Wav2Vec2ProcessorWithLM"}
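The tokenizer config points back to lighteternal/wav2vec2-large-xlsr-53-greek and keeps "|" as the word delimiter, which is swapped for a regular space on decode. A small sketch (placeholder repo id; note that decode applies CTC-style grouping of repeated tokens by default):

```python
from transformers import Wav2Vec2CTCTokenizer

tok = Wav2Vec2CTCTokenizer.from_pretrained("path/to/this/repo")  # placeholder id

ids = tok("καλημέρα κόσμε").input_ids  # characters -> vocab ids, " " -> "|"
print(tok.convert_ids_to_tokens(ids))  # ['κ', 'α', 'λ', ..., '|', 'κ', ...]
print(tok.decode(ids))                 # "καλημέρα κόσμε"
```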
vocab.json
ADDED
@@ -0,0 +1 @@
+{"ψ": 0, "ρ": 1, "φ": 2, "π": 3, "m": 4, "ο": 5, "t": 6, "ϋ": 7, "σ": 8, "ϊ": 9, "g": 10, "´": 11, "e": 12, "ύ": 13, "ΐ": 14, "’": 15, "a": 16, "τ": 17, "δ": 18, "ζ": 19, "r": 20, "λ": 21, "θ": 22, "·": 23, "»": 24, "«": 25, "β": 26, "κ": 27, "ν": 28, "n": 29, "χ": 31, "η": 32, "γ": 33, "ά": 34, "ω": 35, "έ": 36, "ό": 37, "μ": 38, "ς": 39, "o": 40, "ξ": 41, "ή": 42, "ώ": 43, "ε": 44, "ι": 45, "h": 46, "α": 47, "ί": 48, "'": 49, "v": 50, "υ": 51, "́": 52, "|": 30, "[UNK]": 53, "[PAD]": 54}
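The decoder alphabet in alphabet.json above lines up with this CTC vocabulary: tokens are laid out in vocab-id order, the word delimiter "|" (id 30) becomes a literal space, "[UNK]" becomes the "⁇" marker, "[PAD]" becomes the empty CTC blank, and "<s>"/"</s>" are appended at the end. A small sketch that checks this correspondence (assuming local copies of both files from this commit):

```python
import json

with open("vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)
with open("alphabet.json", encoding="utf-8") as f:
    alphabet = json.load(f)["labels"]

# Order tokens by their CTC id and apply the substitutions visible in
# alphabet.json: "|" -> " ", "[UNK]" -> "⁇", "[PAD]" -> "".
labels = [tok for tok, _ in sorted(vocab.items(), key=lambda kv: kv[1])]
labels = [{"|": " ", "[UNK]": "\u2047", "[PAD]": ""}.get(tok, tok) for tok in labels]

# The stored alphabet additionally appends "<s>" and "</s>";
# the first 55 entries should match the vocabulary exactly.
print(labels == alphabet[: len(labels)])  # expected: True
```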