g8a9 committed
Commit 3ca9aa8
Parent: b4b8ea8

Upload lm-boosted decoder

.gitattributes CHANGED
@@ -28,3 +28,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  5gram-it-cv-eos.arpa filter=lfs diff=lfs merge=lfs -text
  kenlm_training_text.txt filter=lfs diff=lfs merge=lfs -text
  5gram-it-multi-ds-eos.arpa filter=lfs diff=lfs merge=lfs -text
+ language_model/5gram-it-ds-ytsv2.arpa filter=lfs diff=lfs merge=lfs -text
alphabet.json ADDED
@@ -0,0 +1 @@
+ {"labels": [" ", "<", "=", ">", "[", "]", "_", "`", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "{", "}", "~", "\u00a1", "\u00ab", "\u00b0", "\u00b4", "\u00b5", "\u00ba", "\u00bb", "\u00df", "\u00e0", "\u00e1", "\u00e3", "\u00e4", "\u00e5", "\u00e6", "\u00e8", "\u00e9", "\u00ea", "\u00eb", "\u00ec", "\u00ed", "\u00ee", "\u00ef", "\u00f0", "\u00f1", "\u00f2", "\u00f3", "\u00f4", "\u00f6", "\u00f8", "\u00f9", "\u00fa", "\u00fb", "\u00fc", "\u00fe", "\u00ff", "\u0101", "\u0105", "\u0107", "\u010d", "\u0111", "\u0117", "\u0119", "\u011b", "\u011f", "\u0127", "\u012b", "\u0131", "\u013e", "\u0142", "\u0144", "\u0148", "\u014d", "\u0151", "\u0153", "\u0159", "\u015b", "\u015f", "\u0161", "\u016b", "\u016d", "\u017a", "\u017c", "\u017e", "\u0219", "\u021b", "\u0259", "\u02b9", "\u02bb", "\u02bc", "\u02be", "\u02bf", "\u02d0", "\u0307", "\u0328", "\u0430", "\u0431", "\u0434", "\u0435", "\u043b", "\u043d", "\u043e", "\u0441", "\u0443", "\u0446", "\u044a", "\u0451", "\u0459", "\u04a3", "\u05d3", "\u05d4", "\u05d5", "\u0629", "\u0631", "\u0633", "\u0635", "\u063a", "\u0644", "\u064a", "\u1e25", "\u1e5b", "\u1e63", "\u1e6d", "\u1ec5", "\u2011", "\u2013", "\u2014", "\u2019", "\u201e", "\u2026", "\u2032", "\u2606", "\u3042", "\u30a2", "\u30ab", "\u30ad", "\u30b5", "\u30b6", "\u30ce", "\u30d5", "\u30ea", "\u30f3", "\u30fb", "\u30fc", "\u4e07", "\u4e09", "\u4e30", "\u53e4", "\u591a", "\u5bb6", "\u5cf0", "\u5f35", "\u65c5", "\u7985", "\ua78c", "\uc2ec", "\uc545", "\uc74c", "\uc911", "\u2047", "", "<s>", "</s>"], "is_bpe": false}
language_model/5gram-it-ds-ytsv2.arpa ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60235d34528b2dc4e53323d3732958a072712157e6af1d83785dd79755218051
+ size 1686657745
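The ARPA file itself lives in Git LFS, so the diff only records the pointer (hash plus size, about 1.7 GB). Fetching through huggingface_hub resolves the pointer to the real file; a hedged sketch, where the repo id is an assumption taken from name_or_path in tokenizer_config.json below:

```python
from huggingface_hub import hf_hub_download

# Download the LFS-backed ARPA file; the hub resolves the pointer above
# to the actual ~1.7 GB n-gram model. The repo id is assumed, not
# confirmed by this commit.
arpa_path = hf_hub_download(
    repo_id="dbdmg/wav2vec2-xls-r-300m-italian",
    filename="language_model/5gram-it-ds-ytsv2.arpa",
)
print(arpa_path)
```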
language_model/attrs.json ADDED
@@ -0,0 +1 @@
+ {"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
language_model/unigrams.txt ADDED
The diff for this file is too large to render. See raw diff
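unigrams.txt lists the LM vocabulary, one word per line, which pyctcdecode uses for partial-word scoring during beam search. Since the commit lays files out in pyctcdecode's save_to_dir structure (alphabet.json at the root; attrs.json, unigrams.txt, and the ARPA file under language_model/), the whole decoder can be rebuilt in one call; a sketch assuming that layout holds:

```python
from pyctcdecode import BeamSearchDecoderCTC

# load_from_dir reads alphabet.json plus everything under language_model/
# (attrs.json, unigrams.txt, and the KenLM file) and returns a ready-made
# LM-boosted decoder. "." assumes a local checkout of this repo.
decoder = BeamSearchDecoderCTC.load_from_dir(".")
```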
 
special_tokens_map.json CHANGED
@@ -1 +1 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json CHANGED
@@ -1 +1 @@
- {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "../wav2vec2-xls-r-300m-italian", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "dbdmg/wav2vec2-xls-r-300m-italian", "tokenizer_class": "Wav2Vec2CTCTokenizer"}