vumichien committed on
Commit
74f6be3
1 Parent(s): c65c0cc

Upload LM model

alphabet.json ADDED
@@ -0,0 +1 @@
+ {"labels": ["v", "\u3059", "\u304a", "\u3056", "\u3093", "n", "\u3075", "\u3052", "\u3082", "\u307f", "\u307e", "\u3049", "w", "\u3055", "\u3094", "\u306e", "\u3066", "\u3069", "q", "\u3076", "\u3060", "\u3081", "\u3080", "\u304c", "\u3079", "e", "\u3071", "\u3051", "c", "\u304f", "\u3042", "\u305f", "\u3062", "\u3096", "\u307c", "t", "l", "\u3047", "p", "f", "\u3068", "\u3092", "\u3070", "\u307d", "\u30fc", "\u3084", "\u305e", "\u306a", "\u3064", "\u3044", "\u308d", "\u3078", "\u306f", "\u306d", "\u308a", "\u307a", "\u307b", "\u304e", "y", "g", "\u3043", "\u3072", "\u305d", "&", "\u305b", "m", "\u3058", "\u3088", "o", " ", "z", "\u308b", "\u3053", "\u3005", "\u3061", "\u3086", "\u3045", "\u3063", "\u3041", "\u3046", "\u3057", "\u308f", "\u308c", "k", "a", "\u3054", "\u3067", "u", "\u3065", "b", "\u3083", "\u3085", "\u305c", "d", "\u304d", "\u3050", "\u3087", "\u7e6b", "\u306c", "\u3073", "\u306b", "h", "\u304b", "r", "\\", "i", "\u3074", "\u3089", "x", "\u3077", "j", "\u3048", "s", "\u305a", "\u2047", "", "<s>", "</s>"], "is_bpe": false}
language_model/5gram.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59e31dc14e41f84c5c5e267870c688ad08f0b6f3aca170b2fddfc58fbd39e9f8
+ size 359416671
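The committed 5gram.bin is a Git LFS pointer (version, oid, size); the actual KenLM binary of roughly 359 MB lives in LFS storage. A hedged sketch of fetching the resolved file with huggingface_hub, assuming the repo id matches the name_or_path recorded in tokenizer_config.json below:

from huggingface_hub import hf_hub_download

# Assumed repo id, taken from "name_or_path" further down in this commit;
# hf_hub_download resolves the LFS pointer and returns a local path to the
# actual 5-gram KenLM binary.
lm_path = hf_hub_download(
    repo_id="vumichien/wav2vec2-xls-r-300m-japanese-large-ver2",
    filename="language_model/5gram.bin",
)
print(lm_path)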
language_model/attrs.json ADDED
@@ -0,0 +1 @@
+ {"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
language_model/unigrams.txt ADDED
The diff for this file is too large to render.
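Together, the language_model/ files carry everything pyctcdecode needs: the KenLM 5-gram binary, the decoder settings in attrs.json (alpha weights the LM score, beta is the word-insertion bonus), and the unigram list used for scoring. Wav2Vec2ProcessorWithLM reads these automatically, but a minimal sketch of rebuilding the decoder by hand, assuming the files have been downloaded locally, could look like this:

import json
from pyctcdecode import build_ctcdecoder

# Rebuild the beam-search decoder from the committed files (normally
# Wav2Vec2ProcessorWithLM does this for you).
with open("alphabet.json", encoding="utf-8") as f:
    labels = json.load(f)["labels"]
with open("language_model/attrs.json", encoding="utf-8") as f:
    attrs = json.load(f)  # {"alpha": 0.5, "beta": 1.5, ...}
with open("language_model/unigrams.txt", encoding="utf-8") as f:
    unigrams = [line.strip() for line in f if line.strip()]

decoder = build_ctcdecoder(
    labels,
    kenlm_model_path="language_model/5gram.bin",
    unigrams=unigrams,
    alpha=attrs["alpha"],                       # LM weight
    beta=attrs["beta"],                         # word-insertion bonus
    unk_score_offset=attrs["unk_score_offset"],
    lm_score_boundary=attrs["score_boundary"],
)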
preprocessor_config.json CHANGED
@@ -4,6 +4,7 @@
  "feature_size": 1,
  "padding_side": "right",
  "padding_value": 0,
+ "processor_class": "Wav2Vec2ProcessorWithLM",
  "return_attention_mask": true,
  "sampling_rate": 16000
 }
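The only change here is the new "processor_class" key; it lets AutoProcessor dispatch to Wav2Vec2ProcessorWithLM (which picks up the language_model/ files above) instead of the plain Wav2Vec2Processor. A short sketch, with the repo id assumed from tokenizer_config.json below:

from transformers import AutoProcessor, Wav2Vec2ProcessorWithLM

# "processor_class" in preprocessor_config.json tells AutoProcessor which
# processor class to instantiate for this repo.
processor = AutoProcessor.from_pretrained(
    "vumichien/wav2vec2-xls-r-300m-japanese-large-ver2"
)
assert isinstance(processor, Wav2Vec2ProcessorWithLM)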
special_tokens_map.json CHANGED
@@ -1 +1 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json CHANGED
@@ -1 +1 @@
- {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "special_tokens_map_file": null, "name_or_path": "wav2vec2-xls-r-300m-japanese-vocab/", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "special_tokens_map_file": null, "name_or_path": "vumichien/wav2vec2-xls-r-300m-japanese-large-ver2", "tokenizer_class": "Wav2Vec2CTCTokenizer", "processor_class": "Wav2Vec2ProcessorWithLM"}