arampacha committed on
Commit
6c8024b
1 Parent(s): a69b27b

upd readme

Files changed (1)
  1. README.md +6 -6
README.md CHANGED
@@ -25,9 +25,9 @@ model-index:
     value: 37.72
 ---
 
-# Wav2Vec2-Large-XLSR-53-Chech
+# Wav2Vec2-Large-XLSR-53-Ukrainian
 
-Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Czech using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
+Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Ukrainian using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
 
 When using this model, make sure that your speech input is sampled at 16kHz.
 
@@ -42,7 +42,7 @@ import torchaudio
 from datasets import load_dataset
 from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 
-test_dataset = load_dataset("common_voice", "cs", split="test[:2%]")
+test_dataset = load_dataset("common_voice", "uk", split="test[:2%]")
 
 processor = Wav2Vec2Processor.from_pretrained("arampacha/wav2vec2-large-xlsr-ukrainian")
 model = Wav2Vec2ForCTC.from_pretrained("arampacha/wav2vec2-large-xlsr-ukrainian")
@@ -82,11 +82,11 @@ from datasets import load_dataset, load_metric
 from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 import re
 
-test_dataset = load_dataset("common_voice", "cs", split="test")
+test_dataset = load_dataset("common_voice", "uk", split="test")
 
 wer = load_metric("wer")
-processor = Wav2Vec2Processor.from_pretrained("arampacha/wav2vec2-large-xlsr-czech")
-model = Wav2Vec2ForCTC.from_pretrained("arampacha/wav2vec2-large-xlsr-czech")
+processor = Wav2Vec2Processor.from_pretrained("arampacha/wav2vec2-large-xlsr-ukrainian")
+model = Wav2Vec2ForCTC.from_pretrained("arampacha/wav2vec2-large-xlsr-ukrainian")
 model.to("cuda")
 
 chars_to_ignore = [",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�", '«', '»', '—', '…', '(', ')', '*', '”', '“']
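
The diff only shows fragments of the README's usage snippet (dataset, processor, and model loading). For context, a minimal sketch of the inference flow those fragments belong to, following the standard XLSR-53 model-card template; the resampling, batching, and decoding steps are assumptions not visible in this diff.

```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Small slice of the Ukrainian Common Voice test split, as in the diff.
test_dataset = load_dataset("common_voice", "uk", split="test[:2%]")

processor = Wav2Vec2Processor.from_pretrained("arampacha/wav2vec2-large-xlsr-ukrainian")
model = Wav2Vec2ForCTC.from_pretrained("arampacha/wav2vec2-large-xlsr-ukrainian")

# Common Voice clips are 48 kHz; the model card notes input must be 16 kHz
# (assumption: standard template step, not shown in the diff).
resampler = torchaudio.transforms.Resample(48_000, 16_000)

def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```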
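The evaluation hunk likewise shows only the dataset, WER metric, model loading, and the character-filter list. A sketch of how those pieces typically fit together to produce the WER reported in the model index; this again follows the common XLSR evaluation template, and the regex construction, mapping, and decoding steps are assumptions.

```python
import re
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "uk", split="test")
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("arampacha/wav2vec2-large-xlsr-ukrainian")
model = Wav2Vec2ForCTC.from_pretrained("arampacha/wav2vec2-large-xlsr-ukrainian")
model.to("cuda")

# Punctuation and symbols stripped from reference transcripts before scoring
# (list taken from the diff; building a regex from it is an assumption).
chars_to_ignore = [",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�", '«', '»', '—', '…', '(', ')', '*', '”', '“']
chars_to_ignore_regex = f"[{re.escape(''.join(chars_to_ignore))}]"

resampler = torchaudio.transforms.Resample(48_000, 16_000)

def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```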