speech-test committed
Commit • fc13f44
1 Parent(s): 87491cf
Flexible resampling
README.md
CHANGED
@@ -23,7 +23,7 @@ model-index:
 metrics:
 - name: Test WER
   type: wer
-  value: 32.
+  value: 32.29
 ---
 
 # Wav2Vec2-Large-XLSR-53-Ukrainian
@@ -82,7 +82,7 @@ from tqdm.auto import tqdm
 from datasets import load_metric
 from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 
-# Download the raw data instead of using HF datasets to save space
+# Download the raw data instead of using HF datasets to save disk space
 data_url = "https://voice-prod-bundler-ee1969a6ce8178826482b88e843c335139bd3fb4.s3.amazonaws.com/cv-corpus-6.1-2020-12-11/uk.tar.gz"
 filestream = urllib.request.urlopen(data_url)
 data_file = tarfile.open(fileobj=filestream, mode="r|gz")
@@ -107,14 +107,13 @@ def clean_sentence(sent):
     sent = " ".join(sent.split())
     return sent
 
-resampler = torchaudio.transforms.Resample(48_000, 16_000)
-
 targets = []
 preds = []
 
 for i, row in tqdm(cv_test.iterrows(), total=cv_test.shape[0]):
     row["sentence"] = clean_sentence(row["sentence"])
     speech_array, sampling_rate = torchaudio.load(clips_path + row["path"])
+    resampler = torchaudio.transforms.Resample(sampling_rate, 16_000)
     row["speech"] = resampler(speech_array).squeeze().numpy()
 
     inputs = processor(row["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
@@ -130,11 +129,10 @@ for i, row in tqdm(cv_test.iterrows(), total=cv_test.shape[0]):
 print("WER: {:2f}".format(100 * wer.compute(predictions=preds, references=targets)))
 ```
 
-**Test Result**: 32.
+**Test Result**: 32.29 %
 
 
 ## Training
 
 The Common Voice `train` and `validation` datasets were used for training.
 
-The script used for training can be found [here](github.com)
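The substance of this commit is the resampler change: the old script built a single `Resample(48_000, 16_000)` transform up front, which mis-resampled any clip not recorded at 48 kHz, while the new code builds the transform from each clip's actual `sampling_rate`. Constructing a `Resample` per row does repeat the transform's setup work on every clip, so a cached variant is a natural refinement. A minimal sketch of that idea (the `resample_to_16k` helper and its cache are illustrative, not part of this repo):

```python
import torchaudio

# Illustrative sketch (not part of this repo): keep one Resample
# transform per source sample rate so each transform is built only once.
_resamplers = {}

def resample_to_16k(speech_array, sampling_rate):
    """Return a clip loaded by torchaudio as a 1-D 16 kHz numpy array."""
    if sampling_rate != 16_000:
        if sampling_rate not in _resamplers:
            _resamplers[sampling_rate] = torchaudio.transforms.Resample(
                sampling_rate, 16_000
            )
        speech_array = _resamplers[sampling_rate](speech_array)
    return speech_array.squeeze().numpy()
```

With this helper the loop body would read `row["speech"] = resample_to_16k(speech_array, sampling_rate)`, keeping the per-clip flexibility the commit introduces while avoiding rebuilding the transform on every row.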
|