speech-test committed on
Commit d34e1f9
1 Parent(s): fb04b90

New model results

Files changed (1)
  1. README.md +48 -41
README.md CHANGED
@@ -23,7 +23,7 @@ model-index:
    metrics:
    - name: Test WER
      type: wer
-     value: 22.36
+     value: 18.44
 ---
 
 # Wav2Vec2-Large-XLSR-53-Russian
@@ -49,17 +49,17 @@ model = Wav2Vec2ForCTC.from_pretrained("anton-l/wav2vec2-large-xlsr-53-russian")
 resampler = torchaudio.transforms.Resample(48_000, 16_000)
 
 # Preprocessing the datasets.
-# We need to read the aduio files as arrays
+# We need to read the audio files as arrays
 def speech_file_to_array_fn(batch):
-\tspeech_array, sampling_rate = torchaudio.load(batch["path"])
-\tbatch["speech"] = resampler(speech_array).squeeze().numpy()
-\treturn batch
+    speech_array, sampling_rate = torchaudio.load(batch["path"])
+    batch["speech"] = resampler(speech_array).squeeze().numpy()
+    return batch
 
 test_dataset = test_dataset.map(speech_file_to_array_fn)
 inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
 
 with torch.no_grad():
-\tlogits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
+    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
 
 predicted_ids = torch.argmax(logits, dim=-1)
@@ -70,67 +70,74 @@ print("Reference:", test_dataset["sentence"][:2])
 
 ## Evaluation
 
-The model can be evaluated as follows on the {language} test data of Common Voice. # TODO: replace #TODO: replace language with your {language}, *e.g.* French
+The model can be evaluated as follows on the Russian test data of Common Voice.
 
 
 ```python
 import torch
 import torchaudio
-from datasets import load_dataset, load_metric
+import urllib.request
+import tarfile
+import pandas as pd
+from tqdm.auto import tqdm
+from datasets import load_metric
 from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
-import re
 
-test_dataset = load_dataset("common_voice", "ru", split="test")
+# Download the raw data instead of using HF datasets to save disk space
+data_url = "https://voice-prod-bundler-ee1969a6ce8178826482b88e843c335139bd3fb4.s3.amazonaws.com/cv-corpus-6.1-2020-12-11/ru.tar.gz"
+filestream = urllib.request.urlopen(data_url)
+data_file = tarfile.open(fileobj=filestream, mode="r|gz")
+data_file.extractall()
+
 wer = load_metric("wer")
 
 processor = Wav2Vec2Processor.from_pretrained("anton-l/wav2vec2-large-xlsr-53-russian")
 model = Wav2Vec2ForCTC.from_pretrained("anton-l/wav2vec2-large-xlsr-53-russian")
 model.to("cuda")
 
-resampler = torchaudio.transforms.Resample(48_000, 16_000)
+cv_test = pd.read_csv("cv-corpus-6.1-2020-12-11/ru/test.tsv", sep='\t')
+clips_path = "cv-corpus-6.1-2020-12-11/ru/clips/"
 
 def clean_sentence(sent):
-\tsent = sent.lower()
-\t# replace non-alphanumeric characters with space ("какой-то, вот" -> "какой то вот")
-\tsent = "".join(ch if ch.isalnum() else " " for ch in sent)
-\t# remove repeated spaces
-\tsent = " ".join(sent.split())
-\t# these letters are considered equivalent in written Russian
-\tsent = sent.replace('ё', 'е')
-\treturn sent
+    sent = sent.lower()
+    # these letters are considered equivalent in written Russian
+    sent = sent.replace('ё', 'е')
+    # replace non-alpha characters with space
+    sent = "".join(ch if ch.isalpha() else " " for ch in sent)
+    # remove repeated spaces
+    sent = " ".join(sent.split())
+    return sent
 
-# Preprocessing the datasets.
-# We need to read the aduio files as arrays
-def speech_file_to_array_fn(batch):
-\tbatch["sentence"] = clean_sentence(batch["sentence"])
-\tspeech_array, sampling_rate = torchaudio.load(batch["path"])
-\tbatch["speech"] = resampler(speech_array).squeeze().numpy()
-\treturn batch
+targets = []
+preds = []
 
-test_dataset = test_dataset.map(speech_file_to_array_fn)
+for i, row in tqdm(cv_test.iterrows(), total=cv_test.shape[0]):
+    row["sentence"] = clean_sentence(row["sentence"])
+    speech_array, sampling_rate = torchaudio.load(clips_path + row["path"])
+    resampler = torchaudio.transforms.Resample(sampling_rate, 16_000)
+    row["speech"] = resampler(speech_array).squeeze().numpy()
 
-# Preprocessing the datasets.
-# We need to read the aduio files as arrays
-def evaluate(batch):
-\tinputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
+    inputs = processor(row["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
 
-\twith torch.no_grad():
-\t\tlogits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
+    with torch.no_grad():
+        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
 
-\tpred_ids = torch.argmax(logits, dim=-1)
-\tbatch["pred_strings"] = processor.batch_decode(pred_ids)
-\treturn batch
+    pred_ids = torch.argmax(logits, dim=-1)
 
-result = test_dataset.map(evaluate, batched=True, batch_size=8)
+    targets.append(row["sentence"])
+    preds.append(processor.batch_decode(pred_ids)[0])
 
-print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
+# free up some memory
+del model
+del processor
+del cv_test
+
+print("WER: {:.2f}".format(100 * wer.compute(predictions=preds, references=targets)))
 ```
 
-**Test Result**: 22.36 %
+**Test Result**: 18.44 %
 
 
 ## Training
 
 The Common Voice `train` and `validation` datasets were used for training.
-
-The script used for training can be found [here](github.com)
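A few notes on the updated evaluation script follow.

The download step streams the Common Voice archive straight out of the HTTP response instead of saving the `.tar.gz` to disk first. A minimal standalone sketch of that pattern, reusing the card's `data_url`: the `"r|gz"` mode treats the file object as a forward-only gzip stream, so the non-seekable HTTP response is sufficient (the more common `"r:gz"` mode would require a seekable file).

```python
import tarfile
import urllib.request

data_url = "https://voice-prod-bundler-ee1969a6ce8178826482b88e843c335139bd3fb4.s3.amazonaws.com/cv-corpus-6.1-2020-12-11/ru.tar.gz"

with urllib.request.urlopen(data_url) as filestream:
    # "r|gz" reads a gzip-compressed tar as a forward-only stream:
    # no seeking is ever attempted on the underlying HTTP response.
    with tarfile.open(fileobj=filestream, mode="r|gz") as data_file:
        data_file.extractall()
```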
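The rewritten `clean_sentence` is not just reformatted: switching from `isalnum()` to `isalpha()` means digits are now replaced with spaces as well, and the `ё` to `е` folding happens before the character filter. A quick check of the committed function (the sample strings are illustrative, not taken from Common Voice):

```python
def clean_sentence(sent):
    sent = sent.lower()
    # these letters are considered equivalent in written Russian
    sent = sent.replace('ё', 'е')
    # replace non-alpha characters with space
    sent = "".join(ch if ch.isalpha() else " " for ch in sent)
    # remove repeated spaces
    sent = " ".join(sent.split())
    return sent

assert clean_sentence("Какой-то, вот!") == "какой то вот"
assert clean_sentence("Ёлка 2021 года") == "елка года"  # digits are dropped too
```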
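Inside the loop, a new `torchaudio.transforms.Resample` is constructed per clip so that any source rate is handled (the old script hard-coded 48 kHz). Since almost all Common Voice clips share a single rate, a cached variant would avoid rebuilding the module on every iteration; `get_resampler` below is a hypothetical refactor, not part of this commit:

```python
from functools import lru_cache

import torchaudio


@lru_cache(maxsize=None)
def get_resampler(orig_freq, new_freq=16_000):
    # One Resample module per distinct source rate, reused across clips.
    return torchaudio.transforms.Resample(orig_freq, new_freq)

# In the loop this would replace the per-row construction:
#     speech = get_resampler(sampling_rate)(speech_array).squeeze().numpy()
```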
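The `torch.argmax(logits, dim=-1)` plus `processor.batch_decode(pred_ids)` pair is greedy CTC decoding: the tokenizer merges consecutive repeated ids and strips the blank token before mapping ids to characters. In miniature, with a toy vocabulary where id 0 stands for the CTC blank (illustrative only, not the model's real vocabulary):

```python
# Greedy CTC collapse: merge consecutive repeats, then drop blanks (id 0).
frame_ids = [0, 5, 5, 0, 3, 3, 3, 0, 0, 7]

decoded = []
prev = None
for i in frame_ids:
    if i != prev and i != 0:
        decoded.append(i)
    prev = i

print(decoded)  # [5, 3, 7]
```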
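Finally, `wer.compute` returns a fraction (word-level edit distance divided by the number of reference words), which is why the script multiplies by 100 before printing. A toy sanity check with made-up strings:

```python
from datasets import load_metric

wer = load_metric("wer")

# One deleted word against a three-word reference -> 1/3.
score = wer.compute(predictions=["привет мир"], references=["привет весь мир"])
print("WER: {:.2f}".format(100 * score))  # 33.33
```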