jonatasgrosman committed on
Commit
e4ab2bd
1 Parent(s): 666ada0

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +17 -10
README.md CHANGED
@@ -133,22 +133,29 @@ test_dataset = test_dataset.map(speech_file_to_array_fn)
133
  # Preprocessing the datasets.
134
  # We need to read the audio files as arrays
135
  def evaluate(batch):
136
- inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
137
 
138
- with torch.no_grad():
139
- logits = model(inputs.input_values.to(DEVICE), attention_mask=inputs.attention_mask.to(DEVICE)).logits
140
 
141
- pred_ids = torch.argmax(logits, dim=-1)
142
- batch["pred_strings"] = processor.batch_decode(pred_ids)
143
- return batch
144
 
145
  result = test_dataset.map(evaluate, batched=True, batch_size=8)
146
 
147
- print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"], chunk_size=1000)))
148
- print("CER: {:2f}".format(100 * cer.compute(predictions=result["pred_strings"], references=result["sentence"], chunk_size=1000)))
 
 
 
149
  ```
150
 
151
  **Test Result**:
152
 
153
- - WER: 16.79%
154
- - CER: 3.68%
 
 
 
 
133
  # Preprocessing the datasets.
134
  # We need to read the audio files as arrays
135
  def evaluate(batch):
136
+ 	inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
137
 
138
+ 	with torch.no_grad():
139
+ 		logits = model(inputs.input_values.to(DEVICE), attention_mask=inputs.attention_mask.to(DEVICE)).logits
140
 
141
+ 	pred_ids = torch.argmax(logits, dim=-1)
142
+ 	batch["pred_strings"] = processor.batch_decode(pred_ids)
143
+ 	return batch
144
 
145
  result = test_dataset.map(evaluate, batched=True, batch_size=8)
146
 
147
+ predictions = [x.upper() for x in result["pred_strings"]]
148
+ references = [x.upper() for x in result["sentence"]]
149
+
150
+ print(f"WER: {wer.compute(predictions=predictions, references=references, chunk_size=1000) * 100}")
151
+ print(f"CER: {cer.compute(predictions=predictions, references=references, chunk_size=1000) * 100}")
152
  ```
153
 
154
  **Test Result**:
155
 
156
+ My model may report better scores than others because of some specificity of my evaluation script, so I ran the same evaluation script on other models (on 2021-04-22) to make a fairer comparison.
157
+
158
+ | Model | WER | CER |
159
+ | ------------- | ------------- | ------------- |
160
+ | jonatasgrosman/wav2vec2-large-xlsr-53-russian | **16.79%** | **3.68%** |
161
+ | anton-l/wav2vec2-large-xlsr-53-russian | 19.49% | 4.15% |