cpierse committed
Commit 7d7ac4c
1 Parent(s): 511dd18

Update README.md

Files changed (1)
  1. README.md +18 -2
README.md CHANGED
@@ -79,6 +79,22 @@ import torchaudio
 from datasets import load_dataset, load_metric
 from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 import re
+import jiwer
+
+def chunked_wer(targets, predictions, chunk_size=None):
+    if chunk_size is None: return jiwer.wer(targets, predictions)
+    start = 0
+    end = chunk_size
+    H, S, D, I = 0, 0, 0, 0
+    while start < len(targets):
+        chunk_metrics = jiwer.compute_measures(targets[start:end], predictions[start:end])
+        H = H + chunk_metrics["hits"]
+        S = S + chunk_metrics["substitutions"]
+        D = D + chunk_metrics["deletions"]
+        I = I + chunk_metrics["insertions"]
+        start += chunk_size
+        end += chunk_size
+    return float(S + D + I) / float(H + S + D)
 
 test_dataset = load_dataset("common_voice", "eo", split="test") #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site.
 wer = load_metric("wer")
@@ -87,7 +103,7 @@ processor = Wav2Vec2Processor.from_pretrained("cpierse/wav2vec2-large-xlsr-53-es
 model = Wav2Vec2ForCTC.from_pretrained("cpierse/wav2vec2-large-xlsr-53-esperanto")
 model.to("cuda")
 
-chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“]'
+chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\„\«\(\»\)\’\']'
 resampler = torchaudio.transforms.Resample(48_000, 16_000)
 
 # Preprocessing the datasets.
@@ -114,7 +130,7 @@ def evaluate(batch):
 
 result = test_dataset.map(evaluate, batched=True, batch_size=8)
 
-print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
+print("WER: {:.2f}".format(100 * chunked_wer(predictions=result["pred_strings"], targets=result["sentence"], chunk_size=2000)))
 ```
 
 **Test Result**: 14.36 %
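
As a quick sanity check of the `chunked_wer` helper introduced in this diff, the sketch below exercises the same logic on a handful of made-up Esperanto sentences; the data and `chunk_size=2` are purely illustrative, and it assumes a jiwer 2.x release where `jiwer.compute_measures` returns `hits`/`substitutions`/`deletions`/`insertions` counts. On versions that align each reference/prediction pair independently, the chunked total should match a single `jiwer.wer` call, since the per-chunk error counts simply add up.

```python
# Illustrative check of the chunked WER helper added above.
# Sentences and chunk_size are made up for this example.
import jiwer

def chunked_wer(targets, predictions, chunk_size=None):
    # Same logic as the README helper: accumulate hit/substitution/
    # deletion/insertion counts chunk by chunk, then form WER once.
    if chunk_size is None:
        return jiwer.wer(targets, predictions)
    start, end = 0, chunk_size
    H, S, D, I = 0, 0, 0, 0
    while start < len(targets):
        m = jiwer.compute_measures(targets[start:end], predictions[start:end])
        H += m["hits"]
        S += m["substitutions"]
        D += m["deletions"]
        I += m["insertions"]
        start += chunk_size
        end += chunk_size
    return float(S + D + I) / float(H + S + D)

targets = ["la hundo kuras", "mi amas kafon", "bonan matenon al vi"]
predictions = ["la hundo kuris", "mi amas kafon", "bona mateno al vi"]

print(jiwer.wer(targets, predictions))                  # single call over the full lists
print(chunked_wer(targets, predictions, chunk_size=2))  # chunked accumulation, should agree
```

Accumulating raw counts rather than averaging per-chunk WER values keeps the final figure weighted by reference length, which appears to be the point of tracking H/S/D/I instead of calling `jiwer.wer` on each chunk.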