jonatasgrosman committed
Commit 52090f5
1 parent: eb3daff

Update README.md

Files changed (1): README.md (+8 −8)
README.md CHANGED
@@ -24,10 +24,10 @@ model-index:
     metrics:
     - name: Test WER
       type: wer
-      value: 11.90
+      value: 30.12
    - name: Test CER
       type: cer
-      value: 2.94
+      value: 7.37
 ---
 
 # Wav2Vec2-Large-XLSR-53-Persian
@@ -132,14 +132,14 @@ test_dataset = test_dataset.map(speech_file_to_array_fn)
 # Preprocessing the datasets.
 # We need to read the audio files as arrays
 def evaluate(batch):
-    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
+\tinputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
 
-    with torch.no_grad():
-        logits = model(inputs.input_values.to(DEVICE), attention_mask=inputs.attention_mask.to(DEVICE)).logits
+\twith torch.no_grad():
+\t\tlogits = model(inputs.input_values.to(DEVICE), attention_mask=inputs.attention_mask.to(DEVICE)).logits
 
-    pred_ids = torch.argmax(logits, dim=-1)
-    batch["pred_strings"] = processor.batch_decode(pred_ids)
-    return batch
+\tpred_ids = torch.argmax(logits, dim=-1)
+\tbatch["pred_strings"] = processor.batch_decode(pred_ids)
+\treturn batch
 
 result = test_dataset.map(evaluate, batched=True, batch_size=8)
 
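For context, the second hunk only reindents the `evaluate` helper from this model card's evaluation snippet. Below is a minimal, self-contained sketch of how that snippet is typically wired up; the `MODEL_ID`, `DEVICE`, `processor`, `model`, and `test_dataset` names are assumptions drawn from the usual structure of this README, not from this commit itself.

```python
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Assumed setup (not part of this commit's hunks):
MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-persian"  # hypothetical repo id
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID).to(DEVICE)

def evaluate(batch):
    # batch["speech"] is assumed to hold 16 kHz waveform arrays
    inputs = processor(batch["speech"], sampling_rate=16_000,
                       return_tensors="pt", padding=True)

    with torch.no_grad():
        logits = model(inputs.input_values.to(DEVICE),
                       attention_mask=inputs.attention_mask.to(DEVICE)).logits

    # Greedy CTC decoding: take the most likely token per frame, then decode
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

# test_dataset is assumed to be a 🤗 Datasets split with a "speech" column:
# result = test_dataset.map(evaluate, batched=True, batch_size=8)
```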