tweak
README.md CHANGED
@@ -54,7 +54,7 @@ print("Prediction:", processor.batch_decode(predicted_ids))
 print("Reference:", test_dataset["sentence"][:2])
 ```
 ## Evaluation
-The model can be evaluated as follows on the
+The model can be evaluated as follows on the Latvian test data of Common Voice.
 ```python
 import torch
 import torchaudio
@@ -77,7 +77,7 @@ def speech_file_to_array_fn(batch):
 	return batch
 test_dataset = test_dataset.map(speech_file_to_array_fn)
 # Preprocessing the datasets.
-# We need to read the
+# We need to read the audio files as arrays
 def evaluate(batch):
 	inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
 	with torch.no_grad():
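For reference, the lines touched above are fragments of the README's evaluation script. Below is a minimal, self-contained sketch of how such an evaluation block is typically assembled for a Common Voice wav2vec2 model card; it is not copied from this repository. `MODEL_ID`, the punctuation-stripping regex, and the batch size are placeholders or assumptions, not values taken from the diff.

```python
import re

import torch
import torchaudio
from datasets import load_dataset, load_metric  # load_metric is available in older datasets releases
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

MODEL_ID = "<wav2vec2-large-xlsr-latvian-checkpoint>"  # placeholder, not shown in the diff

# Latvian test split of Common Voice, as referenced in the edited sentence.
test_dataset = load_dataset("common_voice", "lv", split="test")
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)

# Assumed punctuation set; the actual regex is not shown in the diff.
chars_to_ignore_regex = r'[,?.!\-;:"“]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Read the audio files as arrays (the helper the second hunk documents).
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Batched greedy decoding, as in the snippet the first hunk edits.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```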