Update README.md
README.md CHANGED
@@ -29,61 +29,51 @@ When using this model, make sure that your speech input is sampled at 16kHz.
 ## Usage
 The model can be used directly (without a language model) as follows:
 ```python
-import torch
-import torchaudio
-from datasets import load_dataset
-from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
-test_dataset = load_dataset("common_voice", "tr", split="test[:2%]")
-processor = Wav2Vec2Processor.from_pretrained("akashpb13/wav2vec2-large-xlsr-Maltese")
-model = Wav2Vec2ForCTC.from_pretrained("akashpb13/wav2vec2-large-xlsr-Maltese")
-resampler = torchaudio.transforms.Resample(48_000, 16_000)
-# Preprocessing the datasets.
-# We need to read the audio files as arrays
-def speech_file_to_array_fn(batch):
-    speech_array, sampling_rate = torchaudio.load(batch["path"])
-    batch["speech"] = resampler(speech_array).squeeze().numpy()
-    return batch
-test_dataset = test_dataset.map(speech_file_to_array_fn)
-inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
-with torch.no_grad():
-    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
-predicted_ids = torch.argmax(logits, dim=-1)
-print("Prediction:", processor.batch_decode(predicted_ids))
-print("Reference:", test_dataset["sentence"][:2])
-```
-## Evaluation
-The model can be evaluated as follows on the {language} test data of Common Voice.
-```python
-import torch
 import torchaudio
 from datasets import load_dataset, load_metric
-from transformers import
+from transformers import (
+    Wav2Vec2ForCTC,
+    Wav2Vec2Processor,
+)
+import torch
 import re
-…
+import sys
+
+model_name = "Akashpb13/xlsr_maltese_wav2vec2"
+device = "cuda"
+chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\)\(\*)]'
+
+model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device)
+processor = Wav2Vec2Processor.from_pretrained(model_name)
+
+ds = load_dataset("common_voice", "mt", split="test", data_dir="./cv-corpus-6.1-2020-12-11")
+
+resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000)
+
+def map_to_array(batch):
+    speech, _ = torchaudio.load(batch["path"])
+    batch["speech"] = resampler.forward(speech.squeeze(0)).numpy()
+    batch["sampling_rate"] = resampler.new_freq
+    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() + " "
+    return batch
+
+ds = ds.map(map_to_array)
+
+def map_to_pred(batch):
+    features = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0], padding=True, return_tensors="pt")
+    input_values = features.input_values.to(device)
+    attention_mask = features.attention_mask.to(device)
+    with torch.no_grad():
+        logits = model(input_values, attention_mask=attention_mask).logits
     pred_ids = torch.argmax(logits, dim=-1)
-…
+    batch["predicted"] = processor.batch_decode(pred_ids)
+    batch["target"] = batch["sentence"]
+    return batch
+
+result = ds.map(map_to_pred, batched=True, batch_size=16, remove_columns=list(ds.features.keys()))
+
+wer = load_metric("wer")
+print(wer.compute(predictions=result["predicted"], references=result["target"]))
+
 ```
-**Test Result**: 32.
+**Test Result**: 32.83 %
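
For a quick sanity check of the updated card, the snippet below is a minimal single-clip transcription sketch; it is not part of the commit. The checkpoint id `Akashpb13/xlsr_maltese_wav2vec2` comes from the diff above, while `sample.wav` is a hypothetical local recording.

```python
# Minimal transcription sketch (illustration only, not from the model card).
# "sample.wav" is a hypothetical local clip of Maltese speech.
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_name = "Akashpb13/xlsr_maltese_wav2vec2"
processor = Wav2Vec2Processor.from_pretrained(model_name)
model = Wav2Vec2ForCTC.from_pretrained(model_name)

# Read the clip, downmix to mono, and resample to the 16 kHz the model expects.
speech, orig_rate = torchaudio.load("sample.wav")
speech = speech.mean(dim=0)
speech = torchaudio.transforms.Resample(orig_rate, 16_000)(speech)

inputs = processor(speech.numpy(), sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

# Greedy CTC decoding: pick the most likely token per frame, then collapse.
pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids)[0])
```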
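
The reported figure is a word error rate: `wer.compute` returns a fraction (substitutions, deletions, and insertions divided by the number of reference words), so an output of roughly 0.3283 is presented as 32.83 %. A toy illustration with made-up strings, assuming the `datasets` version current at the time (newer stacks expose the metric through the `evaluate` package instead):

```python
# Toy WER illustration with made-up strings (not model output).
# WER = (substitutions + deletions + insertions) / reference word count.
from datasets import load_metric  # newer stacks: evaluate.load("wer")

wer = load_metric("wer")
references = ["this is a six word reference"]    # 6 reference words
predictions = ["this is a six word guess"]       # one substituted word
print(wer.compute(predictions=predictions, references=references))  # 1/6 ≈ 0.167
```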