---
language: bem
datasets:
- BembaSpeech
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Bemba by Claytone Sikasote
  results:
  - task:
      name: Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: BembaSpeech bem
      type: bembaspeech
      args: bem
    metrics:
    - name: Test WER
      type: wer
      value: 42.14
---

# Wav2Vec2-Large-XLSR-53-Bemba

Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Bemba using the [BembaSpeech](https://csikasote.github.io/BembaSpeech) dataset. When using this model, make sure that your speech input is sampled at 16kHz.
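
If your recordings are at some other rate, resample them before inference. A minimal sketch using torchaudio, assuming a hypothetical local file `audio.wav`:

```python
import torchaudio

# "audio.wav" is a hypothetical example path.
speech_array, sampling_rate = torchaudio.load("audio.wav")

# Convert to the 16 kHz rate the model expects, if needed.
if sampling_rate != 16_000:
    resampler = torchaudio.transforms.Resample(sampling_rate, 16_000)
    speech_array = resampler(speech_array)
```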

## Usage

The model can be used directly (without a language model) as follows:

```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "{lang_id}", split="test[:2%]")

processor = Wav2Vec2Processor.from_pretrained("csikasote/wav2vec2-large-xlsr-bemba")
model = Wav2Vec2ForCTC.from_pretrained("csikasote/wav2vec2-large-xlsr-bemba")

resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the dataset:
# read the audio files as arrays and resample them to 16 kHz.
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)

print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```

## Evaluation

The model can be evaluated as follows on the Bemba test data of BembaSpeech.

```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re

test_dataset = load_dataset("common_voice", "{lang_id}", split="test")
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("csikasote/wav2vec2-large-xlsr-bemba")
model = Wav2Vec2ForCTC.from_pretrained("csikasote/wav2vec2-large-xlsr-bemba")
model.to("cuda")

chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“]'
#resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the dataset:
# normalize the transcripts and read the audio files as arrays.
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = speech_array.squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Run batched inference over the test set and collect the predicted strings.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)

    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits

    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)

print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```

**Test Result**: 42.14 %
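
For intuition on the number, the `wer` metric counts word-level substitutions, deletions and insertions against the reference transcript. A toy example with made-up strings:

```python
from datasets import load_metric

wer = load_metric("wer")

# The prediction drops one of the three reference words,
# so WER = 1 deletion / 3 reference words ≈ 0.33.
print(wer.compute(predictions=["uli shani"], references=["uli shani mukwai"]))
```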

## Training

The BembaSpeech `train`, `dev` and `test` splits were used for training, development and evaluation, respectively.

The script used for training can be found [here](https://colab.research.google.com/drive/1IgdR-EQq5rgmBqw5O6tcfJpmXM8rDX55?usp=sharing).
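
The notebook holds the complete recipe; as orientation only, a minimal sketch of the usual XLSR fine-tuning starting point (the real vocabulary, hyperparameters and data pipeline are all defined in the linked script):

```python
from transformers import Wav2Vec2ForCTC

# Illustrative starting point only; every actual setting lives in the linked Colab.
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-xlsr-53")

# Freezing the convolutional feature extractor is common practice when
# fine-tuning XLSR on a relatively small dataset such as BembaSpeech.
model.freeze_feature_extractor()
```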