bofenghuang
committed on
Commit
•
2f4ae8e
1
Parent(s):
9ae2cb9
updt README.md
Browse files
README.md
CHANGED
@@ -79,55 +79,78 @@ model-index:
|
|
79 |
# Fine-tuned Wav2Vec2 XLS-R 1B model for ASR in French
|
80 |
|
81 |
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on the POLINAETERNA/VOXPOPULI - FR dataset.
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
126 |
|
127 |
|
128 |
## Evaluation
|
129 |
|
130 |
-
1. To evaluate on `
|
131 |
|
132 |
```bash
|
133 |
python eval.py \
|
|
|
79 |
# Fine-tuned Wav2Vec2 XLS-R 1B model for ASR in French
|
80 |
|
81 |
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on the POLINAETERNA/VOXPOPULI - FR dataset.
|
82 |
+
|
83 |
+
|
84 |
+
## Usage
|
85 |
+
|
86 |
+
1. To use on a local audio file without the language model
|
87 |
+
|
88 |
+
```python
|
89 |
+
import torch
|
90 |
+
import torchaudio
|
91 |
+
|
92 |
+
from transformers import AutoModelForCTC, Wav2Vec2Processor
|
93 |
+
|
94 |
+
processor = Wav2Vec2Processor.from_pretrained("bhuang/wav2vec2-xls-r-1b-voxpopuli-fr")
|
95 |
+
model = AutoModelForCTC.from_pretrained("bhuang/wav2vec2-xls-r-1b-voxpopuli-fr").cuda()
|
96 |
+
|
97 |
+
# path to your audio file
|
98 |
+
wav_path = "/projects/bhuang/corpus/speech/multilingual-tedx/fr-fr/flac/09UU0I9gLNc_0.flac"
|
99 |
+
waveform, sample_rate = torchaudio.load(wav_path)
|
100 |
+
waveform = waveform.squeeze(axis=0) # mono
|
101 |
+
|
102 |
+
# resample
|
103 |
+
if sample_rate != 16_000:
|
104 |
+
resampler = torchaudio.transforms.Resample(sample_rate, 16_000)
|
105 |
+
waveform = resampler(waveform)
|
106 |
+
|
107 |
+
# normalize
|
108 |
+
input_dict = processor(waveform, sampling_rate=16_000, return_tensors="pt")
|
109 |
+
|
110 |
+
with torch.inference_mode():
|
111 |
+
logits = model(input_dict.input_values.to("cuda")).logits
|
112 |
+
|
113 |
+
# decode
|
114 |
+
predicted_ids = torch.argmax(logits, dim=-1)
|
115 |
+
predicted_sentence = processor.batch_decode(predicted_ids)[0]
|
116 |
+
```
|
117 |
+
|
118 |
+
2. To use on a local audio file with the language model
|
119 |
+
|
120 |
+
```python
|
121 |
+
import torch
|
122 |
+
import torchaudio
|
123 |
+
|
124 |
+
from transformers import AutoModelForCTC, Wav2Vec2ProcessorWithLM
|
125 |
+
|
126 |
+
processor_with_lm = Wav2Vec2ProcessorWithLM.from_pretrained("bhuang/wav2vec2-xls-r-1b-voxpopuli-fr")
|
127 |
+
model = AutoModelForCTC.from_pretrained("bhuang/wav2vec2-xls-r-1b-voxpopuli-fr").cuda()
|
128 |
+
|
129 |
+
model_sampling_rate = processor_with_lm.feature_extractor.sampling_rate
|
130 |
+
|
131 |
+
# path to your audio file
|
132 |
+
wav_path = "/projects/bhuang/corpus/speech/multilingual-tedx/fr-fr/flac/09UU0I9gLNc_0.flac"
|
133 |
+
waveform, sample_rate = torchaudio.load(wav_path)
|
134 |
+
waveform = waveform.squeeze(axis=0) # mono
|
135 |
+
|
136 |
+
# resample
|
137 |
+
if sample_rate != 16_000:
|
138 |
+
resampler = torchaudio.transforms.Resample(sample_rate, 16_000)
|
139 |
+
waveform = resampler(waveform)
|
140 |
+
|
141 |
+
# normalize
|
142 |
+
input_dict = processor_with_lm(waveform, sampling_rate=16_000, return_tensors="pt")
|
143 |
+
|
144 |
+
with torch.inference_mode():
|
145 |
+
logits = model(input_dict.input_values.to("cuda")).logits
|
146 |
+
|
147 |
+
predicted_sentence = processor_with_lm.batch_decode(logits.cpu().numpy()).text[0]
|
148 |
+
```
|
149 |
|
150 |
|
151 |
## Evaluation
|
152 |
|
153 |
+
1. To evaluate on `polinaeterna/voxpopuli`
|
154 |
|
155 |
```bash
|
156 |
python eval.py \
|