gchhablani committed d1d0762
Parent(s): 5c0cce7

Update README.md

README.md CHANGED
@@ -11,7 +11,7 @@ tags:
 - xlsr-fine-tuning-week
 license: apache-2.0
 model-index:
-- name:
+- name: Wav2Vec2 Large 53 Portugese by Gunjan Chhablani
   results:
   - task:
       name: Speech Recognition
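For reference, the `name` field edited here lives in the Hugging Face `model-index` metadata schema, where a complete entry also declares the evaluation task, dataset, and metrics. A minimal sketch of the fuller block; the dataset entry and the metric value below are illustrative placeholders, not values taken from this commit:

```yaml
model-index:
- name: Wav2Vec2 Large 53 Portugese by Gunjan Chhablani
  results:
  - task:
      name: Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: Common Voice pt   # assumed: the card fine-tunes on Common Voice Portuguese
      type: common_voice
      args: pt
    metrics:
    - name: Test WER
      type: wer
      value: XX.XX            # placeholder; the real value is reported elsewhere in the card
```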
@@ -51,15 +51,15 @@ resampler = torchaudio.transforms.Resample(48_000, 16_000)
 # Preprocessing the datasets.
 # We need to read the audio files as arrays
 def speech_file_to_array_fn(batch):
-
-
-
+    speech_array, sampling_rate = torchaudio.load(batch["path"])
+    batch["speech"] = resampler(speech_array).squeeze().numpy()
+    return batch
 
 test_dataset = test_dataset.map(speech_file_to_array_fn)
 inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
 
 with torch.no_grad():
-
+    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
 
 predicted_ids = torch.argmax(logits, dim=-1)
 
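These added lines slot into the card's usage snippet. A minimal end-to-end sketch of that surrounding context, assuming the standard XLSR fine-tuning week template; the `test[:2%]` split is an assumption for a quick smoke test, not part of this diff:

```python
# Sketch of the usage context around this hunk (XLSR-week template assumed).
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "pt", split="test[:2%]")

processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-pt")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-pt")

# Common Voice clips are 48 kHz; the model expects 16 kHz input.
resampler = torchaudio.transforms.Resample(48_000, 16_000)

def speech_file_to_array_fn(batch):
    # Read each audio file into an array and resample it to 16 kHz.
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```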
@@ -87,30 +87,30 @@ processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-pt
 model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-pt")
 model.to("cuda")
 
-chars_to_ignore_regex = '[
+chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\'\�]'
 resampler = torchaudio.transforms.Resample(48_000, 16_000)
 
 # Preprocessing the datasets.
 # We need to read the audio files as arrays
 def speech_file_to_array_fn(batch):
-
-
-
-
+    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
+    speech_array, sampling_rate = torchaudio.load(batch["path"])
+    batch["speech"] = resampler(speech_array).squeeze().numpy()
+    return batch
 
 test_dataset = test_dataset.map(speech_file_to_array_fn)
 
 # Evaluating the model on the test set.
 # We run batched inference and decode the argmax token ids.
 def evaluate(batch):
-
+    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
 
-
-
+    with torch.no_grad():
+        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
 
-
-
-
+    pred_ids = torch.argmax(logits, dim=-1)
+    batch["pred_strings"] = processor.batch_decode(pred_ids)
+    return batch
 
 result = test_dataset.map(evaluate, batched=True, batch_size=8)
 
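The `evaluate` map fills `pred_strings` for each batch; the card's evaluation section then scores those predictions against the reference sentences. A short sketch of that scoring step, assuming the template's WER metric from `datasets`:

```python
# Score the predictions produced by `evaluate` above (XLSR-week template assumed).
from datasets import load_metric

wer = load_metric("wer")
print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"],
                                             references=result["sentence"])))
```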
@@ -127,27 +127,27 @@ The Common Voice `train` and `validation` datasets were used for training. The s
 
 ```bash
 #!/usr/bin/env bash
-python run_common_voice.py
---model_name_or_path="facebook/wav2vec2-large-xlsr-53"
---dataset_config_name="pt"
---output_dir=/workspace/output_models/pt/wav2vec2-large-xlsr-pt
---cache_dir=/workspace/data
---overwrite_output_dir
---num_train_epochs="30"
---per_device_train_batch_size="32"
---per_device_eval_batch_size="32"
---evaluation_strategy="steps"
---learning_rate="3e-4"
---warmup_steps="500"
---fp16
---freeze_feature_extractor
---save_steps="500"
---eval_steps="500"
---save_total_limit="1"
---logging_steps="500"
---group_by_length
---feat_proj_dropout="0.0"
---layerdrop="0.1"
---gradient_checkpointing
---do_train --do_eval
+python run_common_voice.py \
+    --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \
+    --dataset_config_name="pt" \
+    --output_dir=/workspace/output_models/pt/wav2vec2-large-xlsr-pt \
+    --cache_dir=/workspace/data \
+    --overwrite_output_dir \
+    --num_train_epochs="30" \
+    --per_device_train_batch_size="32" \
+    --per_device_eval_batch_size="32" \
+    --evaluation_strategy="steps" \
+    --learning_rate="3e-4" \
+    --warmup_steps="500" \
+    --fp16 \
+    --freeze_feature_extractor \
+    --save_steps="500" \
+    --eval_steps="500" \
+    --save_total_limit="1" \
+    --logging_steps="500" \
+    --group_by_length \
+    --feat_proj_dropout="0.0" \
+    --layerdrop="0.1" \
+    --gradient_checkpointing \
+    --do_train --do_eval
 ```
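For readers wiring this up in Python rather than through the CLI, most of these flags map directly onto `transformers.TrainingArguments`. A hedged sketch of that mapping, assuming a recent transformers version; note that `--feat_proj_dropout` and `--layerdrop` are model-config options in the fine-tuning script, not trainer arguments:

```python
# Sketch: the CLI flags above expressed as transformers objects (assumed mapping).
from transformers import TrainingArguments, Wav2Vec2ForCTC

model = Wav2Vec2ForCTC.from_pretrained(
    "facebook/wav2vec2-large-xlsr-53",
    feat_proj_dropout=0.0,   # --feat_proj_dropout (config option)
    layerdrop=0.1,           # --layerdrop (config option)
)
model.freeze_feature_extractor()  # --freeze_feature_extractor

training_args = TrainingArguments(
    output_dir="/workspace/output_models/pt/wav2vec2-large-xlsr-pt",
    overwrite_output_dir=True,
    num_train_epochs=30,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    evaluation_strategy="steps",
    learning_rate=3e-4,
    warmup_steps=500,
    fp16=True,
    save_steps=500,
    eval_steps=500,
    save_total_limit=1,
    logging_steps=500,
    group_by_length=True,
    gradient_checkpointing=True,
)
```

Freezing the feature extractor keeps the convolutional encoder fixed, which is the usual choice when fine-tuning XLSR-53 on a comparatively low-resource language like Portuguese.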
|