csikasote committed
Commit 2da8ee6 (1 parent: ecfbf37)

Update README.md

Files changed (1): README.md (+16, -16)
README.md CHANGED
@@ -26,7 +26,7 @@ model-index:
       value: 42.14
 ---
 
-Wav2Vec2-Large-XLSR-53-Bemba
+# Wav2Vec2-Large-XLSR-53-Bemba
 
 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Bemba using the [BembaSpeech](https://csikasote.github.io/BembaSpeech) dataset. When using this model, make sure that your speech input is sampled at 16kHz.
 
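The 16 kHz requirement comes from the card itself. As a minimal sketch (not part of the card) of how an arbitrary file can be brought to 16 kHz with torchaudio before being fed to the processor; `audio.wav` is only a placeholder path:

```python
import torchaudio

# Load an arbitrary audio file and bring it to the 16 kHz the model expects.
# "audio.wav" is a placeholder path, not a file shipped with the card.
speech_array, sampling_rate = torchaudio.load("audio.wav")
if sampling_rate != 16_000:
    resampler = torchaudio.transforms.Resample(orig_freq=sampling_rate, new_freq=16_000)
    speech_array = resampler(speech_array)
speech = speech_array.squeeze().numpy()
```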
@@ -50,15 +50,15 @@ resampler = torchaudio.transforms.Resample(48_000, 16_000)
 # Preprocessing the datasets.
 # We need to read the audio files as arrays
 def speech_file_to_array_fn(batch):
-\tspeech_array, sampling_rate = torchaudio.load(batch["path"])
-\tbatch["speech"] = resampler(speech_array).squeeze().numpy()
-\treturn batch
+\\tspeech_array, sampling_rate = torchaudio.load(batch["path"])
+\\tbatch["speech"] = resampler(speech_array).squeeze().numpy()
+\\treturn batch
 
 test_dataset = test_dataset.map(speech_file_to_array_fn)
 inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
 
 with torch.no_grad():
-\tlogits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
+\\tlogits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
 
 predicted_ids = torch.argmax(logits, dim=-1)
 
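The code block in this hunk carries literal `\t` escape text where indentation should be, which appears to be an artifact of how these auto-generated model cards were produced; the commit changes the escaping rather than restoring real indentation. A cleaned-up, runnable sketch of the same transcription flow, assuming the BembaSpeech test split is available as a local CSV with `path` and `sentence` columns (the CSV path below is a placeholder):

```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Placeholder: load the BembaSpeech test split from a local CSV with
# "path" (audio file) and "sentence" (transcript) columns.
test_dataset = load_dataset("csv", data_files={"test": "test.csv"})["test"]

processor = Wav2Vec2Processor.from_pretrained("csikasote/wav2vec2-large-xlsr-bemba")
model = Wav2Vec2ForCTC.from_pretrained("csikasote/wav2vec2-large-xlsr-bemba")

# The card's snippet resamples from 48 kHz; skip or adjust this if your
# audio is already 16 kHz (the evaluation block below comments it out).
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Read each audio file into an array and resample it to 16 kHz.
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

# Greedy CTC decoding of the first two test utterances.
with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```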
@@ -86,30 +86,30 @@ processor = Wav2Vec2Processor.from_pretrained("csikasote/wav2vec2-large-xlsr-bemba")
 model = Wav2Vec2ForCTC.from_pretrained("csikasote/wav2vec2-large-xlsr-bemba")
 model.to("cuda")
 
-chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“]'
+chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“]'
 #resampler = torchaudio.transforms.Resample(48_000, 16_000)
 
 # Preprocessing the datasets.
 # We need to read the audio files as arrays
 def speech_file_to_array_fn(batch):
-\tbatch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
-\tspeech_array, sampling_rate = torchaudio.load(batch["path"])
-\tbatch["speech"] = speech_array.squeeze().numpy()
-\treturn batch
+\\tbatch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
+\\tspeech_array, sampling_rate = torchaudio.load(batch["path"])
+\\tbatch["speech"] = speech_array.squeeze().numpy()
+\\treturn batch
 
 test_dataset = test_dataset.map(speech_file_to_array_fn)
 
 # Preprocessing the datasets.
 # We need to read the audio files as arrays
 def evaluate(batch):
-\tinputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
+\\tinputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
 
-\twith torch.no_grad():
-\t\tlogits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
+\\twith torch.no_grad():
+\\t\\tlogits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
 
-\tpred_ids = torch.argmax(logits, dim=-1)
-\tbatch["pred_strings"] = processor.batch_decode(pred_ids)
-\treturn batch
+\\tpred_ids = torch.argmax(logits, dim=-1)
+\\tbatch["pred_strings"] = processor.batch_decode(pred_ids)
+\\treturn batch
 
 result = test_dataset.map(evaluate, batched=True, batch_size=8)
 
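Likewise, a cleaned-up sketch of the evaluation block from this hunk, with a WER computation appended; the 42.14 in the metadata is the card's reported result, not something this sketch guarantees to reproduce. It assumes the `datasets` library's `load_metric("wer")` helper, which was current when this card was written, and a placeholder path for the BembaSpeech test CSV:

```python
import re
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Placeholder: point this at your local BembaSpeech test CSV.
test_dataset = load_dataset("csv", data_files={"test": "test.csv"})["test"]
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("csikasote/wav2vec2-large-xlsr-bemba")
model = Wav2Vec2ForCTC.from_pretrained("csikasote/wav2vec2-large-xlsr-bemba")
model.to("cuda")  # the card's evaluation snippet assumes a CUDA device

# Punctuation to strip from reference transcripts before scoring.
chars_to_ignore_regex = r'[\,\?\.\!\-\;\:\"\“]'

# Normalise the transcript and read the audio into an array
# (no resampling here; the card treats BembaSpeech audio as already 16 kHz).
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = speech_array.squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Batched greedy decoding on the GPU.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```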