ivangtorre commited on
Commit
7044354
1 Parent(s): 71acad7

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +32 -0
README.md CHANGED
@@ -33,6 +33,38 @@ transcription = processor.batch_decode(predicted_ids)
33
  print("HF prediction: ", transcription)
34
  ```
35
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  ## Citation
37
 
38
  ```bibtex
 
33
  print("HF prediction: ", transcription)
34
  ```
35
 
36
+
37
+ This code snippet shows how to evaluate the wav2vec2-xlsr-300m-quechua model on the [Second Americas NLP 2022 Quechua dev set](https://huggingface.co/datasets/ivangtorre/second_americas_nlp_2022).
38
+
39
+ ```python
40
+ from datasets import load_dataset
41
+ from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
42
+ import torch
43
+ from jiwer import cer
44
+ import torch.nn.functional as F
45
+
46
+
47
+ #librispeech_eval = load_dataset("ivangtorre/second_americas_nlp_2022", split="validation")
48
+ librispeech_eval = load_dataset("ivangtorre/second_americas_nlp_2022", split="validation")
49
+
50
+ model = Wav2Vec2ForCTC.from_pretrained("ivangtorre/wav2vec2-xlsr-300m-quechua")
51
+ processor = Wav2Vec2Processor.from_pretrained("ivangtorre/wav2vec2-xlsr-300m-quechua")
52
+
53
+ def map_to_pred(batch):
54
+ wav = batch["audio"][0]["array"]
55
+ feats = torch.from_numpy(wav).float()
56
+ feats = F.layer_norm(feats, feats.shape) # Normalization performed during finetuning
57
+ feats = torch.unsqueeze(feats, 0)
58
+ logits = model(feats).logits
59
+ predicted_ids = torch.argmax(logits, dim=-1)
60
+ batch["transcription"] = processor.batch_decode(predicted_ids)
61
+ return batch
62
+
63
+ result = librispeech_eval.map(map_to_pred, batched=True, batch_size=1)
64
+
65
+ print("CER:", cer(result["source_processed"], result["transcription"]))
66
+ ```
67
+
68
  ## Citation
69
 
70
  ```bibtex