anuragshas committed
Commit f83b190
1 Parent(s): 032c0ae

Update README.md

Files changed (1)
  1. README.md +4 -15
README.md CHANGED
```diff
@@ -36,15 +36,12 @@ import torchaudio
 from datasets import load_dataset
 from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 import pandas as pd
-
 # Evaluation notebook contains the procedure to download the data
-df = pd.read_csv("/content/te/test.tsv", sep="\\\\t")
+df = pd.read_csv("/content/te/test.tsv", sep="\t")
 df["path"] = "/content/te/clips/" + df["path"]
 test_dataset = Dataset.from_pandas(df)
-
 processor = Wav2Vec2Processor.from_pretrained("anuragshas/wav2vec2-large-xlsr-53-telugu")
 model = Wav2Vec2ForCTC.from_pretrained("anuragshas/wav2vec2-large-xlsr-53-telugu")
-
 resampler = torchaudio.transforms.Resample(48_000, 16_000)
 # Preprocessing the datasets.
 # We need to read the aduio files as arrays
```
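Besides dropping stray blank lines, the substantive change in this hunk is the `sep` argument: the old README carried an over-escaped `sep="\\\\t"`, which pandas interprets as a regex for a literal backslash followed by `t` and therefore never splits the TSV, while the new `sep="\t"` is a real tab. A minimal sketch of the difference, using a made-up in-memory TSV rather than the Common Voice file:

```python
# Illustration only (not part of the commit): a made-up two-row TSV stands in
# for /content/te/test.tsv so the behaviour of the two sep values can be seen.
import io
import pandas as pd

tsv = "path\tsentence\nclip_0001.mp3\tనమస్కారం\n"

good = pd.read_csv(io.StringIO(tsv), sep="\t")     # real tab: splits into two columns
bad  = pd.read_csv(io.StringIO(tsv), sep="\\\\t")  # old README value: regex for "\t", matches nothing

print(list(good.columns))  # ['path', 'sentence']
print(list(bad.columns))   # one unsplit column, so df["path"] later raises KeyError
```

The same `sep` fix is applied again in the evaluation snippet further down.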
```diff
@@ -52,7 +49,6 @@ def speech_file_to_array_fn(batch):
     speech_array, sampling_rate = torchaudio.load(batch["path"])
     batch["speech"] = resampler(speech_array).squeeze().numpy()
     return batch
-
 test_dataset = test_dataset.map(speech_file_to_array_fn)
 inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
 with torch.no_grad():
```
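This hunk only removes a blank line, and the diff context cuts the snippet off at `with torch.no_grad():`. For context, the usual continuation of this kind of usage example (standard Wav2Vec2 CTC decoding; it reuses the names defined above and is not part of the diff) looks roughly like:

```python
# Sketch of the typical continuation (assumes torch, model, inputs, processor
# and test_dataset from the snippet above; not shown in this diff).
with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)      # greedy CTC decoding
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```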
```diff
@@ -70,25 +66,19 @@ from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 import re
 from sklearn.model_selection import train_test_split
 import pandas as pd
-
 # Evaluation notebook contains the procedure to download the data
-df = pd.read_csv("/content/te/test.tsv", sep="\\\\t")
+df = pd.read_csv("/content/te/test.tsv", sep="\t")
 df["path"] = "/content/te/clips/" + df["path"]
 test_dataset = Dataset.from_pandas(df)
 wer = load_metric("wer")
-
 processor = Wav2Vec2Processor.from_pretrained("anuragshas/wav2vec2-large-xlsr-53-telugu")
 model = Wav2Vec2ForCTC.from_pretrained("anuragshas/wav2vec2-large-xlsr-53-telugu")
 model.to("cuda")
-
-chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\_\\;\\:\\"\\“\\%\\‘\\”\\।\\’\'\\&]'
+chars_to_ignore_regex = '[\,\?\.\!\-\_\;\:\"\“\%\‘\”\।\’\'\&]'
 resampler = torchaudio.transforms.Resample(48_000, 16_000)
-
 def normalizer(text):
     # Use your custom normalizer
-    text = text.replace("\\\\\\
-","\\
-")
+    text = text.replace("\\n","\n")
     text = ' '.join(text.split())
     text = re.sub(r'''([a-z]+)''','',text,flags=re.IGNORECASE)
     text = re.sub(r'''%'''," శాతం ", text)
```
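Besides repeating the `sep` fix, this hunk de-escapes the punctuation character class (`\\,` becomes `\,`, and so on) and replaces a broken, line-wrapped `text.replace(...)` with a one-liner that turns literal `\n` sequences into real newlines. A standalone sketch of the corrected cleanup, with the regexes copied from the new README lines and a made-up sample sentence:

```python
# Standalone sketch (not part of the commit). The regexes come from the new
# README lines above; the sample sentence is invented for illustration.
import re

chars_to_ignore_regex = '[\,\?\.\!\-\_\;\:\"\“\%\‘\”\।\’\'\&]'

def normalizer(text):
    text = text.replace("\\n", "\n")     # literal "\n" sequences -> real newlines
    text = " ".join(text.split())        # collapse all whitespace runs
    text = re.sub(r"([a-z]+)", "", text, flags=re.IGNORECASE)  # strip Latin letters
    text = re.sub(r"%", " శాతం ", text)  # spell out percent in Telugu
    # (the README also recomposes a decomposed Telugu vowel sign here; omitted)
    text = text.strip()
    return text

sample = "నేటి వర్షపాతం 20% ఎక్కువ!\\nOK"
cleaned = re.sub(chars_to_ignore_regex, "", normalizer(sample)).lower() + " "
print(cleaned)  # Telugu-only text, punctuation removed, "%" spelled out
```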
```diff
@@ -96,7 +86,6 @@ def normalizer(text):
     text = re.sub("ై","ై", text)
     text = text.strip()
     return text
-
 def speech_file_to_array_fn(batch):
     batch["sentence"] = normalizer(batch["sentence"])
     batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()+ " "
```