RikeshSilwal committed
Commit
8593e6e
1 Parent(s): faf44bd

Update app.py

Files changed (1)
  1. app.py +37 -12
app.py CHANGED
@@ -3,10 +3,14 @@ import gradio as gr
 import torch
 import torchaudio
 from datasets import load_dataset
-from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor, pipeline
 import pandas as pd
 from sklearn.model_selection import train_test_split
 
+from noisereduce.torchgate import TorchGate as TG
+import re
+from pydub import AudioSegment
+
 
 
 # processor = Wav2Vec2Processor.from_pretrained("RikeshSilwal/wav2vec2-nepali")
@@ -22,21 +26,42 @@ import numpy as np
 
 
 
-def transcribe_audio(audio_file):
-    input_arr, sampling_rate = torchaudio.load(audio_file)
-    resampler = Resample(orig_freq=sampling_rate, new_freq=16000)
-    input_arr = resampler(input_arr).squeeze().numpy()
-    sampling_rate = 16000
-    inputs = processor(input_arr, sampling_rate=16_000, return_tensors="pt", padding=True)
+# def transcribe_audio(audio_file):
+#     input_arr, sampling_rate = torchaudio.load(audio_file)
+#     resampler = Resample(orig_freq=sampling_rate, new_freq=16000)
+#     input_arr = resampler(input_arr).squeeze().numpy()
+#     sampling_rate = 16000
+#     inputs = processor(input_arr, sampling_rate=16_000, return_tensors="pt", padding=True)
 
-    with torch.no_grad():
-        logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
+#     with torch.no_grad():
+#         logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
 
-    predicted_ids = torch.argmax(logits, dim=-1)
+#     predicted_ids = torch.argmax(logits, dim=-1)
 
-    predicted_words = processor.batch_decode(predicted_ids)
+#     predicted_words = processor.batch_decode(predicted_ids)
 
-    return predicted_words[0]
+#     return predicted_words[0]
+
+def transcribe_audio(audio_file):
+    audio = AudioSegment.from_wav(audio_file)  # loaded via pydub; currently unused
+
+    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+
+    input_arr, sampling_rate = torchaudio.load(audio_file)
+    # Create TorchGating instance for spectral noise gating
+    tg = TG(sr=sampling_rate, nonstationary=True).to(device)
+    try:
+        input_arr = tg(input_arr)
+    except Exception:
+        pass  # fall back to the un-gated signal if noise gating fails
+    if sampling_rate != 16000:
+        resampler = torchaudio.transforms.Resample(orig_freq=sampling_rate, new_freq=16000)
+        input_arr = resampler(input_arr).squeeze().numpy()
+
+    recognizer = pipeline("automatic-speech-recognition", model="Harveenchadha/vakyansh-wav2vec2-nepali-nem-130")
+    prediction = recognizer(input_arr, chunk_length_s=5, stride_length_s=(2, 1))  # chunked long-form decoding
+    prediction = re.sub('<s>', '', str(prediction['text']))  # strip <s> tokens from the decoded text
+    return prediction
 
 audio_input = gr.inputs.Audio(source="upload", type="filepath")
 
 
 
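For reference, a minimal sketch of how the updated transcribe_audio could be wired into the Gradio UI hinted at by the trailing context line. The gr.Interface call, the Textbox output component, and the launch() step are assumptions based on the legacy gr.inputs API used above; they are not part of this commit.

import gradio as gr

# Legacy input component, as declared in app.py above.
audio_input = gr.inputs.Audio(source="upload", type="filepath")

# Hypothetical wiring: transcribe_audio is the function defined in the diff above.
demo = gr.Interface(
    fn=transcribe_audio,
    inputs=audio_input,
    outputs=gr.outputs.Textbox(),  # legacy output component (assumed)
)

if __name__ == "__main__":
    demo.launch()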