zahoor54321 committed on
Commit 5f3703f
1 Parent(s): b1892c7

Update app.py

Files changed (1)
  1. app.py +18 -22
app.py CHANGED
@@ -1,29 +1,25 @@
-import gradio as gr
 import torch
-import soundfile as sf
-from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
-
-model_path = "https://drive.google.com/drive/folders/1-CcW6f_wNoECTPIu92bnHBsJQgbdUih5?usp=sharing" # Update with your model's public link
-
-model = Wav2Vec2ForCTC.from_pretrained(model_path)
-tokenizer = Wav2Vec2Tokenizer.from_pretrained(model_path)
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model.to(device)
-
-def transcribe_audio(audio):
-    audio_data, _ = sf.read(audio.name)
-    inputs = tokenizer(audio_data, return_tensors="pt", padding=True, truncation=True)
-    inputs = inputs.to(device)
+import torchaudio
+import gradio as gr
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 
-    with torch.no_grad():
-        logits = model(inputs.input_values).logits
+# Load the model and processor
+model_name = "kingabzpro/wav2vec2-large-xlsr-300m-urdu"
+model = Wav2Vec2ForCTC.from_pretrained(model_name)
+processor = Wav2Vec2Processor.from_pretrained(model_name)
 
+# Define the transcribe function
+def transcribe(audio):
+    waveform, sample_rate = torchaudio.load(audio.name)
+    input_dict = processor(waveform, return_tensors="pt", padding=True)
+    logits = model(input_dict.input_values).logits
     predicted_ids = torch.argmax(logits, dim=-1).squeeze()
-    transcription = tokenizer.decode(predicted_ids)
+    transcription = processor.decode(predicted_ids)
     return transcription
-audio_input = gr.inputs.Audio(source="file", type="file", label="Upload audio file")
+
+# Define the interface
+audio_input = gr.inputs.Audio(source="upload", type="file", label="Upload audio file")
 text_output = gr.outputs.Textbox(label="Transcription")
 
-interface = gr.Interface(fn=transcribe_audio, inputs=audio_input, outputs=text_output, title="Speech Recognition", description="Convert speech to text using your model")
-interface.launch()
+interface = gr.Interface(fn=transcribe, inputs=audio_input, outputs=text_output, title="Urdu Speech Recognition")
+interface.launch(share=True)
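Note on the updated transcribe(): torchaudio.load returns the waveform at whatever sample rate the uploaded file uses, shaped (channels, frames), while Wav2Vec2/XLS-R checkpoints are typically trained on 16 kHz mono audio. Below is a minimal sketch of a variant that downmixes and resamples before calling the processor. It assumes the kingabzpro/wav2vec2-large-xlsr-300m-urdu checkpoint expects 16 kHz input (worth confirming on the model card) and that the function receives a plain file path; it is illustrative, not part of this commit.

import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_name = "kingabzpro/wav2vec2-large-xlsr-300m-urdu"
model = Wav2Vec2ForCTC.from_pretrained(model_name)
processor = Wav2Vec2Processor.from_pretrained(model_name)

def transcribe(audio_path):
    # torchaudio.load returns a (channels, frames) tensor plus the file's sample rate
    waveform, sample_rate = torchaudio.load(audio_path)
    # Downmix to mono and resample to 16 kHz if needed (assumed model input rate)
    waveform = waveform.mean(dim=0)
    if sample_rate != 16_000:
        waveform = torchaudio.functional.resample(waveform, sample_rate, 16_000)
    # Pass the sampling rate explicitly so the feature extractor does not warn or mismatch
    inputs = processor(waveform.numpy(), sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1).squeeze()
    return processor.decode(predicted_ids)

If the committed gr.inputs.Audio(..., type="file") component is kept, the function receives a file object instead, and passing audio.name (as in the commit) in place of audio_path leaves the rest of the sketch unchanged.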