DrishtiSharma committed on
Commit
8fc101b
1 Parent(s): 3e57954

Delete app.py

Files changed (1)
  1. app.py +0 -43
app.py DELETED
@@ -1,43 +0,0 @@
- import gradio as gr
- import librosa
- from transformers import AutoFeatureExtractor, pipeline
-
-
- def load_and_fix_data(input_file, model_sampling_rate):
-     speech, sample_rate = librosa.load(input_file)
-     if len(speech.shape) > 1:
-         speech = speech[:, 0] + speech[:, 1]
-     if sample_rate != model_sampling_rate:
-         speech = librosa.resample(speech, sample_rate, model_sampling_rate)
-     return speech
-
-
- feature_extractor = AutoFeatureExtractor.from_pretrained(
-     "anuragshas/wav2vec2-xls-r-1b-hi-with-lm"
- )
- sampling_rate = feature_extractor.sampling_rate
-
- asr = pipeline(
-     "automatic-speech-recognition", model="anuragshas/wav2vec2-xls-r-1b-hi-with-lm"
- )
-
-
- def predict_and_ctc_lm_decode(input_file):
-     speech = load_and_fix_data(input_file, sampling_rate)
-     transcribed_text = asr(speech, chunk_length_s=5, stride_length_s=1)
-     return transcribed_text["text"]
-
-
- gr.Interface(
-     predict_and_ctc_lm_decode,
-     inputs=[
-         gr.inputs.Audio(source="microphone", type="filepath", label="Record your audio")
-     ],
-     outputs=[gr.outputs.Textbox()],
-     examples=[["example1.wav"]],
-     title="Hindi ASR using Wav2Vec2-1B with LM",
-     article="<p><center><img src='https://visitor-badge.glitch.me/badge?page_id=anuragshas/Hindi_ASR' alt='visitor badge'></center></p>",
-     description="Built during Robust Speech Event",
-     layout="horizontal",
-     theme="huggingface",
- ).launch(enable_queue=True, cache_examples=True)