SakshiRathi77 committed on
Commit
50547ae
1 Parent(s): 07a5de7

Upload 8 files

Browse files
Images/Hindi-Speech-Voice-Recognition-Tool.jpg ADDED
Images/image-bg.jpg ADDED
app.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import gradio as gr
3
+ import pytube as pt
4
+ from transformers import pipeline
5
+ from huggingface_hub import model_info
6
+ import time
7
+ import unicodedata
8
+
9
+ MODEL_NAME = "SakshiRathi77/wav2vec2-large-xlsr-300m-hi-kagglex"
10
+ lang = "hi"
11
+
12
+ device = 0 if torch.cuda.is_available() else "cpu"
13
+ pipe = pipeline(
14
+ task="automatic-speech-recognition",
15
+ model=MODEL_NAME,
16
+ device=device,
17
+ )
18
+
19
+ def transcribe(microphone, file_upload):
20
+ warn_output = ""
21
+ if (microphone is not None) and (file_upload is not None):
22
+ warn_output = (
23
+ "WARNING: You've uploaded an audio file and used the microphone. "
24
+ "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
25
+ )
26
+
27
+ elif (microphone is None) and (file_upload is None):
28
+ return "ERROR: You have to either use the microphone or upload an audio file"
29
+ file = microphone if microphone is not None else file_upload
30
+ text = pipe(file)["text"]
31
+
32
+ return warn_output + text
33
+
34
+
35
+ def rt_transcribe(audio, state=""):
36
+ time.sleep(2)
37
+ text = pipe(audio)["text"]
38
+ state += unicodedata.normalize("NFC",text) + " "
39
+
40
+ return state, state
41
+
42
+
43
+
44
+ demo = gr.Blocks()
45
+ examples=[["examples/example1.mp3"], ["examples/example2.mp3"],["examples/example3.mp3"]]
46
+
47
+ title ="""
48
+ HindiSpeechPro: WAV2VEC-Powered ASR Interface
49
+ """
50
+
51
+ description = """
52
+ <p>
53
+ <center>
54
+ Welcome to the HindiSpeechPro, a cutting-edge interface powered by a fine-tuned version of facebook/wav2vec2-xls-r-300m on the common_voice dataset.
55
+ <img src="https://huggingface.co/spaces/SakshiRathi77/SakshiRathi77-Wav2Vec2-hi-kagglex/resolve/main/Images/Hindi-Speech-Voice-Recognition-Tool.jpg" alt="logo" ;>
56
+ </center>
57
+ </p>
58
+ """
59
+
60
+ # article = "<p style='text-align: center'><a href='https://github.com/SakshiRathi77/ASR' target='_blank'>Source Code on Github</a></p><p style='text-align: center'><a href='https://huggingface.co/blog/fine-tune-xlsr-wav2vec2' target='_blank'>Reference</a></p><p style='text-align: center'><a href='https://forms.gle/hjfc3F1P7m3weQVAA' target='_blank'><img src='https://e7.pngegg.com/pngimages/794/310/png-clipart-customer-review-feedback-user-service-others-miscellaneous-text-thumbnail.png' alt='Feedback Form' ;></a></p>"
61
+
62
+
63
+ mf_transcribe = gr.Interface(
64
+ fn=transcribe,
65
+ inputs=[
66
+ gr.inputs.Audio(source="microphone", type="filepath"),
67
+ gr.inputs.Audio(source="upload", type="filepath"),
68
+ ],
69
+ outputs="text",
70
+ theme="huggingface",
71
+ title=title,
72
+ description= description ,
73
+ allow_flagging="never",
74
+ examples=examples,
75
+ )
76
+
77
+ rt_transcribe = gr.Interface(
78
+ fn=rt_transcribe,
79
+ inputs=[
80
+ gr.Audio(source="microphone", type="filepath", streaming=True),
81
+ "state"
82
+ ],
83
+ outputs=[ "textbox",
84
+ "state"],
85
+ theme="huggingface",
86
+ title=title,
87
+ description= description ,
88
+ allow_flagging="never",
89
+ live=True,
90
+ )
91
+
92
+
93
+ with demo:
94
+ gr.TabbedInterface([mf_transcribe, rt_transcribe], ["Transcribe Audio", "Transcribe Realtime Voice"])
95
+
96
+ demo.launch(share=True)
examples/example1.mp3 ADDED
Binary file (34.4 kB). View file
 
examples/example2.mp3 ADDED
Binary file (36.3 kB). View file
 
examples/example3.mp3 ADDED
Binary file (29.2 kB). View file
 
packages.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ ffmpeg
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ torch
2
+ git+https://github.com/huggingface/transformers
3
+ pytube