JPLTedCas committed on
Commit f725323
1 Parent(s): 63d47ed

Upload App.py

Files changed (1)
  1. App.py +56 -0
App.py ADDED
from transformers import pipeline
import gradio as gr
import time

# One wav2vec2 ASR pipeline per supported language
p = pipeline("automatic-speech-recognition", model="jonatasgrosman/wav2vec2-large-xlsr-53-spanish")
pc = pipeline("automatic-speech-recognition", model="softcatala/wav2vec2-large-xlsr-catala")
pe = pipeline("automatic-speech-recognition", model="jonatasgrosman/wav2vec2-large-xlsr-53-english")


def transcribe(language, audio, state=""):
    time.sleep(2)
    # Route the recording to the pipeline matching the dropdown selection
    if language == "Spanish":
        text = p(audio)["text"]
    elif language == "Catalan":
        text = pc(audio)["text"]
    elif language == "English":
        text = pe(audio)["text"]
    else:
        # No language selected yet: return the previous state instead of failing
        return state, state
    state = text + " "  # state holds only the latest transcription
    return state, state


demo = gr.Interface(
    fn=transcribe,
    title="TEDCAS Offline Speech recognition",
    description=(
        "1) Select a language. 2) Click 'record from microphone' and talk. "
        "3) Click 'stop recording'. 4) Click submit. "
        "5) Before starting again, click 'clear'."
    ),
    inputs=[
        gr.Dropdown(["Spanish", "Catalan", "English"]),
        gr.Audio(source="microphone", type="filepath"),  # Gradio 3.x-style kwargs
        "state",
    ],
    outputs=[
        "textbox",
        "state",
    ],
    # live=True would transcribe continuously instead of waiting for Submit
)
demo.launch()
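For a quick check of the ASR models outside the Gradio UI, the same transformers pipeline can be called directly on an audio file. The snippet below is a minimal sketch, assuming a local recording at sample_es.wav (a placeholder path) and the Spanish checkpoint that App.py loads.

from transformers import pipeline

# Same Spanish checkpoint App.py loads; "sample_es.wav" is a placeholder path.
asr = pipeline("automatic-speech-recognition",
               model="jonatasgrosman/wav2vec2-large-xlsr-53-spanish")

# The pipeline accepts a path to an audio file and returns a dict with "text",
# the same value App.py shows in the textbox.
print(asr("sample_es.wav")["text"])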