rajistics committed on
Commit 35b1732
1 Parent(s): cb169d1

Cleaned up code

Files changed (1)
  1. app.py +10 -12
app.py CHANGED
@@ -6,14 +6,12 @@ import gradio as gr
 
 asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
 
+classifier = pipeline("text-classification", model="bhadresh-savani/distilbert-base-uncased-emotion")
+
 def transcribe(audio):
     text = asr(audio)["text"]
     return text
 
-classifier = pipeline(
-    "text-classification",
-    model="bhadresh-savani/distilbert-base-uncased-emotion")
-
 def speech_to_text(speech):
     text = asr(speech)["text"]
     return text
@@ -25,17 +23,17 @@ def text_to_sentiment(text):
 demo = gr.Blocks()
 
 with demo:
-    #audio_file = gr.Audio(type="filepath")
+
     audio_file = gr.inputs.Audio(source="microphone", type="filepath")
+    b1 = gr.Button("Recognize Speech")
+    b1.click(speech_to_text, inputs=audio_file, outputs=text)
     text = gr.Textbox()
-    label = gr.Label()
-    saved = gr.Textbox()
-    savedAll = gr.Textbox()
-
-    b1 = gr.Button("Recognize Speech")
+
     b2 = gr.Button("Classify Sentiment")
-
-    b1.click(speech_to_text, inputs=audio_file, outputs=text)
     b2.click(text_to_sentiment, inputs=text, outputs=label)
+    label = gr.Label()
+
+
+
 
 demo.launch(share=True)
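
For reference, a minimal sketch of app.py as it could look after this commit. The import block and the body of text_to_sentiment sit outside the two hunks, so they are assumptions here (text_to_sentiment is assumed to return the classifier's top label). The component definitions are also placed before the .click() wiring so the sketch actually runs: the committed file calls b1.click(..., outputs=text) and b2.click(..., outputs=label) before text and label are created, which raises a NameError. The gr.inputs.Audio call is kept as in the file, which targets the older Gradio input API.

import gradio as gr
from transformers import pipeline

# Speech-recognition and emotion-classification pipelines
asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
classifier = pipeline("text-classification", model="bhadresh-savani/distilbert-base-uncased-emotion")

def transcribe(audio):
    text = asr(audio)["text"]
    return text

def speech_to_text(speech):
    text = asr(speech)["text"]
    return text

def text_to_sentiment(text):
    # Assumed body: only the function name appears in the hunk header.
    return classifier(text)[0]["label"]

demo = gr.Blocks()

with demo:
    # Legacy Gradio input API as used in this file; newer releases use
    # gr.Audio(sources=["microphone"], type="filepath") instead.
    audio_file = gr.inputs.Audio(source="microphone", type="filepath")
    text = gr.Textbox()
    label = gr.Label()

    b1 = gr.Button("Recognize Speech")
    b2 = gr.Button("Classify Sentiment")

    # Wiring is done after the components exist so the references resolve.
    b1.click(speech_to_text, inputs=audio_file, outputs=text)
    b2.click(text_to_sentiment, inputs=text, outputs=label)

demo.launch(share=True)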