alviadn committed
Commit 8d41672
1 Parent(s): f363ed0
Files changed (1)
  1. app.py +33 -15
app.py CHANGED
@@ -1,21 +1,39 @@
 import gradio as gr
 from transformers import pipeline
 
-
-model = pipeline(task="automatic-speech-recognition",
-
-def predict_speech_to_text(audio):
-    prediction = model(audio)
-    text = prediction['text']
-    return text
-
-gr.Interface.load("models/facebook/s2t-medium-librispeech-asr")
-gr.Interface(fn=predict_speech_to_text,
-             title="🧑🏽‍🎤 PROLOVE 🥰😘 ",
-             inputs=gr.inputs.Audio(
-                 source="microphone", type="filepath", label="Input"),
-             outputs=gr.outputs.Textbox(label="Output"),
-             description="This application was created to help correct pronouncation",
-             examples=['Good night_alvi.wav'],
-             allow_flagging='never'
-             ).launch()
+# Emotion Text Classification
+title_emotion = "Classify text according to emotion"
+description_emotion = "Emotion text classification by Vishal Tiwari "
+examples_emotion = [
+    ["Remember before Twitter when you took a photo of food, got the film developed, then drove around showing everyone the pic? No? Me neither."],
+    ['''"We are all here because we're committed to the biggest question of all: What's out there?" Take your first steps toward answering that question by watching our Gameplay Reveal from the #XboxBethesda Showcase. '''],
+    ["A STUNNER IN KNOXVILLE! 😱 Notre Dame takes down No. 1 Tennessee for its first trip to Omaha in 20 years‼️"],
+    ["you and I best moment is yet to come 💜 #BTS9thAnniversary"]
+]
+
+interface_emotion = gr.Interface.load(
+    "huggingface/bhadresh-savani/bert-base-go-emotion",
+    title=title_emotion,
+    description=description_emotion,
+    examples=examples_emotion
+)
+
+# Text to Speech Translation
+title_tts = "Text to Speech Translation"
+examples_tts = [
+    "I love learning machine learning",
+    "How do you do?",
+]
+
+interface_tts = gr.Interface.load(
+    "huggingface/facebook/fastspeech2-en-ljspeech",
+    title=title_tts,
+    examples=examples_tts,
+    description="Give me something to say!",
+)
+
+# Launching both interfaces with tabs
+demo = gr.TabbedInterface([interface_emotion, interface_tts], ["Emotion Classification", "Text to Speech"])
+
+if __name__ == "__main__":
+    demo.launch()
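
Note: gr.Interface.load was deprecated in later Gradio releases in favor of gr.load. A minimal sketch of the same two-tab app on a recent Gradio version, assuming the same two Hub models are still hosted, might look like this (not part of this commit):

import gradio as gr

# Load the same two hosted models through the newer gr.load entry point.
interface_emotion = gr.load("huggingface/bhadresh-savani/bert-base-go-emotion")
interface_tts = gr.load("huggingface/facebook/fastspeech2-en-ljspeech")

# Keep the same two-tab layout as the committed app.py.
demo = gr.TabbedInterface(
    [interface_emotion, interface_tts],
    ["Emotion Classification", "Text to Speech"],
)

if __name__ == "__main__":
    demo.launch()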