EloiCampeny committed
Commit
a469909
1 Parent(s): e0a68c8

code of the demo


All code for the demo

Files changed (1)

app.py (+63, -0)
app.py ADDED
@@ -0,0 +1,63 @@
+ import gradio as gr
+ import requests
+ import tensorflow as tf
+ from transformers import pipeline
+
+ # Speech recognition pipeline: transcribes Spanish audio with wav2vec2
+ trans = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-xlsr-53-spanish")
+
+ def audio2text(audio):
+     text = trans(audio)["text"]
+     return text
+
+
+ # Sentiment pipeline: classifies Spanish text with robertuito
+ classifier = pipeline("text-classification", model="pysentimiento/robertuito-sentiment-analysis")
+
+ def text2sentiment(text):
+     return classifier(text)[0]["label"]
+
+
+ # Image classification: MobileNetV2 pretrained on ImageNet
+ inception_net = tf.keras.applications.MobileNetV2()
+
+ answer = requests.get("https://git.io/JJkYN")  # the 1,000 ImageNet class labels, one per line
+ labels = answer.text.split("\n")
+
+ def image_classification(inp):
+     inp = inp.reshape((-1, 224, 224, 3))  # add a batch dimension
+     inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)
+     prediction = inception_net.predict(inp).flatten()
+     confidences = {labels[i]: float(prediction[i]) for i in range(1000)}  # label -> score
+     return confidences
+
+
+ # Demo: a Blocks app with one tab per task (Gradio 3.x API)
+ demo = gr.Blocks()
+
+ with demo:
+     gr.Markdown("This is the second demo with Blocks")
+     with gr.Tabs():
+         with gr.TabItem("Transcribe audio in Spanish"):
+             with gr.Row():
+                 audio = gr.Audio(source="microphone", type="filepath")
+                 transcription = gr.Textbox()
+             b1 = gr.Button("Transcribe")
+
+         with gr.TabItem("Sentiment analysis in Spanish"):
+             with gr.Row():
+                 text = gr.Textbox()
+                 label_sentiment = gr.Label()
+             b2 = gr.Button("Sentiment")
+
+         with gr.TabItem("Image classification"):
+             with gr.Row():
+                 image = gr.Image(shape=(224, 224))  # Gradio 3.x: resizes uploads to 224x224
+                 label_image = gr.Label(num_top_classes=3)
+             b3 = gr.Button("Classify")
+
+     b1.click(audio2text, inputs=audio, outputs=transcription)
+     b2.click(text2sentiment, inputs=text, outputs=label_sentiment)
+     b3.click(image_classification, inputs=image, outputs=label_image)
+
+ demo.launch()
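Note: the `source=` and `shape=` arguments above are Gradio 3.x API and were removed in Gradio 4.x. A minimal sketch of what a 4.x port would change (an assumption, not part of this commit): the microphone source moves to `sources=[...]`, and since `gr.Image` no longer resizes inputs, the resize moves into the prediction function.

# Hypothetical Gradio 4.x adaptation (assumption, not part of this commit)
audio = gr.Audio(sources=["microphone"], type="filepath")
image = gr.Image()  # no shape= argument in 4.x; input arrives at its original size

def image_classification(inp):
    inp = tf.image.resize(inp, (224, 224))  # resize here instead of in gr.Image
    inp = tf.reshape(inp, (-1, 224, 224, 3))
    inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)
    prediction = inception_net.predict(inp).flatten()
    return {labels[i]: float(prediction[i]) for i in range(1000)}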