mgutierrez committed
Commit 1d6bf96
1 Parent(s): 8aa0da7

Update app.py

Files changed (1):
  1. app.py +25 -26
app.py CHANGED
@@ -3,51 +3,50 @@ import tensorflow as tf
 from transformers import pipeline
 
 inception_net = tf.keras.applications.MobileNetV2()
-def classify_imagen(inp):
+
+def classify_image(inp):
     inp = inp.reshape((-1, 224, 224, 3))
     inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)
     prediction = inception_net.predict(inp).reshape(1,1000)
     pred_scores = tf.keras.applications.mobilenet_v2.decode_predictions(prediction, top=100)
-    confidence = {f'{pred_scores[0][i][1]}': float(pred_scores[0][i][2]) for i in range(100)}
-    return confidence
+    confidences = {f'{pred_scores[0][i][1]}': float(pred_scores[0][i][2]) for i in range(100)}
+    return confidences
 
 trans = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-xlsr-53-spanish")
-def audio2text(audio):
+def audio_to_text(audio):
     text = trans(audio)["text"]
     return text
-
-classificator = pipeline("text-classification", model="pysentimiento/robertuito-sentiment-analysis")
-def text2sentiment(text):
-    return classificator(text)[0]['label']
 
+classify = pipeline("text-classification", model="pysentimiento/robertuito-sentiment-analysis")
+def text_to_sentiment(text):
+    return classify(text)[0]["label"]
 
 demo = gr.Blocks()
 
 with demo:
-    gr.Markdown("Este es el segundo demo con Blocks hecho por Rafa")
+    gr.Markdown("Second Demo with Blocks")
     with gr.Tabs():
-
-        with gr.TabItem("Transcribe Audio en español"):
+        with gr.TabItem("Transcript audio in spanish"):
             with gr.Row():
-                audio = gr.Audio(source='microphone', type='filepath')
-                transcript = gr.Textbox()
-            b1 = gr.Button("Transcribe")
+                audio = gr.Audio(source="microphone", type="filepath")
+                transcription = gr.Textbox()
+            button1 = gr.Button("Please transcript")
 
-        with gr.TabItem("Analisis de sentimientos"):
+        with gr.TabItem("Sentiment analisys"):
             with gr.Row():
-                texto = gr.Textbox()
+                text = gr.Textbox()
                 label = gr.Label()
-            b2 = gr.Button("Sentimientos")
-
-        b1.click(audio2text, inputs=audio, outputs=transcript)
-        b2.click(text2sentiment, inputs=texto, outputs=label)
+            button2 = gr.Button("Please sentiment")
+
+        button1.click(audio_to_text, inputs=audio, outputs=transcription)
+        button2.click(text_to_sentiment, inputs=text, outputs=label)
 
-        with gr.TabItem("Clasificador de imagenes"):
+        with gr.TabItem("Image classify"):
             with gr.Row():
-                image = gr.Image(shape=(224, 224))
-                label= gr.Label(num_top_classes=3)
-            bimage= gr.Button("Clasificar")
-
-        bimage.click(classify_imagen, inputs=image, outputs=label)
+                image = gr.Image(shape=(224,224))
+                labelImage = gr.Label(num_top_classes=3)
+            button3 = gr.Button("Please classify Image")
+
+        button3.click(classify_image, inputs=image, outputs=labelImage)
 
 demo.launch()
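
Note: the hunk starts at line 3, so the file's first two lines are not shown; they presumably hold the gradio import that gr.Blocks() relies on. As a quick sanity check of the renamed classify_image, the sketch below (my addition, not part of the commit; it assumes NumPy is installed and the definitions above are in scope) feeds a random 224x224 RGB array through the function and prints the top class:

import numpy as np

# Sketch only, not in the commit: classify_image reshapes its input to a
# batch of one, so any (224, 224, 3) uint8 array works as a dummy image.
dummy = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
scores = classify_image(dummy)   # dict of 100 {class_name: probability} entries
top = max(scores, key=scores.get)
print(top, scores[top])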
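For reviewers unfamiliar with the pipeline API: a text-classification pipeline returns a list with one {'label', 'score'} dict per input, which is why text_to_sentiment indexes [0]["label"]. An illustrative call (the sample sentence is made up; this model emits sentiment tags such as POS, NEG, or NEU):

# Illustrative only: the pipeline wraps its result in a list.
result = classify("¡Qué buen día!")          # e.g. [{'label': 'POS', 'score': 0.98}]
print(text_to_sentiment("¡Qué buen día!"))   # e.g. POS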
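Likewise, type="filepath" makes Gradio hand audio_to_text a path to the recorded clip, and the ASR pipeline accepts a path directly (assuming ffmpeg is available for decoding), so the function can also be tested without the microphone. The filename below is a placeholder, not a file from the repo:

# Placeholder path: any short Spanish audio clip on disk would do.
print(audio_to_text("sample_es.wav"))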