import gradio as gr
import requests
import tensorflow as tf
from transformers import pipeline, AutoModelForCTC, AutoTokenizer

# speech recognition: a Spanish wav2vec2 CTC model plus its tokenizer,
# wrapped below in an automatic-speech-recognition pipeline
model_name = "facebook/wav2vec2-large-xlsr-53-spanish"
model = AutoModelForCTC.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

trans = pipeline("automatic-speech-recognition", model=model, tokenizer=tokenizer)

def audio2text(audio):
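  # run the ASR pipeline on the recorded audio file and return only the text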
  text = trans(audio)["text"]
  return text


# text2sentiment: classify the sentiment of Spanish text with a robertuito pipeline
classifier = pipeline("text-classification", model="pysentimiento/robertuito-sentiment-analysis")

def text2sentiment(text):
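  # return only the predicted sentiment label (POS / NEG / NEU)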
  return classifier(text)[0]["label"]


# image_classification: MobileNetV2 pre-trained on ImageNet (1,000 classes)
mobilenet = tf.keras.applications.MobileNetV2()

# download the human-readable ImageNet class labels (one label per line)
answer = requests.get("https://git.io/JJkYN")
labels = answer.text.split("\n")

def image_classification(inp):
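  # add a batch dimension, apply MobileNetV2 preprocessing, then map every
  # ImageNet label to its predicted probability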
  inp = inp.reshape((-1, 224, 224, 3))
  inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)
  prediction = mobilenet.predict(inp).flatten()
  confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
  return confidences


# demo: a Blocks interface with one tab per task
demo = gr.Blocks()

with demo:
  gr.Markdown("This is the second demo with Blocks")
  with gr.Tabs():
    with gr.TabItem("Transcribe audio in Spanish"):
      with gr.Row():
        audio = gr.Audio(source="microphone", type="filepath")
        transcription = gr.Textbox()
      b1 = gr.Button("Transcribe")

    with gr.TabItem("Sentiment analysis in Spanish"):
      with gr.Row():
        text = gr.Textbox()
        label_sentiment = gr.Label()
      b2 = gr.Button("Sentiment")

    with gr.TabItem("Image classification"):
      with gr.Row():
        image = gr.Image(shape=(224, 224))
        label_image = gr.Label(num_top_classes=3)
      b3 = gr.Button("Classify")

    # wire each button to its function: input component -> function -> output component
    b1.click(audio2text, inputs=audio, outputs=transcription)
    b2.click(text2sentiment, inputs=text, outputs=label_sentiment)
    b3.click(image_classification, inputs=image, outputs=label_image)

demo.launch()