|
import numpy as np |
|
import gradio as gr |
|
|
|
def flip_text(x):
    """Return the string *x* with its characters in reverse order.

    Placeholder handler used by the demo UI until a real text-generation
    model is wired in.
    """
    return "".join(reversed(x))
|
|
|
def flip_image(x):
    """Return image array *x* mirrored left-to-right (horizontal flip).

    Expects an array of at least 2 dimensions; the flip reverses the
    column axis, matching ``np.fliplr``.
    """
    # Reversing axis 1 with a slice is equivalent to np.fliplr for 2-D+ input.
    return x[:, ::-1]
|
|
|
# Title shown in the browser tab / page header of the demo UI.
interface_title = "TSAI-ERAV1 Capstone - Multimodal GPT"

# BUG FIX: the original used `with gr.Interface(...)` as a layout context.
# gr.Interface is a single-function wrapper: it rejects fn=None, has no
# `layout` parameter, and does not expose Tab/Textbox/Button/Image methods,
# so the script crashed on startup. gr.Blocks is the container that supports
# tabs and manual event wiring; components are created from the `gr`
# namespace, not as methods on the Blocks instance.
with gr.Blocks(title=interface_title) as demo:
    gr.Markdown(f"# {interface_title}")
    gr.Markdown("Choose text mode/image mode/audio mode for generation")

    with gr.Tab("Text mode"):
        text_input = gr.Textbox(placeholder="Enter a prompt", label="Input")
        text_input_count = gr.Textbox(
            placeholder="Enter number of characters you want to generate",
            label="Count",
        )
        text_button = gr.Button("Generate Text")
        text_output = gr.Textbox(label="Chat GPT like text")

        # Placeholder wiring: echoes the reversed prompt until the real
        # model handler replaces flip_text.
        text_button.click(flip_text, inputs=text_input, outputs=text_output)

    with gr.Tab("Image mode"):
        image_input = gr.Image()
        image_text_input = gr.Textbox(
            placeholder="Enter a question/prompt around the image",
            label="Question/Prompt",
        )
        image_button = gr.Button("Generate Text")
        image_text_output = gr.Textbox(label="Answer")

        # NOTE(review): only the text prompt is passed to the handler;
        # image_input is not wired in yet — presumably intentional scaffolding.
        image_button.click(flip_text, inputs=image_text_input, outputs=image_text_output)

    with gr.Tab("Audio mode"):
        audio_text_input = gr.Textbox()
        audio_text_output = gr.Textbox()
        audio_button = gr.Button("Generate Text")

        audio_button.click(flip_text, inputs=audio_text_input, outputs=audio_text_output)

demo.launch()
|
|