import gradio as gr

from main import index, run, ingest_files
from gtts import gTTS
import os
import time

from transformers import pipeline
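# Speech-to-text pipeline (Whisper base) used to transcribe microphone input.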
p = pipeline("automatic-speech-recognition", model="openai/whisper-base")

# Use the typed or transcribed text to call the chat method from main.py.

models = ["GPT-3.5", "Flan UL2", "Flan T5", "GPT-4"]

name = os.environ.get("name", "Rohan")

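# Take the user's typed question, run it through the selected model, and
# append the (question, answer) pair to the chat history.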
def add_text(history, text, model):
    print("Question asked: " + text)
    response = run_model(text, model)
    history = history + [(text, response)]
    print(history)
    return history, ""

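# Run a query through run() from main.py for the chosen model and log the latency.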
def run_model(text, model):
    start_time = time.time()
    print("start time:" + str(start_time))
    response = run(text, model)
    end_time = time.time()

    if "SOURCES:" in response:
        response = response.replace("SOURCES:", "\nSOURCES:")

    print(response)
    print("Time taken: " + str(end_time - start_time))
    return response

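# Transcribe the recorded audio with Whisper, get the model's answer, convert it
# to speech with gTTS, and append the audio exchange to the chat history.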
def get_output(history, audio, model):
    # Ignore the change event fired when the audio input is cleared.
    if audio is None:
        return history

    txt = p(audio)["text"]

    audio_path = 'response.wav'
    response = run_model(txt, model)

    # Drop the sources before synthesizing speech.
    trimmed_response = response.split("SOURCES:")[0]
    myobj = gTTS(text=trimmed_response, lang='en', slow=False)
    myobj.save(audio_path)

    history.append(((audio,), (audio_path,)))
    print(history)
    return history

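# Build the index for the selected model via index() from main.py and reset the
# conversation to the greeting message.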
def set_model(history, model, first_time=False):
    print("Model selected: " + model)
    history = get_first_message(history)
    index(model, first_time)
    return history

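# Greeting shown as the first chatbot message.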
def get_first_message(history):
    history = [(None,
                "Hi! I am " + name + "'s Personal Assistant. Want " + name + " to answer your questions? Just Roar it!")]
    return history

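# Reset the microphone input after a voice interaction has been handled.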
def clear_audio(audio):
    return None

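# Pass-through step used at the end of each event chain.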
def bot(history):
    return history

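# Ingest the uploaded files into the index for the selected model and report
# the outcome in the chat history.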
def upload_file(files, history, model):
    file_paths = [file.name for file in files]
    print("Ingesting files: " + str(file_paths))
    text = 'Uploaded a file'
    if ingest_files(file_paths, model):
        response = 'Files are ingested. Roar now!'
    else:
        response = 'Files are not ingested. Please try again.'

    history = history + [(text, response)]
    return history

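# Theme customization for the Gradio UI.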
theme = gr.Theme.from_hub("snehilsanyal/scikit-learn")

theme.block_background_fill = gr.themes.colors.neutral.c100
theme.block_border_width = '2px'
theme.block_border_radius = '10px'

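# UI layout: chatbot window, model selector, text/file/audio inputs, and examples.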
with gr.Blocks(theme=theme, title='Roar!') as demo:

    gr.HTML('<img src="file/assets/logo.png" style="width: 100px; height: 100px; margin: 0 auto;border:5px solid orange;border-radius: 50%; display: block">')
    gr.HTML("<h1 style='text-align: center;'>Roar - A Personal Assistant</h1>")

    chatbot = gr.Chatbot(get_first_message([]), elem_id="chatbot").style(height=500)

    with gr.Row():
        radio = gr.Radio(models, label="Choose a model", value="GPT-4", type="value")

    with gr.Row():
        with gr.Column(scale=0.6):
            txt = gr.Textbox(
                label="Let's hear the roar!",
                placeholder="Enter text and press enter, or upload a file", lines=1
            ).style(container=False)

        with gr.Column(scale=0.2):
            upload = gr.UploadButton(label="Roar on a file", type="file", file_count='multiple',
                                     file_types=['docx', 'txt', 'pdf', 'html']).style(container=False)

        with gr.Column(scale=0.2):
            audio = gr.Audio(source="microphone", type="filepath", label="Let me hear your roar!").style(container=False)

    with gr.Row():
        gr.Examples(examples=['Roar it! What are you an expert of?', 'Roar it! What are you currently doing?',
                              'Roar it! What is your opinion on Large Language Models?'], inputs=[txt], label="Examples")

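    # Event wiring: text submission, model switching, voice input, and file upload
    # all update the chatbot history.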
    txt.submit(add_text, [chatbot, txt, radio], [chatbot, txt], postprocess=False).then(
        bot, chatbot, chatbot
    )

    radio.change(fn=set_model, inputs=[chatbot, radio], outputs=[chatbot]).then(bot, chatbot, chatbot)

    audio.change(fn=get_output, inputs=[chatbot, audio, radio], outputs=[chatbot], show_progress=True).then(
        bot, chatbot, chatbot
    ).then(clear_audio, audio, audio)

    upload.upload(upload_file, inputs=[upload, chatbot, radio], outputs=[chatbot]).then(bot, chatbot, chatbot)

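    # Prepare the index for the default model once at start-up.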
    set_model([], radio.value, first_time=True)

if __name__ == "__main__":
    demo.queue(concurrency_count=5)
    demo.launch(debug=True, favicon_path="file/assets/logo.png")