# fastlane / app.py — Gradio chat UI mounted on a FastAPI e-commerce assistant API.
# Provenance (Hugging Face file-viewer header): author hrguarinv,
# commit "Update app.py" (4e0f0a4, verified).
import gradio as gr
import requests
from fastapi import FastAPI
from fastapi.testclient import TestClient
from services.utils import undo_last_message, clear_chat
from services.nlp import transcribe
from routes import input_handler, purchase, order_management, account_management, customer_support, search_products
app = FastAPI()

# Every feature area ships its own APIRouter; mount each one under a URL
# prefix and Swagger tag derived from the same slug so they always agree.
_FEATURE_ROUTERS = [
    (purchase.router, "purchase"),
    (order_management.router, "order-management"),
    (account_management.router, "account-management"),
    (customer_support.router, "customer-support"),
    (search_products.router, "search-products"),
    (input_handler.router, "input-handler"),
]
for _router, _slug in _FEATURE_ROUTERS:
    app.include_router(_router, prefix=f"/{_slug}", tags=[_slug])

# In-process HTTP client: the Gradio callbacks below call the API through
# this instead of going over the network.
client = TestClient(app)
def print_like_dislike(x: gr.LikeData):
    """Log a chatbot like/dislike event (message index, text, liked flag) to stdout."""
    feedback = (x.index, x.value, x.liked)
    print(*feedback)
def add_message(history, message, audio_input):
    """Handle one user turn: build an OpenAI-style message list, POST it to the
    in-process /input-handler/ endpoint, and append the exchange to the chat.

    Args:
        history: list of (user, assistant) string pairs from gr.Chatbot.
        message: gr.MultimodalTextbox value — a dict with "text" and "files"
            keys (may be None when nothing was typed).
        audio_input: filepath from gr.Audio, or None.

    Returns:
        (updated history, a cleared/disabled MultimodalTextbox, None to reset
        the audio widget) — matching the three Gradio outputs wired to submit.
    """
    if message is None and audio_input is None:
        # Bug fix: the original returned a bare string here, which does not
        # match the three output components and broke the UI update.
        return history, gr.MultimodalTextbox(value=None, interactive=True), None

    # Normalize the multimodal payload: "text" can be None (audio-only turn)
    # and the original `+=` concatenations then raised TypeError.
    if message is None:
        message = {"text": "", "files": []}
    if message.get("text") is None:
        message["text"] = ""
    files = message.get("files") or []

    history_openai_format = [
        {"role": "system", "content": "You are an assistant for an eCommerce store."}]
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})

    if message["text"]:
        history_openai_format.append({"role": "user", "content": message["text"]})

    if audio_input:
        transcription = transcribe(audio_input)
        print(f"Transcription: {transcription}")
        # Keep the transcription visible in the chat bubble as well.
        message["text"] += f' [Audio transcription] = {transcription}'
        history_openai_format.append({"role": "user", "content": transcription})

    for _ in files:
        message["text"] += ' [File attached]'
        history_openai_format.append({"role": "user", "content": "Image attached"})

    response = client.post("/input-handler/", json={
        "text": message["text"], "files": files, "history": history_openai_format})
    if response.status_code == 200:
        bot_response = response.json().get("generative response")
    else:
        # Bug fix: on a non-200 the original silently dropped the turn
        # (bot_response was never assigned). Surface the failure instead.
        bot_response = f"Sorry, something went wrong (HTTP {response.status_code})."
    history.append((message["text"], bot_response))
    return history, gr.MultimodalTextbox(value=None, interactive=False), None
# --- Gradio chat UI, wired to the FastAPI backend via add_message ----------
with gr.Blocks(theme="soft") as demo:
    gr.Markdown("<h1 style='text-align: center;'>Try it!</h1>")
    gr.Markdown("AI sales assistance for e-commerce")
    chatbot = gr.Chatbot(
        height=400,
        elem_id="chatbot"
    )
    # Add clear and undo buttons
    with gr.Row():
        undo_btn = gr.Button("Delete Previous")
        clear_btn = gr.Button("Clear")
    undo_btn.click(undo_last_message, chatbot, chatbot)
    clear_btn.click(clear_chat, [], chatbot)
    chat_input = gr.MultimodalTextbox(
        interactive=True, placeholder="Enter message, upload file, or record audio...", show_label=False)
    audio_input = gr.Audio(sources=["microphone"])
    # Submit sends (history, message, audio) to add_message, whose return
    # value disables the textbox; the chained .then() re-enables it once the
    # turn has been processed.
    chat_msg = chat_input.submit(
        add_message, [chatbot, chat_input, audio_input], [chatbot, chat_input, audio_input])
    chat_msg.then(lambda: gr.MultimodalTextbox(
        interactive=True), None, [chat_input])
    chatbot.like(print_like_dislike, None, None)

demo.queue()
demo.launch()
# NOTE(review): demo.launch() starts a standalone Gradio server while the next
# line also mounts the same UI into the FastAPI app; only one serving mode is
# usually needed. Also, mount path "./" looks unusual (typically "/") —
# confirm the intended deployment setup.
app = gr.mount_gradio_app(app, demo, path="./")