import gradio as gr
from fastapi import FastAPI
from fastapi.testclient import TestClient
from services.utils import undo_last_message, clear_chat
from services.nlp import transcribe
from routes import input_handler, purchase, order_management, account_management, customer_support, search_products
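
# FastAPI backend: each feature area is registered below as its own router
# under a dedicated URL prefix, and the Gradio UI calls it in-process via TestClient.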
app = FastAPI()
app.include_router(purchase.router, prefix="/purchase", tags=["purchase"])
app.include_router(order_management.router,
                   prefix="/order-management", tags=["order-management"])
app.include_router(account_management.router,
                   prefix="/account-management", tags=["account-management"])
app.include_router(customer_support.router,
                   prefix="/customer-support", tags=["customer-support"])
app.include_router(search_products.router,
                   prefix="/search-products", tags=["search-products"])
app.include_router(input_handler.router,
                   prefix="/input-handler", tags=["input-handler"])
# Initialize the TestClient with the FastAPI app
client = TestClient(app)

def print_like_dislike(x: gr.LikeData):
    # Log the index, content, and like/dislike state of the rated message.
    print(x.index, x.value, x.liked)


def add_message(history, message, audio_input):
    # Build an OpenAI-style message list from the chat history, fold in any
    # audio transcription or file attachments, and POST it to /input-handler.
    if message is None and audio_input is None:
        # Nothing to send: surface a prompt in the chat and reset the inputs.
        history.append((None, "Please provide either text or audio."))
        return history, gr.MultimodalTextbox(value=None, interactive=False), None
    history_openai_format = [
        {"role": "system", "content": "You are an assistant for an eCommerce store."}]
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append(
            {"role": "assistant", "content": assistant})
    # Normalise missing text so the concatenations below cannot fail on None.
    if message["text"] is None:
        message["text"] = ""
    if message["text"]:
        history_openai_format.append(
            {"role": "user", "content": message["text"]})
    if audio_input:
        transcription = transcribe(audio_input)
        print(f"Transcription: {transcription}")
        message["text"] += f' [Audio transcription] = {transcription}'
        history_openai_format.append(
            {"role": "user", "content": transcription})
    for x in message["files"]:
        # history.append(((x,), None))
        message["text"] += ' [File attached]'
        history_openai_format.append(
            {"role": "user", "content": "Image attached"})
    response = client.post("/input-handler/", json={
        "text": message["text"], "files": message["files"], "history": history_openai_format})
    if response.status_code == 200:
        bot_response = response.json().get("generative response")
        history.append((message["text"], bot_response))
    return history, gr.MultimodalTextbox(value=None, interactive=False), None


with gr.Blocks(theme="soft") as demo:
    gr.Markdown("<h1 style='text-align: center;'>Fastlane Chat GPT</h1>")
    gr.Markdown("AI sales assistance for e-commerce")
    chatbot = gr.Chatbot(
        height=400,
        elem_id="chatbot"
    )
    # Add clear and undo buttons
    with gr.Row():
        undo_btn = gr.Button("Delete Previous")
        clear_btn = gr.Button("Clear")
    undo_btn.click(undo_last_message, chatbot, chatbot)
    clear_btn.click(clear_chat, [], chatbot)
    chat_input = gr.MultimodalTextbox(
        interactive=True, placeholder="Enter message, upload file, or record audio...", show_label=False)
    audio_input = gr.Audio(sources=["microphone"])
    # Lock the textbox while a message is processed; re-enable it afterwards.
    chat_msg = chat_input.submit(
        add_message, [chatbot, chat_input, audio_input], [chatbot, chat_input, audio_input])
    chat_msg.then(lambda: gr.MultimodalTextbox(
        interactive=True), None, [chat_input])
    chatbot.like(print_like_dislike, None, None)

demo.queue()
demo.launch()

# Expose the Gradio UI from the FastAPI app; mount paths must start with "/".
app = gr.mount_gradio_app(app, demo, path="/")