import logging
import uuid

import gradio as gr
from dotenv import load_dotenv

# Load environment variables (API keys, etc.) before importing the agent.
load_dotenv(override=True)

from langchain_agent import llm_inference


def predict_interface(message, history=None, user_id=None):
    """Forward the chat message to the LangChain agent and return its reply."""
    response = llm_inference(message, history, user_id)
    # Log the raw agent response and the user id for debugging.
    logging.error(response)
    logging.error(user_id)
    return response["output"]


# Per-session identifier, passed to the agent as an additional input.
session_id = gr.Textbox(value=str(uuid.uuid4()), type="text", label="session_id")

example_sentences = [
    "Recommend me something in Quentin Tarantino reggae style",
    "Give me songs with calm and relaxing vibes",
    "I want to listen to something like the movie Inception",
    "I want music that sounds like Lebron James eating soup",
]
# Each example must provide a value for every input: the prompt and a user id
# for the session_id textbox.
examples = [[example, f"user_{i}"] for i, example in enumerate(example_sentences)]

chat = gr.ChatInterface(
    predict_interface,
    additional_inputs=[session_id],
    chatbot=gr.Chatbot(height=600),
    textbox=gr.Textbox(placeholder="Ask me for music recommendations!", container=False, scale=7),
    description="This AI makes song recommendations based on your music style.",
    examples=examples,
    title="Persona Music song recommender",
    retry_btn="Retry",
    clear_btn="Clear",
    undo_btn=None,
)
chat.queue().launch()
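The interface only assumes one thing about the agent module: llm_inference takes the message, the chat history, and a user id, and returns a dict with an "output" key. The agent itself is not shown here; the stub below is a minimal sketch of that contract, handy for running the UI locally before wiring in the real LangChain agent. Only the function name, its signature, and the "output" key are taken from the app code above; everything else is an assumption.

# langchain_agent.py (local test stub; the real module wraps a LangChain agent)
def llm_inference(message, history=None, user_id=None):
    # A real implementation would pass user_id to the agent so it can keep
    # per-session memory; here we just echo the request back.
    return {"output": f"(stub reply for {user_id}) You asked: {message}"}

Dropping this file next to the app lets chat.queue().launch() start and serve canned replies, so the layout, examples, and session_id plumbing can be checked in isolation.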