import gradio as gr
from langchain.agents import OpenAIFunctionsAgent, AgentExecutor
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.prompts import MessagesPlaceholder
from langchain.schema import SystemMessage

from final_tools import custom_tools
define_agent = """ | |
You are Apollo, an AI music-player assistant, designed to provide a personalized and engaging listening experience through thoughtful interaction and intelligent tool usage. | |
Your Main Responsibilities: | |
1. **Play Music:** Utilize your specialized toolkit to fulfill music requests. | |
2. **Mood Monitoring:** Constantly gauge the user's mood and adapt the music accordingly. For example, if the mood shifts from 'Happy' to 'more upbeat,' select 'Energetic' music. | |
3. **Track and Artist Memory:** Be prepared to recall tracks and/or artists that the user has previously requested. | |
4. **Provide Guidance:** If the user appears indecisive or unsure about their selection, proactively offer suggestions based on their previous preferences or popular choices within the desired mood or genre. | |
5. **Seek Clarification:** If a user's request is ambiguous, don't hesitate to ask for more details. | |
""" | |
# Global state so explain_track() (and future functions that need an LLM) can access it
LLM_STATE = gr.State()
AGENT_EXECUTOR_STATE = gr.State()

# MODEL = "gpt-4"
MODEL = "gpt-3.5-turbo-0613"  # best budget option right now
def create_agent(key):  # accepts the user's OpenAI API key
    # System prompt that defines Apollo's persona and responsibilities
    system_message = SystemMessage(content=define_agent)
    MEMORY_KEY = "chat_history"

    # Prompt with a placeholder so conversation history is injected on every turn
    prompt = OpenAIFunctionsAgent.create_prompt(
        system_message=system_message,
        extra_prompt_messages=[MessagesPlaceholder(variable_name=MEMORY_KEY)]
    )
    memory = ConversationBufferMemory(memory_key=MEMORY_KEY, return_messages=True)

    llm = ChatOpenAI(openai_api_key=key, max_retries=3, temperature=0, model=MODEL)
    LLM_STATE.value = llm

    # Function-calling agent wired to the custom music tools, plus an executor with buffer memory
    agent = OpenAIFunctionsAgent(llm=LLM_STATE.value, tools=custom_tools, prompt=prompt)
    agent_executor = AgentExecutor(agent=agent, tools=custom_tools, memory=memory, verbose=True)
    AGENT_EXECUTOR_STATE.value = agent_executor
    return AGENT_EXECUTOR_STATE.value
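

# Usage sketch: one way create_agent could be hooked into a Gradio chat UI.
# The Blocks layout, component names, and the respond() callback below are
# illustrative assumptions, not necessarily how the app actually wires things up.
def respond(message, chat_history, key):
    # Reuse the cached executor if one exists; otherwise build it from the supplied key
    agent_executor = AGENT_EXECUTOR_STATE.value or create_agent(key)
    reply = agent_executor.run(message)
    chat_history.append((message, reply))
    return "", chat_history


with gr.Blocks() as demo:
    api_key_box = gr.Textbox(label="OpenAI API key", type="password")
    chatbot = gr.Chatbot(label="Apollo")
    user_msg = gr.Textbox(label="Ask Apollo to play something")
    user_msg.submit(respond, [user_msg, chatbot, api_key_box], [user_msg, chatbot])

demo.launch()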