bmoxi / utils.py
import time

from langchain_community.chat_models import ChatOpenAI
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.memory import ConversationBufferWindowMemory
from langchain.schema.runnable import RunnablePassthrough
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.agents import AgentExecutor

from config import settings
import tools
from tools import set_chatbot_name, close_chat, recommand_podcast, app_features, joke_teller
from database_functions import (
    get_chat_bot_name,
    get_chat_history,
    get_last_conversion,
    get_last_session,
    get_mood_data,
    save_message,
)

def get_mood_summary(user_id):
    """Return a short LLM-written description of the user's mood-tracking data."""
    data = get_mood_data(user_id)
    system_prompt = """You are a descriptive assistant that writes a brief description of the user's mood-tracking data. Clearly describe the reasons behind their moods. Avoid mentioning times and dates in the description.
    Here is the user data: {data}"""
    llm = ChatOpenAI(model=settings.OPENAI_MODEL,
                     openai_api_key=settings.OPENAI_KEY, temperature=0.0)
    return llm.invoke(system_prompt.format(data=data)).content
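
# Usage sketch: the user id below is hypothetical and assumes mood data exists for that
# user in the database behind get_mood_data.
#
#   summary = get_mood_summary("demo-user-123")
#   print(summary)
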
def deanonymizer(text, anonymizer):
    """Restore the original PERSON names in `text` using the anonymizer's reverse mapping."""
    text = anonymizer.deanonymize(text)
    mapping = anonymizer.deanonymizer_mapping
    if mapping:
        for fake_name, real_name in mapping["PERSON"].items():
            # Also replace each part of the fake name (first/last) that may appear on its own.
            for name_part in fake_name.split(" "):
                text = text.replace(name_part, real_name)
    return text
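
# Sketch of how `deanonymizer` pairs with a reversible anonymizer. This module does not
# build the anonymizer itself, so the example assumes LangChain's experimental
# PresidioReversibleAnonymizer is constructed elsewhere in the app:
#
#   from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
#
#   anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
#   masked = anonymizer.anonymize("Hi, I'm Jane Doe and I feel anxious today.")
#   restored = deanonymizer(masked, anonymizer)  # swaps fake name parts back to "Jane Doe"
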
def get_last_session_summary(last_session_id, second_last_session_id):
    """Summarize the previous session's conversation so the agent can follow up on it."""
    conversation = get_last_conversion(last_session_id, second_last_session_id)
    if conversation:
        system_prompt = """context: this is a typical conversation between two high-school gen z girls.
        you are one of the high-school gen z girls. your voice is edgy and raw.
        in the summary, you must use "I" for the AI (BMOXI) and "my friend" for the human.
        this is your conversation with your best friend. summarize the whole conversation and return the summary, nothing else.
        conversation: {conversation}
        summary:
        """
        llm = ChatOpenAI(model=settings.OPENAI_MODEL,
                         openai_api_key=settings.OPENAI_KEY, temperature=0.0)
        response = llm.invoke(system_prompt.format(conversation=conversation)).content
        return response
    else:
        return ""
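
# Usage sketch: `get_last_session` (used in create_agent below) is expected to return
# both session ids; the user id here is hypothetical.
#
#   ids = get_last_session("demo-user-123")
#   summary = get_last_session_summary(ids["last_session_id"], ids["second_last_session_id"])
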
def create_agent(user_id, is_first=False):
    """Build the BMOXI chat agent for a user, wiring in tools, window memory, and session context."""
    previous_session_id = get_last_session(user_id)
    agent_tools = [set_chatbot_name, close_chat, recommand_podcast, app_features, joke_teller]
    functions = [convert_to_openai_function(f) for f in agent_tools]
    model = ChatOpenAI(model_name=settings.OPENAI_MODEL,
                       openai_api_key=settings.OPENAI_KEY, frequency_penalty=1, temperature=0.7).bind(functions=functions)
    chat_bot_name = get_chat_bot_name(user_id)
    extra_prompt = ""
    previous_problem_summary = None
    if is_first:
        start = time.time()
        mood_summary = get_mood_summary(user_id)
        print(previous_session_id)
        if previous_session_id['second_last_session_id']:
            previous_problem_summary = get_last_session_summary(
                previous_session_id['last_session_id'], previous_session_id['second_last_session_id'])
            print('previous summary:', previous_problem_summary)
            save_message(user_id=user_id, query=previous_problem_summary)
        print("time required for mood summary:", time.time() - start)
        extra_prompt = f"""ask the user whether her previous problem has been solved or not. use the previous problem summary to frame the question.
        you must also start the message with: "hey {user_id}!"
        """
        print('extra prompt: ' + extra_prompt)
    prompt = ChatPromptTemplate.from_messages([
        ("system", settings.SYSTEM_PROMPT.format(
            name=chat_bot_name, mood="", previous_summary=previous_problem_summary) + extra_prompt),
        MessagesPlaceholder(variable_name="chat_history"),
        ("user", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ])
    memory = ConversationBufferWindowMemory(
        memory_key="chat_history",
        chat_memory=get_chat_history(previous_session_id['last_session_id']),
        return_messages=True, k=5)
    # Share the memory and session id with the tool functions by assigning on the tools
    # module itself; rebinding names imported into this module would not be visible there.
    tools.MEMORY = memory
    tools.SESSION_ID = previous_session_id['last_session_id']
    chain = (RunnablePassthrough.assign(
        agent_scratchpad=lambda x: format_to_openai_functions(x["intermediate_steps"]))
        | prompt | model | OpenAIFunctionsAgentOutputParser())
    agent_executor = AgentExecutor(
        agent=chain, tools=agent_tools, memory=memory, verbose=True)
    return agent_executor
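

if __name__ == "__main__":
    # Minimal smoke-test sketch: the user id is hypothetical and assumes config.settings
    # holds a valid OPENAI_KEY and that the database helpers can reach their backing store.
    agent = create_agent("demo-user-123", is_first=True)
    result = agent.invoke({"input": "hey, I had a rough day at practice"})
    print(result["output"])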