import json
import time

import pandas as pd
from transformers import AutoTokenizer, AutoModel
from langchain_community.chat_models import ChatOpenAI
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.memory import ConversationBufferWindowMemory
from langchain.schema.runnable import RunnablePassthrough
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.agents import AgentExecutor

import tools
from tools import set_chatbot_name, close_chat, recommand_podcast, app_features, joke_teller
from config import settings
from database_functions import (
    get_chat_bot_name,
    get_chat_history,
    get_last_conversion,
    get_last_session,
    get_mood_data,
)


def get_mood_summary(user_id):
    """Ask the LLM for a short description of the user's mood-tracking data."""
    data = get_mood_data(user_id)
    system_prompt = """You are a descriptive assistant that provides a brief description of the user data
    related to their mood-tracking activity. Clearly describe the reason for their mood.
    Avoid times and dates in the description.
    Here is the user data: {data}"""
    llm = ChatOpenAI(model=settings.OPENAI_MODEL,
                     openai_api_key=settings.OPENAI_KEY, temperature=0.0)
    return llm.invoke(system_prompt.format(data=data)).content


def deanonymizer(input, anonymizer):
    """Restore the original PERSON names in text that was previously anonymized."""
    input = anonymizer.deanonymize(input)
    mapping = anonymizer.deanonymizer_mapping
    if mapping:
        for anonymized_name, original_name in mapping.get("PERSON", {}).items():
            # Replace each token of the anonymized name with the original name.
            for part in anonymized_name.split(" "):
                input = input.replace(part, original_name)
    return input


def get_last_session_summary(last_session_id, second_last_session_id):
    """Summarize the previous conversation and return any unresolved problem."""
    conversation = get_last_conversion(last_session_id, second_last_session_id)
    if not conversation:
        return ""
    system_prompt = """Summarize the whole conversation. If the user's problem was not solved, return the problem;
    otherwise return only None. Nothing else.

    conversation: {conversation}

    summary: """
    llm = ChatOpenAI(model=settings.OPENAI_MODEL,
                     openai_api_key=settings.OPENAI_KEY, temperature=0.0)
    return llm.invoke(system_prompt.format(conversation=conversation)).content


def create_agent(user_id, is_first=False):
    """Build an AgentExecutor for this user, seeded with the previous session's memory."""
    previous_session_id = get_last_session(user_id)

    # Expose the chatbot tools to the model as OpenAI function definitions.
    tool_list = [set_chatbot_name, close_chat, recommand_podcast, app_features, joke_teller]
    functions = [convert_to_openai_function(f) for f in tool_list]
    model = ChatOpenAI(model_name=settings.OPENAI_MODEL,
                       openai_api_key=settings.OPENAI_KEY,
                       frequency_penalty=1,
                       temperature=0.7).bind(functions=functions)

    chat_bot_name = get_chat_bot_name(user_id)
    extra_prompt = ""
    previous_problem_summary = None
    mood_summary = ""

    if is_first:
        start = time.time()
        mood_summary = get_mood_summary(user_id)
        if previous_session_id['second_last_session_id']:
            previous_problem_summary = get_last_session_summary(
                previous_session_id['last_session_id'],
                previous_session_id['second_last_session_id'])
        print("time required for mood summary: ", time.time() - start)

        if previous_problem_summary and 'None' not in previous_problem_summary:
            extra_prompt = f"""Ask the user whether her previous problem is solved or not. Use the previous problem
            summary to frame the question. You must include her name, which is {user_id}. Nothing else."""
        else:
            extra_prompt = f"""Only use these templates to start the conversation:
            1. Hey again {user_id}! How's it going?
            2. What's up today? Need ✨ Advice, ✨ a Mood Boost, ✨ a Chat, ✨ Resource Suggestions, ✨ App Features help? How can I help?
            Use any one of these questions for the response, based on your understanding. Do not use anything else; simply return one of these two only."""
    # Assemble the chat prompt: system instructions (plus the first-message extras),
    # the rolling chat history, the new user message, and the agent scratchpad that
    # carries intermediate tool calls.
    prompt = ChatPromptTemplate.from_messages([
        ("system", settings.SYSTEM_PROMPT.format(
            name=chat_bot_name,
            mood=mood_summary,
            previous_summary=previous_problem_summary or "") + extra_prompt),
        MessagesPlaceholder(variable_name="chat_history"),
        ("user", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ])

    # Window memory keeps only the last 5 exchanges, backed by the stored history
    # of the user's most recent session.
    memory = ConversationBufferWindowMemory(
        memory_key="chat_history",
        chat_memory=get_chat_history(previous_session_id['last_session_id']),
        return_messages=True,
        k=5)

    # Make the session memory available to the tool implementations in tools.py.
    tools.MEMORY = memory

    chain = (RunnablePassthrough.assign(
                agent_scratchpad=lambda x: format_to_openai_functions(x["intermediate_steps"]))
             | prompt
             | model
             | OpenAIFunctionsAgentOutputParser())

    agent_executor = AgentExecutor(agent=chain, tools=tool_list, memory=memory, verbose=True)
    return agent_executor
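

# A minimal usage sketch, assuming a valid user_id exists in the database and the
# OpenAI credentials in `settings` are configured; the real caller (e.g. an API
# route) is not shown here, and "demo-user" is a hypothetical ID. AgentExecutor.invoke
# returns a dict whose "output" key holds the assistant's reply.
if __name__ == "__main__":
    executor = create_agent(user_id="demo-user", is_first=True)
    result = executor.invoke({"input": "Hi, I'm feeling a bit low today."})
    print(result["output"])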