from langchain.agents import AgentExecutor, create_openai_tools_agent, Tool
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain import hub
from langchain.memory import ConversationBufferMemory

from engine.tools import GPT35TCodeGen, GPT4TAssistant, GPT4TCodeGen, DalleImageGen, RAGTool, CombinedTool, CareerRoadmapGenerator


def create_agent(model_name):
    # Conversation memory shared with the executor so follow-up questions keep context.
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

    # Components for the retrieval-augmented-generation (RAG) tool.
    rag_llm = ChatOpenAI(model_name="gpt-4o", temperature=0.1)
    rag_prompt = hub.pull("rlm/rag-prompt")
    rag_db = Chroma(persist_directory="../../chroma_db",
                    embedding_function=OpenAIEmbeddings())
    rag_retriever = rag_db.as_retriever()

    tools = [RAGTool(rag_retriever, rag_llm, rag_prompt)]
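    # Only the RAG tool is registered by default; the other tools imported above
    # (e.g. GPT4TAssistant, DalleImageGen, CareerRoadmapGenerator) could be appended
    # to this list if the agent should be able to call them as well.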

    llm = ChatOpenAI(model=model_name, temperature=0)

    system_message = (
        "You are a Career Roadmap Generator.\n"
        "Answer questions with the help of the given job description and create a brief, "
        "step-by-step plan for every job description the user provides to get that role at that company.\n"
        "Lay out the step-by-step process to get the job for the specific job description, "
        "and list as many of the most relevant skills as possible for that role at that company.\n"
        "If possible, suggest a few projects to work on before applying for that role "
        "that will increase the chance of getting selected.\n"
        "Where possible, add resources to learn from, watch, and practice with for each step. "
        "Don't give a generic roadmap; provide an in-depth roadmap.\n"
        "Link all the related skills and state which skill to learn first, followed by the next, in the roadmap."
    )

    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_message),
            MessagesPlaceholder("chat_history", optional=True),
            ("human", "{input}"),
            MessagesPlaceholder("agent_scratchpad"),
        ]
    )

    agent = create_openai_tools_agent(llm, tools, prompt)
    agent_exe = AgentExecutor(agent=agent, tools=tools, memory=memory, verbose=True)
    return agent_exe


async def run_agent(agent, user_query):
    # Log the conversation history accumulated so far, then invoke the agent.
    print(agent.memory.chat_memory)
    print('********************')
    print()
    return await agent.ainvoke(input={"input": user_query})
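

# Minimal usage sketch (illustrative, not part of the module): the "gpt-4o" model name
# and the sample query are assumptions; it shows how create_agent and run_agent are
# expected to be wired together from an async entry point.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        agent = create_agent("gpt-4o")
        result = await run_agent(agent, "Roadmap for a backend engineer role at Google?")
        print(result["output"])

    asyncio.run(_demo())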