# NOTE: removed Hugging Face upload-page residue that was pasted above the code
# (uploader caption "SaiChaitanya's picture", commit message "Upload 106 files",
# commit hash "25773cf verified") — it was not valid Python.
from typing import Optional, Type
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import BaseTool
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.vectorstores import VectorStoreRetriever
from langchain_openai import ChatOpenAI
from openai import OpenAI
import requests
import os
class AssistantInput(BaseModel):
    """Input schema shared by the question-answering tools: one free-form question."""

    question: str = Field(description="The question to be asked from GPT models.")
class GPT4TAssistant(BaseTool):
    """Tool that forwards a user question verbatim to a GPT-4 Turbo chat model."""

    name = "GPT4-Turbo_General_Assistant"
    # Adjacent-literal concatenation yields exactly the same description string.
    description = (
        "Use this tool, when the user wants an answer from GPT4 or GPT4-Turbo general assistant.\n"
        "This tool accepts one input question:\n"
        "[question]\n"
        "Don't change the input question from the user and don't change answer from this tool\n."
        "Just pass it through to the user."
    )
    args_schema: Type[BaseModel] = AssistantInput

    def _run(self, question: str):
        """Build a one-shot prompt | model | parser chain and return the answer string."""
        template = ChatPromptTemplate.from_template(
            "You are a general AI assistant. Answer questions with minimal and to the point explanation.\n"
            "Don't put safety and cultural warnings. Only warn about security."
            "answer the following question: {question}"
        )
        llm = ChatOpenAI(model="gpt-4-turbo-preview")
        pipeline = template | llm | StrOutputParser()
        return pipeline.invoke({"question": question})

    async def _arun(self, question: str):
        """Async entry point; delegates to the synchronous implementation."""
        return self._run(question)
class GPT4TCodeGen(BaseTool):
    """Tool that asks a GPT-4 Turbo model to answer a question with code."""

    name = "GPT4-Turbo_Code_Assistant"
    # Adjacent-literal concatenation yields exactly the same description string.
    description = (
        "Use this tool, when the user wants code generated by GPT4 or GPT4-Turbo code assistant.\n"
        "This tool accepts one input question:\n"
        "[question]\n"
        "Don't change the input question from the user and don't change answer from this tool\n."
        "Just pass it through to the user."
    )
    args_schema: Type[BaseModel] = AssistantInput

    def _run(self, question: str):
        """Run a code-oriented prompt through GPT-4 Turbo and return the raw answer."""
        template = ChatPromptTemplate.from_template(
            "You are a code assistant. Answer questions in code with minimal to no explanation.\n"
            "Put brief one line comments on the code for explanation."
            "answer the following question: {question}"
        )
        llm = ChatOpenAI(model="gpt-4-turbo-preview")
        pipeline = template | llm | StrOutputParser()
        return pipeline.invoke({"question": question})

    async def _arun(self, question: str):
        """Async entry point; delegates to the synchronous implementation."""
        return self._run(question)
class GPT35TCodeGen(BaseTool):
    """Code-generation tool backed by GPT-3.5 Turbo.

    Accepts a single question string and returns the model's raw code answer.
    """

    name = "GPT35-Turbo_Code_Assistant"
    description = "Use this tool, when the user wants code generated by GPT3.5 or GPT3.5-Turbo code assistant.\n" +\
                  "This tool accepts one input question:\n" + \
                  "[question]\n" + \
                  "Don't change the input question from the user and don't change answer from this tool\n." +\
                  "Just pass it through to the user."
    args_schema: Type[BaseModel] = AssistantInput

    def _run(self, question: str):
        """Answer `question` with code via a GPT-3.5 Turbo prompt chain."""
        prompt = ChatPromptTemplate.from_template(
            "You are a code assistant. Answer questions in code with minimal to no explanation.\n" +
            "Put brief one line comments on the code for explanation." +
            "answer the following question: {question}")
        # BUG FIX: this tool advertises GPT-3.5 Turbo in its name and description,
        # but previously instantiated "gpt-4-turbo-preview". Use the promised model.
        model = ChatOpenAI(model="gpt-3.5-turbo")
        output_parser = StrOutputParser()
        chain = prompt | model | output_parser
        return chain.invoke({"question": question})

    async def _arun(self, question: str):
        """Async wrapper delegating to `_run`."""
        return self._run(question)
class GeneratorInput(BaseModel):
    """Input schema for the image-generation tool: a single text prompt."""

    prompt: str = Field(description="The prompt for image generation.")
class DalleImageGen(BaseTool):
    """Generate an image with DALL-E, download it, and save it to `image_folder`.

    `_run` returns ``{"image_path": <local path>}`` on success and ``{}`` when
    the image download fails (non-200 response).
    """

    name = "Dalle3_Image_Generator"
    description = "Use this tool, when the user wants images generated by Dall-e-3.\n" +\
                  "This tool accepts one input prompt:\n" + \
                  "[prompt]\n" + \
                  "The output is a json blob with image path in it. Pass the output to the user."
    args_schema: Type[BaseModel] = GeneratorInput
    model_name: str = "dall-e-3"  # alternative: "dall-e-2"
    # NOTE(review): hard-coded absolute Windows path; override per deployment.
    image_folder: str = "C:/Users/siaic/Desktop/BIA 810 B - Developing Business Applications using Generative AI/Project/bot/images"
    image_number: int = 0  # monotonically increasing suffix for saved files

    def _run(self, prompt: str):
        """Generate one 1024x1024 image for `prompt` and save it locally."""
        client = OpenAI()
        image_data = client.images.generate(
            model=self.model_name,
            prompt=prompt,
            size="1024x1024",
            quality="standard",
            n=1,
        )
        image = requests.get(image_data.data[0].url, stream=True)
        if image.status_code == 200:
            # BUG FIX: create the target folder if missing — previously open()
            # raised FileNotFoundError on a fresh deployment.
            os.makedirs(self.image_folder, exist_ok=True)
            # Name the file after the configured model instead of hard-coding
            # "dall-e-3", so dall-e-2 output is labeled correctly.
            image_path = os.path.join(self.image_folder,
                                      f"{self.model_name}_{self.image_number}.png")
            self.image_number += 1
            with open(image_path, 'wb') as f:
                # iter_content streams in sensible chunks; iterating the
                # Response object directly yields tiny 128-byte pieces.
                for chunk in image.iter_content(chunk_size=8192):
                    f.write(chunk)
            return {"image_path": image_path}
        # Download failed: signal "no image" with an empty blob.
        return {}

    async def _arun(self, prompt: str):
        """Async wrapper delegating to `_run`."""
        return self._run(prompt)
class CareerRoadmapGenerator(BaseTool):
    """Generate a step-by-step career roadmap (text) plus a DALL-E illustration.

    `_run` returns ``{"roadmap_text": <str>, "roadmap_image": <image URL>}``.
    """

    name = "Career_Roadmap_Generator"
    description = "Use this tool to generate a career roadmap for a specific job description. Provide the job description, and the tool will generate a roadmap with step-by-step guidance on how to get that job, along with relevant resources."
    args_schema: Type[BaseModel] = AssistantInput
    # Injected collaborators. BUG FIX: these hold *instances*, so the previous
    # Type[...] annotations were wrong; Optional[...] matches actual usage.
    retriever: Optional[VectorStoreRetriever] = None
    llm: Optional[ChatOpenAI] = None
    prompt: Optional[ChatPromptTemplate] = None

    def __init__(self, retriever, llm, prompt):
        super().__init__()
        self.retriever = retriever
        self.llm = llm
        self.prompt = prompt

    @staticmethod
    def format_docs(docs):
        """Join retrieved documents into a single context string."""
        return "\n\n".join(doc.page_content for doc in docs)

    def _run(self, job_description: str):
        """Produce roadmap text with GPT-4 Turbo, then render it as an image."""
        roadmap_prompt = ChatPromptTemplate.from_template(
            "You are a career roadmap generator. Based on the provided job description, create a roadmap with a step-by-step process to get that job. Provide relevant resources if possible. Job Description: {job_description}"
        )
        model = ChatOpenAI(model="gpt-4-turbo-preview")
        roadmap_chain = roadmap_prompt | model | StrOutputParser()
        roadmap_text = roadmap_chain.invoke({"job_description": job_description})
        # BUG FIX: ChatOpenAI is a chat-model wrapper — it cannot load "dall-e-3"
        # and has no generate_image() method, so the old code raised at runtime.
        # Use the OpenAI Images API (already imported at module level) instead.
        image_prompt = f"Create a detailed roadmap with all required steps based on the following text: {roadmap_text}"
        client = OpenAI()
        image_result = client.images.generate(
            model="dall-e-3",
            prompt=image_prompt[:4000],  # DALL-E 3 prompts are capped at 4000 chars
            size="1024x1024",
            n=1,
        )
        return {"roadmap_text": roadmap_text, "roadmap_image": image_result.data[0].url}

    async def _arun(self, job_description: str):
        """Async wrapper delegating to `_run`."""
        return self._run(job_description)
class RAGTool(BaseTool):
    """Retrieval-augmented career-roadmap assistant.

    Runs the injected retriever / prompt / LLM as an LCEL chain and returns
    the parsed string answer.
    """

    name = "RAG_Assistant"
    # Spelling fixes applied ("breif" -> "brief", "possble" -> "possible",
    # "increace" -> "increase", "realatd" -> "related") — this text instructs
    # the routing agent, so typos degrade tool selection.
    description = "You are a Career Roadmap Generator.\n"+\
                  "Answer questions with the help of given job description and create brief step by step solutions for every job description user provides to get that role in that company.\n"+\
                  "Put step by step process to get the job for the specific job description. List as many most relevant skills as possible for that role at that company.\n"+\
                  "If possible provide few projects to work on before applying for that role which will increase the chance of getting selected.\n"+\
                  "Add the resources to learn, watch, practice if possible for each step. Don't give me generic roadmap. Provide in-depth roadmap.\n"+\
                  "Link all the related skills and give what skill to learn first followed by another in the roadmap.\n"+ \
                  "[question]\n" + \
                  "Don't change the input question from the user and don't change answer from this tool\n." +\
                  "Just pass it through to the user."
    args_schema: Type[BaseModel] = AssistantInput
    # Injected collaborators. BUG FIX: these hold *instances*, so the previous
    # Type[...] annotations were wrong; Optional[...] matches actual usage.
    retriever: Optional[VectorStoreRetriever] = None
    llm: Optional[ChatOpenAI] = None
    prompt: Optional[ChatPromptTemplate] = None

    def __init__(self, retriever, llm, prompt):
        super().__init__()
        self.retriever = retriever
        self.llm = llm
        self.prompt = prompt

    @staticmethod
    def format_docs(docs):
        """Join retrieved documents into a single context string."""
        return "\n\n".join(doc.page_content for doc in docs)

    def _run(self, question: str):
        """Answer `question` using retrieved context through the injected chain."""
        rag_chain = (
            {"context": self.retriever | self.format_docs, "question": RunnablePassthrough()}
            | self.prompt
            | self.llm
            | StrOutputParser()
        )
        # BUG FIX: the result was computed but never returned, so the tool
        # always handed the agent None.
        return rag_chain.invoke(question)

    async def _arun(self, question: str):
        """Async wrapper delegating to `_run`."""
        return self._run(question)
class CombinedTool(BaseTool):
    """Route input to either the RAG chain or the career-roadmap generator.

    Inputs containing "job description:" produce
    ``{"roadmap_text": <str>, "roadmap_image": <image URL>}``; everything else
    is answered by the injected RAG chain and returned as a string.
    """

    name = "Combined_RAG_and_Career_Roadmap_Generator"
    description = "This tool combines the capabilities of the RAG Assistant and the Career Roadmap Generator. " \
                  "You can ask questions related to LLM Powered Autonomous Agents or provide a job description to " \
                  "generate a career roadmap infographic."
    args_schema: Type[BaseModel] = AssistantInput
    # Injected collaborators. BUG FIX: these hold *instances*, so the previous
    # Type[...] annotations were wrong; Optional[...] matches actual usage.
    retriever: Optional[VectorStoreRetriever] = None
    llm: Optional[ChatOpenAI] = None
    prompt: Optional[ChatPromptTemplate] = None

    def __init__(self, retriever: VectorStoreRetriever, llm: ChatOpenAI, prompt: ChatPromptTemplate):
        super().__init__()
        self.retriever = retriever
        self.llm = llm
        self.prompt = prompt

    @staticmethod
    def format_docs(docs):
        """Join retrieved documents into a single context string."""
        return "\n\n".join(doc.page_content for doc in docs)

    def _run(self, user_input: str):
        """Dispatch on the literal marker "job description:" in the input."""
        if "job description:" in user_input.lower():
            job_description = user_input.split("job description:")[-1].strip()
            roadmap_prompt = ChatPromptTemplate.from_template(
                "You are a career roadmap generator. Based on the provided job description, create an infographic solution with a step-by-step process to get that job. Provide relevant resources if possible. Job Description: {job_description}"
            )
            model = ChatOpenAI(model="gpt-4-turbo-preview")
            roadmap_chain = roadmap_prompt | model | StrOutputParser()
            roadmap_text = roadmap_chain.invoke({"job_description": job_description})
            # BUG FIX: ChatOpenAI cannot load "dall-e-3" and has no
            # generate_image() method — the old code raised at runtime.
            # Use the OpenAI Images API (imported at module level) instead.
            image_prompt = f"Create an infographic based on the following text: {roadmap_text}"
            client = OpenAI()
            image_result = client.images.generate(
                model="dall-e-3",
                prompt=image_prompt[:4000],  # DALL-E 3 prompts are capped at 4000 chars
                size="1024x1024",
                n=1,
            )
            return {"roadmap_text": roadmap_text, "roadmap_image": image_result.data[0].url}
        else:
            rag_chain = (
                {"context": self.retriever | self.format_docs, "question": RunnablePassthrough()}
                | self.prompt
                | self.llm
                | StrOutputParser()
            )
            return rag_chain.invoke(user_input)

    async def _arun(self, user_input: str):
        """Async wrapper delegating to `_run`."""
        return self._run(user_input)