# f5_model_final/app/helpers/plan_chat.py
import os

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
from langchain_aws import ChatBedrock

from helpers.generate_embbedings import vector_store


def make_prompt(history, prompt, context=None):
    """Build a Mistral-style [INST] prompt from chat history and optional context."""
    formatted_history = ""
    if context:
        formatted_history += f"[CONTEXT] {context} [/CONTEXT]\n"
    for history_item in history:
        if history_item.from_ == 'user':
            formatted_history += f"[INST] {history_item.message} [/INST]\n"
        else:
            formatted_history += f"{history_item.message}\n"
    formatted_history += f"[INST] {prompt} [/INST]\n"
    return formatted_history
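
# Illustration of the prompt layout make_prompt produces (the history items
# below are stand-ins; any objects with `from_` and `message` attributes work):
#
#   [CONTEXT] ...retrieved documents... [/CONTEXT]
#   [INST] earlier user message [/INST]
#   earlier assistant reply
#   [INST] current question [/INST]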

# Renamed from `prompt` so it is not shadowed by the local prompt strings
# built inside the functions below.
prompt_template = ChatPromptTemplate.from_template("{prompt}")
model = ChatBedrock(
    model_id="mistral.mistral-7b-instruct-v0:2",
    # boto3 also reads these credentials from the environment automatically,
    # so passing them explicitly is belt and braces.
    aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
    aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY"),
    region_name=os.environ.get("AWS_DEFAULT_REGION"),
    max_tokens=8000,
    temperature=0,
)
output_parser = StrOutputParser()
# Named bedrock_chain so the OpenAI chain defined further down cannot rebind
# it out from under ask_question (module globals resolve at call time).
bedrock_chain = prompt_template | model | output_parser

async def ask_question(question: str, history: list | None = None, project_id=None):
    """
    Stream a response to a question, grounded in project-specific context
    retrieved from the vector store when a project_id is given.
    """
    history = history or []  # avoid a mutable default argument
    try:
        context = None
        if project_id is not None:
            # similarity_search returns Document objects; keep only their text
            # rather than interpolating the raw list repr into the prompt.
            docs = vector_store.similarity_search(
                query=question, k=4, filter={"project_id": project_id}
            )
            context = "\n".join(doc.page_content for doc in docs)
        full_prompt = make_prompt(history, question, context)
        stream = bedrock_chain.astream({"prompt": full_prompt})
        async for chunk in stream:
            yield chunk
    except Exception as e:
        raise RuntimeError(f"Error generating response: {e}") from e
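
# Minimal usage sketch (illustrative only; assumes AWS credentials and the
# vector store are configured, and the project_id value is a stand-in):
#
#   import asyncio
#
#   async def demo():
#       async for chunk in ask_question("Summarise the plan", project_id=42):
#           print(chunk, end="", flush=True)
#
#   asyncio.run(demo())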

gpt_model = ChatOpenAI(
    temperature=0.7,
    model='gpt-4o-mini',
)
openai_chain = prompt_template | gpt_model | StrOutputParser()

async def ask_openai(question: str, history: list | None = None):
    """
    Stream a response to a question using the OpenAI model, framed with the
    CloudMod Solutions Architect persona and the previous chat history.
    """
    history = history or []  # avoid a mutable default argument
    try:
        full_prompt = (
            "You are an AI Assistant for CloudMod Solutions Architect, an expert in AWS, Azure & GCP.\n"
            "If asked questions such as `what does this chat do` or `who are you`,\n"
            "answer them as per this context.\n\n"
            f"Here is the user query: {question}\n"
            f"Here is the previous chat history: {history}"
        )
        stream = openai_chain.astream({"prompt": full_prompt})
        async for chunk in stream:
            yield chunk
    except Exception as e:
        raise RuntimeError(f"Error generating response: {e}") from e
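
# Minimal usage sketch (illustrative only; assumes OPENAI_API_KEY is set):
#
#   import asyncio
#
#   async def demo():
#       async for chunk in ask_openai("Which cloud suits a small team best?"):
#           print(chunk, end="", flush=True)
#
#   asyncio.run(demo())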