Muhammad Abdur Rahman Saad committed on
Commit
9e3fc9d
1 Parent(s): 718cf08

update article query service prompt

Files changed (1)
  1. controllers/article_query_service.py +17 -14
controllers/article_query_service.py CHANGED
@@ -13,6 +13,7 @@ load_dotenv()
 PINECONE_API_KEY = os.getenv('PINECONE_API_KEY')
 OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
 
+
 def article_agent(query, filter_params=None):
     # Initialize Pinecone
     try:
@@ -37,7 +38,8 @@ def article_agent(query, filter_params=None):
 
     # Validate and setup retriever with dynamic filtering based on IDs provided in filter_params
     try:
-        if filter_params and isinstance(filter_params, list) and all(isinstance(id, str) for id in filter_params):
+        if filter_params and isinstance(filter_params, list) and all(
+                isinstance(id, str) for id in filter_params):
             search_filter = {"id": {"$in": filter_params}}
         else:
             if filter_params is not None:
@@ -45,7 +47,8 @@ def article_agent(query, filter_params=None):
                 return None
             search_filter = {}
 
-        retriever = vectorstore.as_retriever(search_kwargs={'filter': search_filter})
+        retriever = vectorstore.as_retriever(
+            search_kwargs={'filter': search_filter})
         print('Retriever Initialized')
     except Exception as e:
         print(f"Error configuring the retriever: {e}")
@@ -63,29 +66,29 @@ def article_agent(query, filter_params=None):
     try:
         prompt_template = """
         Assistant:
-        You are an AI trained to assist users by analyzing financial documents.
-        Your task is to extract pertinent information from these documents and answer
-        queries based on them.
-        Your responses should not only answer the query but also highlight key details
-        and provide analytical depth where relevant.
-        Make sure all responses are grammatically correct, well-structured, and
-        formatted to meet professional standards.
+        You are an AI trained to assist users by analyzing financial documents.
+        Your task is to extract pertinent information from these documents and answer queries based on them
+        Your responses should not only answer the query but also highlight key details and provide analytical depth where relevant.
+        Make sure all responses are grammatically correct, well-structured, and formatted to meet professional standards.
 
         Query: {query}
 
         Context:
-        Here are the key points from the financial documents provided:
+        These are the finanicial documents that are relevant to the query:
         {context}
 
-        Please synthesize this information and answer the query comprehensively, providing actionable insights and detailed explanations where necessary.
+        Please synthesize this information and answer the query comprehensively, providing actionable insights and detailed explanations where necessary.
 
         Response:
         """
-        prompt = PromptTemplate(input_variables=['context', 'query'], template=prompt_template)
-        rag_chain = ({"context": retriever, "query": RunnablePassthrough()} | prompt | llm | StrOutputParser())
+        prompt = PromptTemplate(input_variables=['context', 'query'],
+                                template=prompt_template)
+        rag_chain = ({
+            "context": retriever,
+            "query": RunnablePassthrough()
+        } | prompt | llm | StrOutputParser())
 
         return rag_chain.invoke(query)
     except Exception as e:
         print(f"Error during RAG chain setup or execution: {e}")
         return None
-
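
For reference, the ID-validation branch reflowed in the second hunk can be exercised on its own. This is a minimal, self-contained sketch of that logic; the helper name build_search_filter and the example article IDs are illustrative and do not appear in the repository:

def build_search_filter(filter_params=None):
    # Same validation as in article_agent: only a non-empty list of string IDs
    # produces a Pinecone-style metadata filter; any other non-None input is
    # treated as malformed, and no input at all means an unfiltered search.
    if filter_params and isinstance(filter_params, list) and all(
            isinstance(item, str) for item in filter_params):
        return {"id": {"$in": filter_params}}
    if filter_params is not None:
        return None  # article_agent logs and returns None for bad filter input
    return {}


print(build_search_filter(["article-123", "article-456"]))  # {'id': {'$in': [...]}}
print(build_search_filter())                                # {}
print(build_search_filter("article-123"))                   # None (not a list)

The prompt/chain hunk composes a standard LangChain LCEL pipeline (parallel dict -> prompt -> llm -> parser). Below is a runnable sketch of that shape with stand-in runnables in place of the Pinecone retriever and the OpenAI model, so it needs no API keys; the langchain_core import paths are an assumption, since the file's import block is not part of this diff:

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough

prompt = PromptTemplate(input_variables=['context', 'query'],
                        template="Query: {query}\nContext: {context}\nResponse:")

# Stand-ins for the retriever and llm objects built earlier in article_agent.
fake_retriever = RunnableLambda(lambda q: f"placeholder context for: {q}")
fake_llm = RunnableLambda(lambda prompt_value: "placeholder answer")

rag_chain = ({
    "context": fake_retriever,
    "query": RunnablePassthrough()
} | prompt | fake_llm | StrOutputParser())

print(rag_chain.invoke("What is the revenue outlook?"))  # -> "placeholder answer"

The dict on the left of the pipe is coerced into a RunnableParallel, so the retriever and the pass-through both receive the raw query string before the prompt is filled in; that is the behaviour the reformatted rag_chain expression relies on.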