norjala committed on
Commit 197f71b
1 Parent(s): be8a991

Updated incorrect variable name

Files changed (1)
  1. app.py +15 -26
app.py CHANGED
@@ -64,45 +64,38 @@ retriever = qdrant_vector_store.as_retriever()
 """
 ### 1. DEFINE STRING TEMPLATE
 RAG_PROMPT_TEMPLATE = """\
-<|start_header_id|>system<|end_header_id|>
+system
 You are a helpful assistant. You answer user questions based on provided context. If you can't answer the question with the provided context,\
-say you don't know.<|eot_id|>
-<|start_header_id|>user<|end_header_id|>
+say you don't know.
+user
 User Query:
 {query}
 Context:
-{context}<|eot_id|>
-<|start_header_id|>assistant<|end_header_id|>
+{context}
+assistant
 """
-#Note that we do not have the response here. We have assistent, we ONLY start, but not followed by <|eot_id> as we do not have a response YET.
+# Note that we do not have the response here. We have assistant, we ONLY start, but not followed by <|eot_id|> as we do not have a response YET.
 
 rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT_TEMPLATE)
 
-
 # Define the LLM
 llm = ChatOpenAI(model_name="gpt-4o")
 
-#-----Creating a Retrieval Augmented Generation (RAG) Chain-----#
-# The RAG chain:
-# (1) Takes the user question and retrieves relevant context,
-# (2) Passes the context through unchanged,
-# (3) Formats the prompt with context and question, then send it to the LLM to generate a response
-
 retrieval_augmented_qa_chain = (
-    # INVOKE CHAIN WITH: {"question" : "<>"}
-    # "question" : populated by getting the value of the "question" key
-    # "context" : populated by getting the value of the "question" key and chaining it into the base_retriever
-    {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
+    # INVOKE CHAIN WITH: {"query" : "<>"}
+    # "query" : populated by getting the value of the "query" key
+    # "context" : populated by getting the value of the "query" key and chaining it into the base_retriever
+    {"context": itemgetter("query") | retriever, "query": itemgetter("query")}
     # "context" : is assigned to a RunnablePassthrough object (will not be called or considered in the next step)
     # by getting the value of the "context" key from the previous step
     | RunnablePassthrough.assign(context=itemgetter("context"))
-    # "response" : the "context" and "question" values are used to format our prompt object and then piped
+    # "response" : the "context" and "query" values are used to format our prompt object and then piped
     # into the LLM and stored in a key called "response"
    # "context" : populated by getting the value of the "context" key from the previous step
     | {"response": rag_prompt | llm, "context": itemgetter("context")}
 )
 
-# Sets initial chat settings at the start of a user session
+# Sets initial chat settings
 @cl.on_chat_start
 async def start_chat():
     """
@@ -120,20 +113,16 @@ async def start_chat():
     }
     cl.user_session.set("settings", settings)
 
-# Processes incoming messages from the user and sends a response through a series of steps:
-# (1) Retrieves the user's settings
-# (2) Invokes the RAG chain with the user's message
-# (3) Extracts the content from the response and sends it back to the user
+# Initializes user session w/ settings and retrieves context based on query
 
 @cl.on_message
 async def handle_message(message: cl.Message):
     settings = cl.user_session.get("settings")
 
-    response = retrieval_augmented_qa_chain.invoke({"question": message.content})
-
+    response = retrieval_augmented_qa_chain.invoke({"query": message.content})
 
     # Extracting and sending just the content
     content = response["response"].content
     pretty_content = content.strip()
 
-    await cl.Message(content=pretty_content).send()
+    await cl.Message(content=pretty_content).send()
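
The fix is pure key plumbing: RAG_PROMPT_TEMPLATE declares {query} and {context}, so the dict the chain's first step builds, the key each itemgetter reads, and the key passed to .invoke() all have to agree; before this commit the chain carried a "question" key that nothing in the template consumed. The sketch below is not part of this commit: it demonstrates the corrected pattern end to end, with a hypothetical fake_retriever stand-in and a prompt-only final step (assumptions for illustration) so it runs without Qdrant or an OpenAI key.

from operator import itemgetter

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough

prompt = ChatPromptTemplate.from_template(
    "User Query:\n{query}\nContext:\n{context}"
)

# Hypothetical stand-in for qdrant_vector_store.as_retriever():
# returns canned "documents" for any query string.
fake_retriever = RunnableLambda(lambda query: [f"document matching '{query}'"])

chain = (
    # Every key here must match the one used at .invoke() below.
    {"context": itemgetter("query") | fake_retriever, "query": itemgetter("query")}
    | RunnablePassthrough.assign(context=itemgetter("context"))
    # The real app pipes the prompt into ChatOpenAI; formatting the prompt
    # alone is enough to show the key plumbing.
    | {"response": prompt, "context": itemgetter("context")}
)

result = chain.invoke({"query": "What does the app do?"})
print(result["response"].to_string())

# Invoking with {"question": ...}, as the old handler did, leaves the
# template's {query} variable unfilled and raises KeyError('query') at
# prompt-formatting time, which is the bug this commit fixes.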