nickmuchi committed on
Commit
74338cd
1 Parent(s): ead5a94

Update functions.py

Browse files
Files changed (1) hide show
  1. functions.py +8 -11
functions.py CHANGED
@@ -60,6 +60,7 @@ margin-bottom: 2.5rem">{}</div> """
60
 
61
  memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')
62
 
 
63
  #Stuff Chain Type Prompt template
64
 
65
  @st.cache_resource
@@ -89,12 +90,8 @@ def load_prompt():
89
  ----------------
90
  {context}"""
91
 
92
- messages = [
93
- SystemMessagePromptTemplate.from_template(system_template),
94
- HumanMessagePromptTemplate.from_template("{question}")
95
- ]
96
- prompt = ChatPromptTemplate.from_messages(messages)
97
-
98
  return prompt
99
 
100
  ###################### Functions #######################################################################################
@@ -216,12 +213,10 @@ def gen_embeddings(embedding_model):
216
 
217
  return embeddings
218
 
219
- @st.cache_data
220
  def embed_text(query,embedding_model,_docsearch):
221
 
222
  '''Embed text and generate semantic search scores'''
223
-
224
- chat_history = []
225
  # llm = OpenAI(temperature=0)
226
  chat_llm = ChatOpenAI(streaming=True,
227
  model_name = 'gpt-4',
@@ -235,10 +230,12 @@ def embed_text(query,embedding_model,_docsearch):
235
  # return_source_documents=True)
236
 
237
  chain = ConversationalRetrievalChain.from_llm(chat_llm,
238
- retriever= _docsearch.as_retriever(),
239
- #qa_prompt = load_prompt(),
240
  memory = memory,
241
  return_source_documents=True)
 
 
242
 
243
  answer = chain({"question": query})
244
 
 
60
 
61
  memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')
62
 
63
+
64
  #Stuff Chain Type Prompt template
65
 
66
  @st.cache_resource
 
90
  ----------------
91
  {context}"""
92
 
93
+ prompt = SystemMessagePromptTemplate.from_template(system_template),
94
+
 
 
 
 
95
  return prompt
96
 
97
  ###################### Functions #######################################################################################
 
213
 
214
  return embeddings
215
 
 
216
  def embed_text(query,embedding_model,_docsearch):
217
 
218
  '''Embed text and generate semantic search scores'''
219
+
 
220
  # llm = OpenAI(temperature=0)
221
  chat_llm = ChatOpenAI(streaming=True,
222
  model_name = 'gpt-4',
 
230
  # return_source_documents=True)
231
 
232
  chain = ConversationalRetrievalChain.from_llm(chat_llm,
233
+ retriever= _docsearch.as_retriever(search_kwargs={"k": 3})),
234
+ get_chat_history=lambda h : h,
235
  memory = memory,
236
  return_source_documents=True)
237
+
238
+ chain.combine_docs_chain.llm_chain.prompt.messages[0] = load_prompt()
239
 
240
  answer = chain({"question": query})
241