nickmuchi committed
Commit 060f21e
1 Parent(s): ea5ec1b

Update functions.py

Files changed (1): functions.py +12 -8
functions.py CHANGED

@@ -31,7 +31,7 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.chat_models import ChatOpenAI
 from langchain.callbacks.base import CallbackManager
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
-from langchain.chains import ConversationalRetrievalChain, QAGenerationChain
+from langchain.chains import ConversationalRetrievalChain, QAGenerationChain, RetrievalQA
 from langchain.memory import ConversationBufferMemory
 
 from langchain.prompts.chat import (
@@ -224,19 +224,23 @@ def embed_text(query,embedding_model,_docsearch):
     chat_history = []
     # llm = OpenAI(temperature=0)
     chat_llm = ChatOpenAI(streaming=True,
-                          model_name = 'gpt-3.5-turbo',
+                          model_name = 'gpt-4',
                           callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
                           verbose=True,
                           temperature=0
                           )
 
-    chain = ConversationalRetrievalChain.from_llm(chat_llm,
-                                                  retriever= _docsearch.as_retriever(),
-                                                  # condense_question_prompt = load_prompt(),
-                                                  memory = memory,
-                                                  return_source_documents=True)
+    chain = RetrievalQA.from_chain_type(llm=chat_llm, chain_type="stuff",
+                                        retriever=_docsearch.as_retriever(),
+                                        return_source_documents=True)
 
-    answer = chain({"question": query})
+    # chain = ConversationalRetrievalChain.from_llm(chat_llm,
+    #                                               retriever= _docsearch.as_retriever(),
+    #                                               # condense_question_prompt = load_prompt(),
+    #                                               memory = memory,
+    #                                               return_source_documents=True)
+
+    answer = chain({"query": query})
 
     return answer
 
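For context on the call-site change at the bottom of the diff: RetrievalQA keys its input on "query" and, with return_source_documents=True, returns both "result" and "source_documents", whereas ConversationalRetrievalChain keyed on "question"/"answer" and carried chat memory. Below is a minimal, self-contained sketch of the pattern this commit switches to; the FAISS store and sample text are placeholders standing in for the app's _docsearch, not part of the commit.

# Sketch of the RetrievalQA pattern adopted above; the FAISS store is a
# stand-in for the app's _docsearch. Requires OPENAI_API_KEY in the environment.
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

# Tiny in-memory vector store playing the role of _docsearch
docsearch = FAISS.from_texts(
    ["LangChain composes LLM calls, retrievers and memory into chains."],
    OpenAIEmbeddings(),
)

chain = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(model_name="gpt-4", temperature=0),
    chain_type="stuff",  # concatenate all retrieved docs into a single prompt
    retriever=docsearch.as_retriever(),
    return_source_documents=True,
)

# Note the input key: "query", not "question" as with ConversationalRetrievalChain
answer = chain({"query": "What does LangChain do?"})
print(answer["result"])            # the model's answer
print(answer["source_documents"])  # the retrieved Documents behind it

One trade-off worth noting: swapping in RetrievalQA drops the conversational memory the old chain had, and the ConversationalRetrievalChain call is kept commented out rather than deleted, presumably to ease reverting.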