Mehrdad Esmaeili committed
Commit 2600c4e
1 Parent(s): f9d322d

Update app.py

Files changed (1): app.py +5 -5
app.py CHANGED
@@ -51,10 +51,10 @@ retriever=docsearch.as_retriever()
 cohereLLM=Cohere(model='command')
 # cohereLLM=OpenAI()
 # Initialize the CohereRerank compressor and the ContextualCompressionRetriever
-compressor = CohereRerank(user_agent='MyTool/1.0 (Linux; x86_64)')
-compression_retriever = ContextualCompressionRetriever(
-    base_compressor=compressor, base_retriever=retriever
-)
+# compressor = CohereRerank(user_agent='MyTool/1.0 (Linux; x86_64)')
+# compression_retriever = ContextualCompressionRetriever(
+#     base_compressor=compressor, base_retriever=retriever
+# )
 
 # delete this to return to production state
 memory=ConversationSummaryMemory(
@@ -64,7 +64,7 @@ question_generator = LLMChain(llm=cohereLLM, prompt=CONDENSE_QUESTION_PROMPT)
 doc_chain = load_qa_with_sources_chain(cohereLLM, chain_type="refine")
 
 rag_chain=chain = ConversationalRetrievalChain(
-    retriever=compression_retriever,
+    retriever=retriever,
     question_generator=question_generator,
     combine_docs_chain=doc_chain,
     return_source_documents=True
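
For context, a minimal sketch of the two retriever setups this commit toggles between. It assumes the vector store docsearch and the Cohere API key configured earlier in app.py (surrounding code not shown in this diff); with the compression lines commented out, the rerank step is skipped and retrieved chunks go straight to the refine chain.

from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import CohereRerank

# Base retriever over the existing vector store (assumed from earlier in app.py).
retriever = docsearch.as_retriever()

# Before this commit: wrap the base retriever so Cohere's rerank model re-orders
# the retrieved documents before they reach the chain.
compressor = CohereRerank(user_agent='MyTool/1.0 (Linux; x86_64)')
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)

# After this commit the chain is built on the plain retriever instead, i.e.
# ConversationalRetrievalChain(retriever=retriever, ...) rather than
# ConversationalRetrievalChain(retriever=compression_retriever, ...).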