Mehrdad Esmaeili committed on
Commit
8e1c3e9
1 Parent(s): 1b2762e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -17
app.py CHANGED
@@ -45,24 +45,33 @@ qa = RetrievalQA.from_chain_type(llm=Cohere(model='command'), chain_type="stuff"
45
  retriever=docsearch.as_retriever(search_kwargs={'k':1}),return_source_documents=True)
46
 
47
  def predict(message, history):
48
- history_langchain_format = []
49
- for human, ai in history:
50
- history_langchain_format.append(HumanMessage(content=human))
51
- history_langchain_format.append(AIMessage(content=ai))
52
- history_langchain_format.append(HumanMessage(content=message))
53
- gpt_response = qa({'query':history_langchain_format})
54
- return gpt_response['result']
 
 
 
 
 
 
 
 
 
55
 
56
- # message=message+'? just the book title-Author'
57
- # result = qa({"query": message})
58
- # # r1=docsearch.similarity_search_with_score(query=q,k=3)
59
- # # print([(item[-2].metadata,item[-1]) for item in r1],\
60
- # # '\n\n',result['result'],f'|| {result["source_documents"][0].metadata}','\n*****\n')
61
- # if result['result'] not in ["I don't know","I don't know."]:
62
- # return result['result']+f'\n---\nAmazon Kindle ebook description is:\n {result["source_documents"][0].page_content}'+\
63
- # f'\nfrom this file: {result["source_documents"][0].metadata}'
64
- # else:
65
- # return result['result']
66
 
67
  gr.ChatInterface(predict,
68
  chatbot=gr.Chatbot(height='auto'),
 
45
  retriever=docsearch.as_retriever(search_kwargs={'k':1}),return_source_documents=True)
46
 
47
def predict(message, history):
    """Answer a book-lookup query with a single one-shot RetrievalQA call.

    Experimentation with memory and a conversation-retrieval chain resulted
    in worse performance and usefulness and more hallucination, so this
    chatbot gives one-shot answers with zero memory. The experimentation
    notebooks are at github.com/mehrdad-es/Amazon-But-Better.

    Args:
        message: The user's query (a book description / question).
        history: Gradio chat history; intentionally unused (zero memory).

    Returns:
        The model's answer. When the model knows the answer, the matched
        Amazon Kindle ebook description and its source metadata are appended.
    """
    # Steer the model toward a terse "Title-Author" style answer.
    query = f'{message}? just the book title-Author'
    result = qa({"query": query})
    # Strip before comparing: LLM output often carries stray leading/trailing
    # whitespace that would defeat an exact membership test.
    if result['result'].strip() not in ["I don't know", "I don't know."]:
        return (result['result']
                + f'\n---\nAmazon Kindle ebook description is:\n {result["source_documents"][0].page_content}'
                + f'\nfrom this file: {result["source_documents"][0].metadata}')
    else:
        return result['result']
75
 
76
  gr.ChatInterface(predict,
77
  chatbot=gr.Chatbot(height='auto'),