Spaces:
Runtime error
Runtime error
Mehrdad Esmaeili
committed on
Commit
•
8e1c3e9
1
Parent(s):
1b2762e
Update app.py
Browse files
app.py
CHANGED
@@ -45,24 +45,33 @@ qa = RetrievalQA.from_chain_type(llm=Cohere(model='command'), chain_type="stuff"
|
|
45 |
retriever=docsearch.as_retriever(search_kwargs={'k':1}),return_source_documents=True)
|
46 |
|
47 |
def predict(message, history):
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
history_langchain_format
|
53 |
-
|
54 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
55 |
|
56 |
-
|
57 |
-
|
58 |
-
#
|
59 |
-
#
|
60 |
-
#
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
|
67 |
gr.ChatInterface(predict,
|
68 |
chatbot=gr.Chatbot(height='auto'),
|
|
|
45 |
retriever=docsearch.as_retriever(search_kwargs={'k':1}),return_source_documents=True)
|
46 |
|
47 |
def predict(message, history):
    """Answer a book query with a single one-shot RetrievalQA call.

    Experimentation with memory and conversational retrieval chains resulted
    in worse performance, less usefulness, and more hallucination, so this
    chatbot deliberately keeps zero memory: ``history`` is accepted because
    the Gradio ChatInterface contract requires it, but it is ignored.
    The memory experiments live in the notebooks at
    github.com/mehrdad-es/Amazon-But-Better.

    Parameters
    ----------
    message : str
        The user's latest chat message.
    history : list
        Prior (human, ai) turns from Gradio; intentionally unused.

    Returns
    -------
    str
        The retrieved answer, augmented with the source ebook description
        and metadata when the model produced a real answer.
    """
    # Steer the LLM toward a terse "Title-Author" style response.
    message = message + '? just the book title-Author'
    # `qa` and `docsearch` are module-level objects built earlier in app.py.
    result = qa({"query": message})
    if result['result'] not in ["I don't know", "I don't know."]:
        # Append the matched Amazon Kindle description plus its source file
        # so the user can see where the recommendation came from.
        return result['result']+f'\n---\nAmazon Kindle ebook description is:\n {result["source_documents"][0].page_content}'+\
            f'\nfrom this file: {result["source_documents"][0].metadata}'
    else:
        # Pass the model's "I don't know" through unmodified.
        return result['result']
|
75 |
|
76 |
gr.ChatInterface(predict,
|
77 |
chatbot=gr.Chatbot(height='auto'),
|