from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.embeddings.cohere import CohereEmbeddings
from langchain.llms import Cohere
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
import os
import pickle
import gradio as gr
# Imports kept for the commented-out memory experiment below:
# from langchain.memory import ConversationSummaryMemory
# from langchain.chains import ConversationalRetrievalChain
# from langchain.text_splitter import RecursiveCharacterTextSplitter
# from langchain.chains import LLMChain
# from langchain.prompts import (
#     ChatPromptTemplate,
#     HumanMessagePromptTemplate,
#     MessagesPlaceholder,
#     SystemMessagePromptTemplate,
# )
from langchain.schema import AIMessage, HumanMessage  # only used by the commented-out memory code

# NOTE: Cohere and CohereEmbeddings read the COHERE_API_KEY environment variable.

# Load every biography text file in ./bios/ as a LangChain document.
documents = []
path = './bios/'
for file in os.listdir(path):
    loader = TextLoader(f'{path}{file}', encoding='unicode_escape')
    # loader.load()[0].metadata['category'] = 'biography'
    # print(loader.load()[0].metadata)
    documents += loader.load()
# print(documents)
print(len(documents))

'''This is the code used for the chat without memory.'''
# Split the documents into 500-character chunks, embed them with Cohere,
# and index them in an in-memory Chroma vector store.
text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = CohereEmbeddings(model='embed-english-v3.0')
docsearch = Chroma.from_documents(texts, embeddings)
# One-shot retrieval QA: "stuff" the single best-matching chunk (k=1) into the prompt.
qa = RetrievalQA.from_chain_type(
    llm=Cohere(model='command'),
    chain_type='stuff',
    retriever=docsearch.as_retriever(search_kwargs={'k': 1}),
    return_source_documents=True,
)

# Pickled list of (book title, Amazon URL) tuples; the titles match the
# file names in ./bios/ (minus the .txt extension).
with open('./bookTitleUrlTuples.pkl', 'rb') as f:
    btuTuples = pickle.load(f)
bookTitleUrlDict = {title: url for title, url in btuTuples}


def predict(message, history):
    '''Experimentation with memory and a conversational retrieval chain resulted in
    worse performance, less usefulness, and more hallucination. Hence, this chatbot
    gives one-shot answers with zero memory. You can use the code in the GitHub
    notebooks to redo this experimentation:
    github.com/mehrdad-es/Amazon-But-Better'''
    # history_langchain_format = []
    # for human, ai in history:
    #     history_langchain_format.append(HumanMessage(content=human))
    #     history_langchain_format.append(AIMessage(content=ai))
    # history_langchain_format.append(HumanMessage(content=message))
    # gpt_response = qa({'query': history_langchain_format})
    # return gpt_response['result']
    # gpt_response = qa({'query': ''.join(history) + f'.\n given the previous conversation respond using the following prompt:{message}'})
    # # print(gpt_response)
    # history.append((f'HumanMessage:{message}', f'AIMessage: {gpt_response},'))
    # # history = history_langchain_format
    # return gpt_response['result']
    message = ('You are a language model that gives book recommendations based on '
               'your context. ' + message + ' Just give the book title and author.')
    result = qa({'query': message})
    # r1 = docsearch.similarity_search_with_score(query=q, k=3)
    # print([(item[-2].metadata, item[-1]) for item in r1],
    #       '\n\n', result['result'], f'|| {result["source_documents"][0].metadata}', '\n*****\n')
    if result['result'] not in ["I don't know", "I don't know."]:
        bookNamePath = result['source_documents'][0].metadata['source']
        return (
            result['result']
            + '\n---\n***Ignore the description below if the chatbot was unsure about its '
              'response or if the response is not about the book shown below***\n'
            + f'Amazon Kindle ebook description is:\n {result["source_documents"][0].page_content}'
            + f'\n**from this file:** {bookNamePath}\n'
            + f'**link==>** {bookTitleUrlDict[bookNamePath.split("/")[-1][:-4]]}'  # key = file name without .txt
        )
    else:
        return result['result']


gr.ChatInterface(
    predict,
    chatbot=gr.Chatbot(height='auto'),
    textbox=gr.Textbox(placeholder='Recommend a book on someone who...'),
    title='Amazon But Better',
    description="Amazon started out with selling books. However, searching for books on "
                "Amazon is tedious and inaccurate if you don't know exactly what you are "
                "looking for. **Why not make it faster and easier with LLMs? :)** This "
                "chatbot's context is based on all the non-sponsored Kindle ebooks found "
                "in the biography section of amazon.ca (1195 items).",
).launch()
# gr.ChatInterface(predict).launch()
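
# For reference, a minimal sketch of the memory-based variant that the docstring in
# predict() says was tried and rejected (it hallucinated more). This is an assumed
# reconstruction, not the exact notebook code from the repo; it reuses the docsearch
# retriever built above, and predict_with_memory is a hypothetical name.
# from langchain.memory import ConversationSummaryMemory
# from langchain.chains import ConversationalRetrievalChain
#
# memory = ConversationSummaryMemory(
#     llm=Cohere(model='command'), memory_key='chat_history', return_messages=True
# )
# conv_qa = ConversationalRetrievalChain.from_llm(
#     llm=Cohere(model='command'),
#     retriever=docsearch.as_retriever(search_kwargs={'k': 1}),
#     memory=memory,
# )
#
# def predict_with_memory(message, history):
#     # The chain tracks its own summary memory, so Gradio's history argument is unused.
#     return conv_qa({'question': message})['answer']
#
# gr.ChatInterface(predict_with_memory).launch()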