# ChatWithYourPDF / app.py
import os
from typing import List
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders import PyPDFLoader
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.docstore.document import Document
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
import chainlit as cl
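
# Split text into ~1000-character chunks with 100 characters of overlap so
# each retrieved passage keeps some surrounding context.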
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
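
# System prompt for the combine-documents ("stuff") step: the retrieved
# chunks are injected into {context}, and the user's query fills {question}.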
system_template = """Use the following pieces of context to answer the user's question.
If you don't know the answer, just say that you don't know; don't try to make up an answer.
ALWAYS return a "SOURCES" part in your answer.
The "SOURCES" part should be a reference to the source of the document from which you got your answer.
If the user greets you (e.g. "Hi", "Hello", "How are you"), reply to the greeting as well.
An example of your response:
The answer is foo
SOURCES: xyz
Begin!
----------------
{context}"""

messages = [
    SystemMessagePromptTemplate.from_template(system_template),
    HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
chain_type_kwargs = {"prompt": prompt}
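# chain_type_kwargs is handed to the "stuff" documents chain through
# combine_docs_chain_kwargs when the ConversationalRetrievalChain is built
# below; without that wiring, the custom prompt would never be used.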


def process_file(file):
    """Save the uploaded PDF to a temporary file and split it into text chunks."""
    import tempfile

    # Write the raw PDF bytes to disk; delete=False keeps the file around
    # long enough for PyPDFLoader to read it back.
    with tempfile.NamedTemporaryFile(mode="wb", delete=False, suffix=".pdf") as tmp:
        tmp.write(file.content)
    pypdf_loader = PyPDFLoader(tmp.name)
    texts = pypdf_loader.load_and_split()
    texts = [text.page_content for text in texts]
    return texts
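
# Note: load_and_split() applies PyPDFLoader's default splitter; the
# module-level chunking settings can be reused by passing it explicitly,
# e.g. pypdf_loader.load_and_split(text_splitter=text_splitter).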


@cl.on_chat_start
async def on_chat_start():
    files = None

    # Wait for the user to upload a file
    while files is None:
        # Accepts both text/plain and application/pdf uploads
        files = await cl.AskFileMessage(
            content="Please upload a text or PDF file to begin!",
            accept=["text/plain", "application/pdf"],
            max_size_mb=20,  # PDFs can be considerably larger than plain text
            timeout=180,
        ).send()

    file = files[0]

    # Notify the user that their file is being processed
    msg = cl.Message(content=f"Processing `{file.name}`...")
    await msg.send()

    # texts is populated according to the uploaded file type
    texts = []

    if file.content_type == "text/plain":
        # Plain-text file: read it and chunk it with the module-level splitter
        with open(file.path, "r", encoding="utf-8") as f:
            text = f.read()
        texts = text_splitter.split_text(text)
        # Update the user about the text file
        await cl.Message(
            content=f"`{file.name}` uploaded, it contains {len(text)} characters!"
        ).send()
    elif file.content_type == "application/pdf":
        # PDF file: extract and chunk the text via process_file() above
        texts = process_file(file)

    # Create metadata for each chunk so answers can point back to their source
    metadatas = [{"source": f"{i}-pl"} for i in range(len(texts))]

    # Embed the chunks and index them in a Chroma vector store; cl.make_async
    # runs the blocking ingestion in a worker thread, off the event loop.
    embeddings = OpenAIEmbeddings()
    docsearch = await cl.make_async(Chroma.from_texts)(
        texts, embeddings, metadatas=metadatas
    )

    # Conversation memory so follow-up questions keep the chat context
    message_history = ChatMessageHistory()
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key="answer",
        chat_memory=message_history,
        return_messages=True,
    )

    chain = ConversationalRetrievalChain.from_llm(
        ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, streaming=True),
        chain_type="stuff",
        retriever=docsearch.as_retriever(),
        memory=memory,
        return_source_documents=True,
        # Wire in the custom prompt defined at the top of the file
        combine_docs_chain_kwargs=chain_type_kwargs,
    )
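
    # return_source_documents=True exposes the retrieved chunks in
    # res["source_documents"], which main() below renders as cl.Text elements.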

    # Let the user know that the system is ready
    msg.content = f"Processing `{file.name}` done. You can now ask questions!"
    await msg.update()

    cl.user_session.set("chain", chain)


@cl.on_message
async def main(message: cl.Message):
    chain = cl.user_session.get("chain")  # type: ConversationalRetrievalChain
    cb = cl.AsyncLangchainCallbackHandler()
    res = await chain.acall(message.content, callbacks=[cb])
    answer = res["answer"]
    source_documents = res["source_documents"]  # type: List[Document]

    text_elements = []  # type: List[cl.Text]

    if source_documents:
        for source_idx, source_doc in enumerate(source_documents):
            source_name = f"source_{source_idx}"
            # Create the text element referenced in the message
            text_elements.append(
                cl.Text(content=source_doc.page_content, name=source_name)
            )
        source_names = [text_el.name for text_el in text_elements]

        if source_names:
            answer += f"\nSources: {', '.join(source_names)}"
        else:
            answer += "\nNo sources found"

    await cl.Message(content=answer, elements=text_elements).send()
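
# Run locally with: chainlit run app.py -w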