# import gradio as gr
# import fitz # PyMuPDF
# from langchain.text_splitter import RecursiveCharacterTextSplitter
# from langchain.schema import Document
# from langchain_community.vectorstores import Chroma
# from langchain.embeddings import OpenAIEmbeddings
# from langchain.llms import OpenAI
# from langchain.prompts import PromptTemplate
# from langchain.memory import ConversationBufferMemory
# from langchain.chains import ConversationalRetrievalChain
# import os
# def extract_text_from_pdf(pdf_path):
#     doc = fitz.open(pdf_path)
#     text = ""
#     for page_num in range(len(doc)):
#         page = doc.load_page(page_num)
#         text += page.get_text()
#     return text
# # Load the text from the PDF and preprocess
# openai_api_key = os.getenv("OPENAI_API_KEY")
# pdf_path = "iess402.pdf" # Path to your PDF file
# pdf_text = extract_text_from_pdf(pdf_path)
# document = Document(page_content=pdf_text, metadata={})
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=25)
# all_splits = text_splitter.split_documents([document])
# # Create vector store and setup the QA chain
# vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings(api_key=openai_api_key))
# llm = OpenAI(api_key=openai_api_key, temperature=0, model="gpt-3.5-turbo-instruct", verbose=True)
# template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer as concise as possible. Always say "thanks for asking!" at the end of the answer.
# {context}
# Question: {question}
# Helpful Answer:"""
# QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"], template=template)
# # Setup conversational retrieval chain with memory
# memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
# retriever = vectorstore.as_retriever()
# qa = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, memory=memory)
# # Define the function to ask questions and get answers
# def ask_question(question):
#     result = qa.invoke({"question": question})
#     return result['answer']
# # Create the Gradio interface
# iface = gr.Interface(fn=ask_question, inputs="text", outputs="text", title="PDF QA System", description="Ask questions based on the Textbook in Political Science for Class IX, Chapter 2.")
# # Launch the Gradio interface
# iface.launch()
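
# --- Current implementation: upload any PDF and chat with it through a Gradio Blocks UI ---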
import gradio as gr
import fitz # PyMuPDF
import re
from pathlib import Path
from langchain_openai.embeddings import OpenAIEmbeddings
from langchain_chroma import Chroma
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_openai import ChatOpenAI
from langchain_experimental.text_splitter import SemanticChunker
import os
openai_api_key = os.getenv("OPENAI_API_KEY")
def extract_text_from_pdf(pdf_file):
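    """Extract plain text from every page of a PDF file using PyMuPDF."""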
    document = fitz.open(pdf_file)
    text = ""
    for page_num in range(len(document)):
        page = document.load_page(page_num)
        text += page.get_text()
    document.close()
    return text

def clean_text(text):
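    """Normalize extracted PDF text before chunking and embedding."""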
    # Collapse all runs of whitespace into single spaces.
    cleaned_text = re.sub(r'\s+', ' ', text)
    # Squash any character repeated three or more times down to a single occurrence.
    cleaned_text = re.sub(r'(.)\1{2,}', r'\1', cleaned_text)
    # Drop consecutive duplicated words ("the the" -> "the").
    cleaned_text = re.sub(r'\b(\w+)\b(?:\s+\1\b)+', r'\1', cleaned_text)
    return cleaned_text.strip()

def initialize_chatbot(cleaned_text, openai_api_key):
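    """Build the RAG pipeline: semantic chunks -> Chroma store -> multi-query retriever -> conversational chain."""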
    embeddings = OpenAIEmbeddings(api_key=openai_api_key)
    text_splitter = SemanticChunker(embeddings)
    docs = text_splitter.create_documents([cleaned_text])
    vectorstore = Chroma.from_documents(documents=docs, embedding=embeddings)
    llm = ChatOpenAI(api_key=openai_api_key, temperature=0.5, model="gpt-4o", verbose=True)
    retriever = MultiQueryRetriever.from_llm(retriever=vectorstore.as_retriever(), llm=llm)
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    qa = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, memory=memory)
    return qa

def answer_query(pdf_file, question):
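    """Answer a single question against the uploaded PDF."""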
    extracted_text = extract_text_from_pdf(pdf_file)
    cleaned_text = clean_text(extracted_text)
    # Note: this rebuilds (and re-embeds) the chain on every question, so the
    # conversation memory does not persist between questions.
    qa = initialize_chatbot(cleaned_text, openai_api_key)
    result = qa.invoke({"question": question})
    return result['answer']

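# A possible optimization (a sketch, not part of the original app): cache one chain
# per uploaded file so each PDF is embedded only once and ConversationBufferMemory
# persists across questions. `_qa_cache` and `get_cached_chain` are hypothetical
# names introduced here; this helper is not wired into the UI below.
_qa_cache = {}

def get_cached_chain(pdf_file):
    # Build and store a chain the first time a file is seen; reuse it afterwards.
    if pdf_file not in _qa_cache:
        cleaned = clean_text(extract_text_from_pdf(pdf_file))
        _qa_cache[pdf_file] = initialize_chatbot(cleaned, openai_api_key)
    return _qa_cache[pdf_file]
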
def process_pdf_and_question(pdf_file, question, chat_history):
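    """Validate the inputs, run the query, and append the (question, answer) pair to the chat history."""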
    if pdf_file is None:
        return chat_history + [(question, "Please upload a PDF file first.")]
    if not question.strip():
        return chat_history + [(question, "Please enter a question.")]
    answer = answer_query(pdf_file, question)
    chat_history.append((question, answer))
    return chat_history

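# Gradio Blocks UI: upload a PDF, type a question, and press Enter to submit.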
with gr.Blocks() as demo:
    upload = gr.File(label="Upload PDF")
    chatbot = gr.Chatbot(label="Chat History")
    question = gr.Textbox(label="Ask a question")
    question.submit(process_pdf_and_question, inputs=[upload, question, chatbot], outputs=[chatbot])

if __name__ == "__main__":
    demo.launch()