Commit bfaa73f • Parent(s): a7320b9
Update app.py

app.py CHANGED
@@ -1,63 +1,75 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
+import fitz # PyMuPDF
+import re
+from langchain_openai.embeddings import OpenAIEmbeddings
+from langchain_chroma import Chroma
+from langchain.retrievers.multi_query import MultiQueryRetriever
+from langchain.chains import ConversationalRetrievalChain
+from langchain.memory import ConversationBufferMemory
+from langchain_openai import ChatOpenAI
+from langchain_experimental.text_splitter import SemanticChunker
+
+# Place your OpenAI API key in a safe place, such as an environment variable or a secure vault
+openai_api_key = "YOUR_OPENAI_API_KEY_HERE"
+
+vectorstore = None
+llm = None
+qa_instance = None
+chat_history = []
+
+def extract_text_from_pdf(pdf_bytes):
+    document = fitz.open("pdf", pdf_bytes)
+    text = ""
+    for page_num in range(len(document)):
+        page = document.load_page(page_num)
+        text += page.get_text()
+    document.close()
+    return text
+
+def clean_text(text):
+    cleaned_text = re.sub(r'\s+', ' ', text)
+    cleaned_text = re.sub(r'(.)\1{2,}', r'\1', cleaned_text)
+    cleaned_text = re.sub(r'\b(\w+)\b(?:\s+\1\b)+', r'\1', cleaned_text)
+    return cleaned_text.strip()
+
+def initialize_chatbot(cleaned_text):
+    global vectorstore, llm, qa_instance
+    if vectorstore is None:
+        embeddings = OpenAIEmbeddings(api_key=openai_api_key)
+        text_splitter = SemanticChunker(embeddings)
+        docs = text_splitter.create_documents([cleaned_text])
+        vectorstore = Chroma.from_documents(documents=docs, embedding=embeddings)
+    if llm is None:
+        llm = ChatOpenAI(api_key=openai_api_key, temperature=0.5, model="gpt-4o", verbose=True)
+    retriever = MultiQueryRetriever.from_llm(retriever=vectorstore.as_retriever(), llm=llm)
+    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+    qa_instance = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, memory=memory)
+
+def setup_qa_system(pdf_file):
+    if pdf_file is None:
+        return [("Please upload a PDF file.", "")]
+    extracted_text = extract_text_from_pdf(pdf_file)
+    cleaned_text = clean_text(extracted_text)
+    initialize_chatbot(cleaned_text)
+    chat_history = [("Chatbot initialized. Please ask a question.", "")]
+    return chat_history
+
+def answer_query(question):
+    if qa_instance is None:
+        return [("Please upload a PDF and initialize the system first.", "")]
+    if not question.strip():
+        return [("Please enter a question.", "")]
+    result = qa_instance({"question": question})
+    chat_history.append((question, result['answer']))
+    return chat_history
+
+with gr.Blocks() as demo:
+    upload = gr.File(label="Upload PDF", type="binary", file_types=["pdf"])
+    chatbot = gr.Chatbot(label="Chatbot")
+    question = gr.Textbox(label="Ask a question", placeholder="Type your question after uploading PDF...")
+
+    upload.change(setup_qa_system, inputs=[upload], outputs=[chatbot])
+    question.submit(answer_query, inputs=[question], outputs=[chatbot])
+
+demo.launch()
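
The committed file hard-codes a placeholder key, and the comment directly above it already points to environment variables as the safer home for it. A minimal sketch of that alternative, assuming a Space secret (or shell variable) named OPENAI_API_KEY; the variable name is an assumption, not part of this commit:

import os

# Read the key from the environment instead of hard-coding it in app.py.
# OPENAI_API_KEY is an assumed secret name, not something defined by this commit.
openai_api_key = os.environ.get("OPENAI_API_KEY")
if not openai_api_key:
    raise RuntimeError("Set the OPENAI_API_KEY environment variable before launching the app.")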
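
The three substitutions in clean_text() are easy to misread, so here is a tiny worked example of what each one does to extracted PDF text; the sample string is invented for illustration, and the regexes are the same ones used in the commit:

import re

sample = "Report   Report\n\nheading....   value  value"  # made-up noisy PDF text
step1 = re.sub(r'\s+', ' ', sample)                        # collapse whitespace runs into single spaces
step2 = re.sub(r'(.)\1{2,}', r'\1', step1)                 # squeeze any character repeated three or more times
step3 = re.sub(r'\b(\w+)\b(?:\s+\1\b)+', r'\1', step2)     # drop immediately repeated words
print(step3.strip())                                       # -> Report heading. value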