DrishtiSharma committed
Commit 31393f2 · verified · 1 parent: c62b9bb

Create app.py

Files changed (1)
  1. interim/app.py +326 -0
interim/app.py ADDED
@@ -0,0 +1,326 @@
+ # to-do: Enable downloading multiple patent PDFs via corresponding links
+ import sys
+ import os
+ import re
+ import shutil
+ import time
+ import fitz
+ import streamlit as st
+ import nltk
+ import tempfile
+ import subprocess
+
+ # Pin NLTK to version 3.9.1
+ REQUIRED_NLTK_VERSION = "3.9.1"
+ subprocess.run([sys.executable, "-m", "pip", "install", f"nltk=={REQUIRED_NLTK_VERSION}"])
+
+ # Set up temporary directory for NLTK resources
+ nltk_data_path = os.path.join(tempfile.gettempdir(), "nltk_data")
+ os.makedirs(nltk_data_path, exist_ok=True)
+ nltk.data.path.append(nltk_data_path)
+
+ # Download 'punkt_tab' for compatibility
+ try:
+     print("Ensuring NLTK 'punkt_tab' resource is downloaded...")
+     nltk.download("punkt_tab", download_dir=nltk_data_path)
+ except Exception as e:
+     print(f"Error downloading NLTK 'punkt_tab': {e}")
+     raise e
+
+ sys.path.append(os.path.abspath("."))
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.memory import ConversationBufferMemory
+ from langchain.llms import OpenAI
+ from langchain.document_loaders import UnstructuredPDFLoader
+ from langchain.vectorstores import Chroma
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.text_splitter import NLTKTextSplitter
+ from patent_downloader import PatentDownloader
+ from langchain.document_loaders import PyMuPDFLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+
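+ # Chroma persistence uses a fresh temporary directory created for this session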
+ PERSISTED_DIRECTORY = tempfile.mkdtemp()
+
+ # Fetch API key securely from the environment
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+ if not OPENAI_API_KEY:
+     st.error("Critical Error: OpenAI API key not found in the environment variables. Please configure it.")
+     st.stop()
+
+ def check_poppler_installed():
+     if not shutil.which("pdfinfo"):
+         raise EnvironmentError(
+             "Poppler is not installed or not in PATH. Install 'poppler-utils' for PDF processing."
+         )
+
+ check_poppler_installed()
+
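+ # Extract text from the patent PDF page by page, clean it, and split it into
+ # overlapping chunks ready for embedding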
+ def load_docs(document_path):
+     try:
+         import fitz  # PyMuPDF for text extraction
+
+         # Step 1: Extract plain text from PDF
+         doc = fitz.open(document_path)
+         extracted_text = []
+
+         for page_num, page in enumerate(doc):
+             page_text = page.get_text("text")  # Extract text
+             clean_page_text = clean_extracted_text(page_text)
+             if clean_page_text:  # Keep only non-empty cleaned text
+                 extracted_text.append(clean_page_text)
+
+         doc.close()
+
+         # Step 2: Combine cleaned text
+         full_text = "\n".join(extracted_text)
+         st.write(f"📄 Total Cleaned Text Length: {len(full_text)} characters")
+
+         # Step 3: Chunk the cleaned text
+         text_splitter = RecursiveCharacterTextSplitter(
+             chunk_size=1000,
+             chunk_overlap=100,
+             separators=["\n\n", "\n", " ", ""]
+         )
+         split_docs = text_splitter.create_documents([full_text])
+
+         # Debug: Show filtered chunks
+         st.write(f"🔍 Total Chunks After Splitting: {len(split_docs)}")
+         for i, doc in enumerate(split_docs[:5]):  # Show first 5 chunks
+             st.write(f"Chunk {i + 1}: {doc.page_content[:300]}...")
+
+         return split_docs
+     except Exception as e:
+         st.error(f"Failed to load and process PDF: {e}")
+         st.stop()
+
+
+ def clean_extracted_text(text):
+     """
+     Cleans extracted text to remove metadata, headers, and irrelevant content.
+     """
+     lines = text.split("\n")
+     cleaned_lines = []
+
+     for line in lines:
+         line = line.strip()
+
+         # Filter out lines with metadata patterns
+         if (
+             re.match(r"^(U\.S\.|United States|Sheet|Figure|References|Patent No|Date of Patent)", line)
+             or re.match(r"^\(?\d+\)?$", line)  # Matches single numbers (page numbers)
+             or "Examiner" in line
+             or "Attorney" in line
+             or len(line) < 30  # Skip very short lines
+         ):
+             continue
+
+         cleaned_lines.append(line)
+
+     return "\n".join(cleaned_lines)
+
+
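+ # Check the vectorstore's stored metadata to see whether this file has already been indexed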
+ def already_indexed(vectordb, file_name):
+     indexed_sources = set(
+         x["source"] for x in vectordb.get(include=["metadatas"])["metadatas"]
+     )
+     return file_name in indexed_sources
+
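+ # Build (or reuse) the Chroma vectorstore for this patent and wrap it in a
+ # ConversationalRetrievalChain with conversation buffer memory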
+ def load_chain(file_name=None):
+     loaded_patent = st.session_state.get("LOADED_PATENT")
+
+     # Debug: Check PERSISTED_DIRECTORY
+     st.write(f"Using Persisted Directory: {PERSISTED_DIRECTORY}")
+     vectordb = Chroma(
+         persist_directory=PERSISTED_DIRECTORY,
+         embedding_function=HuggingFaceEmbeddings(),
+     )
+
+     # Debug: Confirm already indexed
+     if loaded_patent == file_name or already_indexed(vectordb, file_name):
+         st.write("✅ Already indexed.")
+     else:
+         st.write("🔄 Starting document processing and vectorstore update...")
+
+         # Remove existing collection and load new docs
+         vectordb.delete_collection()
+         docs = load_docs(file_name)
+
+         # Debug: Verify text chunking
+         st.write(f"🔍 Number of Documents Loaded: {len(docs)}")
+         for i, doc in enumerate(docs[:5]):  # Show first 5 chunks for debugging
+             st.write(f"Chunk {i + 1}: {doc.page_content[:200]}...")
+
+         # Update vectorstore
+         vectordb = Chroma.from_documents(
+             docs, HuggingFaceEmbeddings(), persist_directory=PERSISTED_DIRECTORY
+         )
+         vectordb.persist()
+         st.write("✅ Vectorstore successfully updated and persisted.")
+
+     # Save loaded patent in session state
+     st.session_state["LOADED_PATENT"] = file_name
+
+     # Debug: Check vectorstore indexing
+     indexed_docs = vectordb.get(include=["documents"])
+     st.write(f"✅ Indexed Documents in Vectorstore: {len(indexed_docs['documents'])}")
+     for i, doc in enumerate(indexed_docs["documents"][:3]):  # Show first 3 indexed docs
+         st.write(f"Indexed Doc {i + 1}: {doc[:200]}...")
+
+     # Test retrieval with a sample query
+     retriever = vectordb.as_retriever(search_kwargs={"k": 3})
+     test_query = "What is this document about?"
+     results = retriever.get_relevant_documents(test_query)
+
+     # Debug: Verify document retrieval
+     st.write("🔍 Test Retrieval Results for Query:")
+     if results:
+         for i, res in enumerate(results):
+             st.write(f"Retrieved Doc {i + 1}: {res.page_content[:200]}...")
+     else:
+         st.warning("No documents retrieved for test query.")
+
+     # Configure memory for conversation
+     memory = ConversationBufferMemory(
+         memory_key="chat_history",
+         return_messages=True,
+         input_key="question",
+         output_key="answer",
+     )
+
+     return ConversationalRetrievalChain.from_llm(
+         OpenAI(temperature=0, openai_api_key=OPENAI_API_KEY),
+         retriever,
+         return_source_documents=False,
+         memory=memory,
+     )
+
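+ # Pull the patent identifier (two letters followed by digits) out of a Google Patents URL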
+ def extract_patent_number(url):
+     pattern = r"/patent/([A-Z]{2}\d+)"
+     match = re.search(pattern, url)
+     return match.group(1) if match else None
+
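+ # Fetch the patent PDF into the temp directory using the patent_downloader helper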
+ def download_pdf(patent_number):
+     try:
+         patent_downloader = PatentDownloader(verbose=True)
+         output_path = patent_downloader.download(patents=patent_number, output_path=tempfile.gettempdir())
+         return output_path[0]
+     except Exception as e:
+         st.error(f"Failed to download patent PDF: {e}")
+         st.stop()
+
+ def preview_pdf(pdf_path):
+     """Generate and display the first page of the PDF as an image."""
+     try:
+         doc = fitz.open(pdf_path)  # Open PDF
+         first_page = doc[0]  # Extract the first page
+         pix = first_page.get_pixmap()  # Render page to a Pixmap (image)
+         temp_image_path = os.path.join(tempfile.gettempdir(), "pdf_preview.png")
+         pix.save(temp_image_path)  # Save the image temporarily
+         return temp_image_path
+     except Exception as e:
+         st.error(f"Error generating PDF preview: {e}")
+         return None
+
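+ # Streamlit UI: take a Google Patents link, download and index the PDF, then chat over it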
+ if __name__ == "__main__":
+     st.set_page_config(
+         page_title="Patent Chat: Google Patents Chat Demo",
+         page_icon="📖",
+         layout="wide",
+         initial_sidebar_state="expanded",
+     )
+     st.header("📖 Patent Chat: Google Patents Chat Demo")
+
+     # Input for Google Patent Link
+     patent_link = st.text_area(
+         "Enter Google Patent Link:",
+         value="https://patents.google.com/patent/US8676427B1/en",
+         height=100
+     )
+
+     # Initialize session state
+     for key in ["LOADED_PATENT", "pdf_preview", "loaded_pdf_path", "chain", "messages"]:
+         if key not in st.session_state:
+             st.session_state[key] = None
+
+     # Button to load and process patent
+     if st.button("Load and Process Patent"):
+         if not patent_link:
+             st.warning("Please enter a valid Google patent link.")
+             st.stop()
+
+         # Extract patent number
+         patent_number = extract_patent_number(patent_link)
+         if not patent_number:
+             st.error("Invalid patent link format.")
+             st.stop()
+
+         st.write(f"Patent number: **{patent_number}**")
+
+         # File handling
+         pdf_path = os.path.join(tempfile.gettempdir(), f"{patent_number}.pdf")
+         if not os.path.isfile(pdf_path):
+             st.write("📥 Downloading patent file...")
+             try:
+                 pdf_path = download_pdf(patent_number)
+                 st.write(f"✅ File downloaded: {pdf_path}")
+             except Exception as e:
+                 st.error(f"Failed to download patent: {e}")
+                 st.stop()
+         else:
+             st.write("✅ File already downloaded.")
+
+         # Generate PDF preview only if not already displayed
+         if not st.session_state.get("pdf_preview_displayed", False):
+             st.write("🖼️ Generating PDF preview...")
+             preview_image_path = preview_pdf(pdf_path)
+             if preview_image_path:
+                 st.session_state.pdf_preview = preview_image_path
+                 st.image(preview_image_path, caption="First Page Preview", use_container_width=True)
+                 st.session_state["pdf_preview_displayed"] = True
+             else:
+                 st.warning("Failed to generate PDF preview.")
+                 st.session_state.pdf_preview = None
+
+         # Load the document into the system
+         st.write("🔄 Loading document into the system...")
+         try:
+             st.session_state.chain = load_chain(pdf_path)
+             st.session_state.LOADED_PATENT = patent_number
+             st.session_state.loaded_pdf_path = pdf_path
+             st.session_state.messages = [{"role": "assistant", "content": "Hello! How can I assist you with this patent?"}]
+             st.success("🚀 Document successfully loaded! You can now start asking questions.")
+         except Exception as e:
+             st.error(f"Failed to load the document: {e}")
+             st.stop()
+
+     # Display previous chat messages
+     if st.session_state.messages:
+         for message in st.session_state.messages:
+             with st.chat_message(message["role"]):
+                 st.markdown(message["content"])
+
+     # User input for questions
+     if st.session_state.chain:
+         if user_input := st.chat_input("What is your question?"):
+             # User message
+             st.session_state.messages.append({"role": "user", "content": user_input})
+             with st.chat_message("user"):
+                 st.markdown(user_input)
+
+             # Assistant response
+             with st.chat_message("assistant"):
+                 message_placeholder = st.empty()
+                 full_response = ""
+
+                 with st.spinner("Generating response..."):
+                     try:
+                         # Generate response using the chain
+                         assistant_response = st.session_state.chain({"question": user_input})
+                         full_response = assistant_response.get("answer", "I'm sorry, I couldn't process that question.")
+                     except Exception as e:
+                         full_response = f"An error occurred: {e}"
+
+                 message_placeholder.markdown(full_response)
+                 st.session_state.messages.append({"role": "assistant", "content": full_response})
+     else:
+         st.info("Press the 'Load and Process Patent' button to start processing.")