# TODO: Enable downloading multiple patent PDFs via their corresponding links
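"""Patent Chat: a Streamlit demo for chatting with Google Patents PDFs using LangChain and Chroma."""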
import sys
import os
import re
import shutil
import time
import fitz  # PyMuPDF
import streamlit as st
import nltk
import tempfile
import subprocess

# Pin NLTK to version 3.9.1
REQUIRED_NLTK_VERSION = "3.9.1"
subprocess.run([sys.executable, "-m", "pip", "install", f"nltk=={REQUIRED_NLTK_VERSION}"])

# Set up temporary directory for NLTK resources
nltk_data_path = os.path.join(tempfile.gettempdir(), "nltk_data")
os.makedirs(nltk_data_path, exist_ok=True)
nltk.data.path.append(nltk_data_path)

# Download 'punkt_tab' for compatibility
try:
    print("Ensuring NLTK 'punkt_tab' resource is downloaded...")
    nltk.download("punkt_tab", download_dir=nltk_data_path)
except Exception as e:
    print(f"Error downloading NLTK 'punkt_tab': {e}")
    raise e

sys.path.append(os.path.abspath("."))
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.llms import OpenAI
from langchain.document_loaders import UnstructuredPDFLoader, PyMuPDFLoader
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import NLTKTextSplitter, RecursiveCharacterTextSplitter
from patent_downloader import PatentDownloader

# Temporary directory used to persist the Chroma vectorstore (recreated on each script run)
PERSISTED_DIRECTORY = tempfile.mkdtemp()

# Fetch API key securely from the environment
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    st.error("Critical Error: OpenAI API key not found in the environment variables. Please configure it.")
    st.stop()

def check_poppler_installed():
    if not shutil.which("pdfinfo"):
        raise EnvironmentError(
            "Poppler is not installed or not in PATH. Install 'poppler-utils' for PDF processing."
        )

check_poppler_installed()

def load_docs(document_path):
    """
    Load and clean the PDF content, then split into chunks.
    """
    try:
        # Step 1: Extract plain text from PDF
        doc = fitz.open(document_path)
        extracted_text = []

        for page_num, page in enumerate(doc):
            page_text = page.get_text("text")  # Extract text
            clean_page_text = clean_extracted_text(page_text)
            if clean_page_text:  # Keep only non-empty cleaned text
                extracted_text.append(clean_page_text)

        doc.close()

        # Combine all pages into one text
        full_text = "\n".join(extracted_text)
        st.write(f"📄 Total Cleaned Text Length: {len(full_text)} characters")

        # Step 2: Chunk the cleaned text
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=100,
            separators=["\n\n", "\n", " ", ""]
        )
        split_docs = text_splitter.create_documents([full_text])

        # Debug: Show total chunks count and first 3 chunks for verification
        st.write(f"🔍 Total Chunks After Splitting: {len(split_docs)}")
        for i, doc in enumerate(split_docs[:3]):  # Show first 3 chunks only
            st.write(f"Chunk {i + 1}: {doc.page_content[:300]}...")

        return split_docs
    except Exception as e:
        st.error(f"Failed to load and process PDF: {e}")
        st.stop()


def clean_extracted_text(text):
    """
    Cleans extracted text to remove metadata, headers, and irrelevant content.
    """
    lines = text.split("\n")
    cleaned_lines = []

    for line in lines:
        line = line.strip()

        # Filter out lines with metadata patterns
        if (
            re.match(r"^(U\.S\.|United States|Sheet|Figure|References|Patent No|Date of Patent)", line)
            or re.match(r"^\(?\d+\)?$", line)  # Matches single numbers (page numbers)
            or "Examiner" in line
            or "Attorney" in line
            or len(line) < 30  # Skip very short lines
        ):
            continue

        cleaned_lines.append(line)

    return "\n".join(cleaned_lines)


def already_indexed(vectordb, file_name):
    """Return True if the given file is already present in the vectorstore metadata."""
    metadatas = vectordb.get(include=["metadatas"])["metadatas"]
    indexed_sources = {m.get("source") for m in metadatas if m}
    return file_name in indexed_sources

def load_chain(file_name=None):
    """
    Load cleaned PDF text, split into chunks, and update the vectorstore.
    """
    loaded_patent = st.session_state.get("LOADED_PATENT")

    # Debug: Show persist directory
    st.write(f"🗂 Using Persisted Directory: {PERSISTED_DIRECTORY}")

    vectordb = Chroma(
        persist_directory=PERSISTED_DIRECTORY,
        embedding_function=HuggingFaceEmbeddings(),
    )

    if loaded_patent == file_name or already_indexed(vectordb, file_name):
        st.write("✅ Already indexed.")
    else:
        st.write("🔄 Starting document processing and vectorstore update...")

        # Remove existing collection and load new docs
        vectordb.delete_collection()
        docs = load_docs(file_name)

        # Update vectorstore
        vectordb = Chroma.from_documents(
            docs, HuggingFaceEmbeddings(), persist_directory=PERSISTED_DIRECTORY
        )
        vectordb.persist()
        st.write("✅ Vectorstore successfully updated and persisted.")

        # Save loaded patent in session state
        st.session_state["LOADED_PATENT"] = file_name

    # Debug: Check vectorstore indexing summary
    indexed_docs = vectordb.get(include=["documents"])
    st.write(f"✅ Total Indexed Documents: {len(indexed_docs['documents'])}")

    # Test retrieval with a simple query
    retriever = vectordb.as_retriever(search_kwargs={"k": 3})
    test_query = "What is this document about?"
    results = retriever.get_relevant_documents(test_query)

    st.write("🔍 Test Retrieval Results for Query:")
    if results:
        for i, res in enumerate(results):
            st.write(f"Retrieved Doc {i + 1}: {res.page_content[:200]}...")
    else:
        st.warning("No documents retrieved for test query.")

    # Configure memory for conversation
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True
    )

    return ConversationalRetrievalChain.from_llm(
        OpenAI(temperature=0, openai_api_key=OPENAI_API_KEY),
        retriever,
        memory=memory
    )


def extract_patent_number(url):
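    """Extract the patent number (country code + digits, e.g. 'US8676427') from a Google Patents URL."""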
    pattern = r"/patent/([A-Z]{2}\d+)"
    match = re.search(pattern, url)
    return match.group(1) if match else None

def download_pdf(patent_number):
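    """Download the patent PDF with PatentDownloader and return the path to the downloaded file."""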
    try:
        patent_downloader = PatentDownloader(verbose=True)
        output_path = patent_downloader.download(patents=patent_number, output_path=tempfile.gettempdir())
        return output_path[0]
    except Exception as e:
        st.error(f"Failed to download patent PDF: {e}")
        st.stop()

def preview_pdf(pdf_path, scale_factor=0.5):
    """
    Generate and display a resized preview of the first page of the PDF.
    Args:
        pdf_path (str): Path to the PDF file.
        scale_factor (float): Factor to reduce the image size (default is 0.5).
    Returns:
        str: Path to the resized image preview.
    """
    try:
        # Open the PDF and extract the first page
        doc = fitz.open(pdf_path)
        first_page = doc[0]
        
        # Apply scaling using a transformation matrix
        matrix = fitz.Matrix(scale_factor, scale_factor)  # Scale down the image
        pix = first_page.get_pixmap(matrix=matrix)  # Generate scaled image
        
        # Save the preview image
        temp_image_path = os.path.join(tempfile.gettempdir(), "pdf_preview.png")
        pix.save(temp_image_path)
        
        doc.close()
        return temp_image_path
    
    except Exception as e:
        st.error(f"Error generating PDF preview: {e}")
        return None


if __name__ == "__main__":
    st.set_page_config(
        page_title="Patent Chat: Google Patents Chat Demo",
        page_icon="📖",
        layout="wide",
        initial_sidebar_state="expanded",
    )
    st.header("📖 Patent Chat: Google Patents Chat Demo")

    # Input for Google Patent Link
    patent_link = st.text_area(
        "Enter Google Patent Link:", 
        value="https://patents.google.com/patent/US8676427B1/en", 
        height=90
    )

    # Initialize session state
    for key in ["LOADED_PATENT", "pdf_preview", "loaded_pdf_path", "chain", "messages"]:
        if key not in st.session_state:
            st.session_state[key] = None

    # Button to load and process patent
    if st.button("Load and Process Patent"):
        if not patent_link:
            st.warning("Please enter a valid Google patent link.")
            st.stop()

        # Extract patent number
        patent_number = extract_patent_number(patent_link)
        if not patent_number:
            st.error("Invalid patent link format.")
            st.stop()

        st.write(f"Patent number: **{patent_number}**")

        # File handling
        pdf_path = os.path.join(tempfile.gettempdir(), f"{patent_number}.pdf")
        if not os.path.isfile(pdf_path):
            with st.spinner("📥 Downloading patent file..."):
                try:
                    pdf_path = download_pdf(patent_number)
                    st.write(f"✅ File downloaded: {pdf_path}")
                except Exception as e:
                    st.error(f"Failed to download patent: {e}")
                    st.stop()
        else:
            st.write("✅ File already downloaded.")

        # Generate PDF preview only if not already displayed
        if not st.session_state.get("pdf_preview_displayed", False):
            with st.spinner("🖼️ Generating PDF preview..."):
                preview_image_path = preview_pdf(pdf_path, scale_factor=0.5)
                if preview_image_path:
                    st.session_state.pdf_preview = preview_image_path
                    st.image(preview_image_path, caption="First Page Preview", use_container_width=False)
                    st.session_state["pdf_preview_displayed"] = True
                else:
                    st.warning("Failed to generate PDF preview.")
                    st.session_state.pdf_preview = None

        # Load the document into the system
        with st.spinner("🔄 Loading document into the system..."):
            try:
                st.session_state.chain = load_chain(pdf_path)
                st.session_state.LOADED_PATENT = patent_number
                st.session_state.loaded_pdf_path = pdf_path
                st.session_state.messages = [{"role": "assistant", "content": "Hello! How can I assist you with this patent?"}]
                st.success("🚀 Document successfully loaded! You can now start asking questions.")
            except Exception as e:
                st.error(f"Failed to load the document: {e}")
                st.stop()

    # Display previous chat messages
    if st.session_state.messages:
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

    # User input for questions
    if st.session_state.chain:
        if user_input := st.chat_input("What is your question?"):
            # User message
            st.session_state.messages.append({"role": "user", "content": user_input})
            with st.chat_message("user"):
                st.markdown(user_input)

            # Assistant response
            with st.chat_message("assistant"):
                message_placeholder = st.empty()
                full_response = ""

                with st.spinner("Generating response..."):
                    try:
                        # Generate response using the chain
                        assistant_response = st.session_state.chain({"question": user_input})
                        full_response = assistant_response.get("answer", "I'm sorry, I couldn't process that question.")
                    except Exception as e:
                        full_response = f"An error occurred: {e}"

                message_placeholder.markdown(full_response)
                st.session_state.messages.append({"role": "assistant", "content": full_response})
    else:
        st.info("Press the 'Load and Process Patent' button to start processing.")