import pdfplumber
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_core.documents import Document


class PDFHelper:
    """Helpers for extracting text from PDF files (via pdfplumber) and
    turning it into LangChain documents / chunks for downstream indexing."""

    # Character-based splitter configuration used by docs_to_chunks().
    CHUNK_SIZE = 300
    CHUNK_OVERLAP = 20

    @staticmethod
    def extract_all_text(pdf_path):
        """Extract the text of every page of a PDF as a single string.

        Args:
            pdf_path: Path to the PDF file.

        Returns:
            str: All page texts concatenated (each extracted page followed by
            a newline; pages with no extractable text contribute a Chinese
            placeholder message). On any failure a Chinese error-message
            string is returned instead of raising.
        """
        parts = []
        try:
            with pdfplumber.open(pdf_path) as pdf:
                for page in pdf.pages:
                    page_text = page.extract_text()
                    if page_text is not None:
                        parts.append(page_text + "\n")
                    else:
                        # Placeholder for pages with no extractable text
                        # (scanned images, empty pages, ...).
                        parts.append("此页面没有可提取的内容")
        except Exception as e:
            # NOTE(review): returning an error string instead of raising makes
            # failures indistinguishable from extracted text; kept for
            # backward compatibility with existing callers.
            return f"处理PDF时出现错误:{e}"
        # str.join is O(n) overall; the original repeated `+=` was quadratic.
        return "".join(parts)

    def pdf_to_docs(self, pdf_path, file_name):
        """Convert each page of a PDF into a LangChain ``Document``.

        Args:
            pdf_path: Path to the PDF file.
            file_name: Logical file name stored in each document's metadata
                under the ``"file_name"`` key.

        Returns:
            list[Document]: One document per page, each carrying the file
            name plus the PDF's str/int metadata entries. On any failure a
            Chinese error-message string is returned instead of raising.
        """
        docs = []
        try:
            with pdfplumber.open(pdf_path) as pdf:
                # The PDF-level metadata is identical for every page: build it
                # once instead of re-filtering pdf.metadata per page. Only
                # str/int values are kept, matching the original filter.
                base_metadata = {"file_name": file_name}
                base_metadata.update(
                    (k, v)
                    for k, v in pdf.metadata.items()
                    if isinstance(v, (str, int))
                )
                for page in pdf.pages:
                    page_text = page.extract_text()
                    # BUG FIX: extract_text() may return None for pages with
                    # no text layer; Document expects a string, so fall back
                    # to the empty string.
                    docs.append(
                        Document(
                            page_content=page_text or "",
                            # Copy so documents don't share one mutable dict.
                            metadata=dict(base_metadata),
                        )
                    )
        except Exception as e:
            # NOTE(review): error-string return (not an exception) kept for
            # backward compatibility — callers must type-check the result.
            return f"处理PDF时出现错误:{e}"
        return docs

    def docs_to_chunks(self, docs):
        """Split documents into overlapping character chunks.

        Args:
            docs: Iterable of LangChain ``Document`` objects.

        Returns:
            list[Document]: Chunked documents of at most ``CHUNK_SIZE``
            characters with ``CHUNK_OVERLAP`` characters of overlap; each
            chunk inherits its source document's metadata.
        """
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.CHUNK_SIZE,
            chunk_overlap=self.CHUNK_OVERLAP,
        )
        texts = [doc.page_content for doc in docs]
        metadatas = [doc.metadata for doc in docs]
        return text_splitter.create_documents(texts, metadatas=metadatas)