import os
import chromadb
import fitz
from chromadb.utils import embedding_functions
from sentence_transformers import SentenceTransformer
from tqdm import tqdm


class PDFBatchLoader:
    """Batch loader that extracts plain text from every PDF in a directory."""

    def __init__(self, pdf_dir):
        # Directory scanned for PDFs; the file list is resolved eagerly.
        self.pdf_dir = pdf_dir
        self.file_list = self._get_pdf_list()

    def _get_pdf_list(self):
        """Return full paths of all PDFs in the directory (case-insensitive extension match)."""
        return [
            os.path.join(self.pdf_dir, f)
            for f in os.listdir(self.pdf_dir)
            if f.lower().endswith('.pdf')
        ]

    def load_all(self):
        """Extract the full text of every listed PDF.

        Returns:
            list[dict]: one dict per file with keys "filename" and "content".
        """
        all_text = []
        print(f"开始处理{len(self.file_list)}篇论文...")

        for pdf_path in tqdm(self.file_list, desc="PDF解析进度"):
            with fitz.open(pdf_path) as doc:
                # Join page texts in one pass instead of repeated string
                # concatenation (which is quadratic in the page count).
                text = "".join(page.get_text() for page in doc)
                all_text.append({
                    "filename": os.path.basename(pdf_path),
                    "content": text
                })

        return all_text


# Usage example (replace the path below with your own PDF directory).
loader = PDFBatchLoader(r"D:\Users\Administrator\Desktop\graduation_design\hydrogen_related_documents\input_pdfs")
raw_data = loader.load_all()

import re
import jieba


class AcademicCleaner:
    """Cleans raw academic PDF text: strips noise, tokenizes, filters stopwords."""

    def __init__(self):
        # Domain stopwords: citation/layout artifacts common in papers.
        self.stopwords = {"etal", "ie", "eg", "fig", "table", "section", "参考文献", "reference", "©", "doi","摘要"}

        # Removal patterns, compiled once and applied to every text.
        self.patterns = {
            "figure": re.compile(r'图\s?\d+[.-]?\d*', re.I),
            "equation": re.compile(r'\([A-Za-z]+\d+\)'),
            "url": re.compile(r'http[s]?://\S+')
        }

        # Chemical-formula pattern (matches H2O, CO2, ...). Kept SEPARATE from
        # self.patterns: the original code inserted it into the removal dict
        # inside clean(), so from the second call on it was applied as a
        # removal pattern and deleted formulas — the opposite of the intent.
        self.keep_formula = re.compile(r'\b([A-Z][a-z]?\d*){2,}\b')

        # Load the custom dictionary ONCE, before any tokenization. It was
        # previously loaded inside clean() *after* jieba.lcut, so it never
        # affected the current call and was re-read on every call.
        jieba.load_userdict("hydrogen_terms.txt")

    def clean(self, text):
        """Run the cleaning pipeline and return a space-joined token string."""
        # 1. Drop characters outside CJK, ASCII alphanumerics and punctuation.
        text = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9\s.,;:?！，。？、]', '', text)

        # 2. Strip figure captions, equation labels and URLs.
        for pattern in self.patterns.values():
            text = pattern.sub('', text)

        # 3. Tokenize; keep chemical formulas, drop stopwords and 1-char tokens.
        words = jieba.lcut(text)
        filtered = [
            w for w in words
            if self.keep_formula.fullmatch(w) or (w not in self.stopwords and len(w) > 1)
        ]

        # 4. Reassemble as whitespace-separated tokens.
        return ' '.join(filtered)


# Usage example: clean the raw text of every loaded paper.
cleaner = AcademicCleaner()
cleaned_data = [cleaner.clean(item["content"]) for item in raw_data]

from langchain.text_splitter import RecursiveCharacterTextSplitter


class AcademicSplitter:
    """Splits cleaned paper text into overlapping chunks for embedding."""

    def __init__(self):
        # Chunking parameters tuned for the m3e-base embedding model and the
        # section structure of Chinese academic papers ("第...章" headings,
        # blank-line paragraph breaks, sentence-final "。").
        chunking_config = dict(
            chunk_size=500,
            chunk_overlap=150,
            separators=["\n\n第", "\n\n", "。"],
        )
        self.splitter = RecursiveCharacterTextSplitter(**chunking_config)

    def split(self, text):
        """Return the list of text chunks produced from *text*."""
        pieces = self.splitter.split_text(text)
        return pieces


from langchain_core.documents import Document

# Usage example: split each cleaned paper into Document-wrapped chunks.
splitter = AcademicSplitter()
chunks = []

for text in cleaned_data:
    # Wrap each text chunk in a langchain Document object.
    text_chunks = splitter.split(text)
    chunks.extend([Document(page_content=chunk) for chunk in text_chunks])
print(f"共生成{len(chunks)}个知识块")

# Embedding model loaded once at module scope and reused during indexing.
model = SentenceTransformer('moka-ai/m3e-base')


class KnowledgeBaseBuilder:
    """Builds a persistent Chroma collection from a list of Document chunks."""

    def __init__(self, persist_dir="./chroma_db"):
        # NOTE: this embedding function is kept for interface compatibility
        # but is currently unused — build() encodes with the module-level
        # SentenceTransformer `model` instead.
        self.embeddings = embedding_functions.SentenceTransformerEmbeddingFunction(
            model_name="moka-ai/m3e-base"
        )
        self.persist_dir = persist_dir

    def build(self, chunks):
        """Embed *chunks* in batches and insert them into the collection.

        Args:
            chunks: objects exposing a ``page_content`` string attribute.
        """
        # BUG FIX: the path was hard-coded to "./chromadb", silently ignoring
        # the persist_dir constructor argument (whose default is "./chroma_db").
        client = chromadb.PersistentClient(path=self.persist_dir)
        collection = client.get_or_create_collection(
            name="hydrogen_research",
            metadata={
                "hnsw:space": "cosine",
                "hnsw:M": 24,                # neighbor count (default 16), improves recall
                "hnsw:ef_construction": 200  # build-time search depth (default 100)
            }
        )
        # NOTE(review): "pq:*" keys are not documented Chroma settings, and
        # modify() replaces collection metadata — confirm this call is wanted.
        collection.modify(metadata={"pq:enabled": True, "pq:segments": 64})

        # Insert in batches to bound memory use during encoding.
        batch_size = 500
        docs = [chunk.page_content for chunk in chunks]

        for start in range(0, len(docs), batch_size):
            batch = docs[start:start + batch_size]
            embeddings = model.encode(batch).tolist()
            collection.add(
                documents=batch,
                embeddings=embeddings,
                # Deterministic ids, globally unique across batches.
                ids=[f"doc_{start + j}" for j in range(len(batch))]
            )

        print(f"总插入记录数：{collection.count()}")


# Usage example: persist all chunks into the local Chroma database.
builder = KnowledgeBaseBuilder()
builder.build(chunks)
