import json
import re
from pathlib import Path
from typing import List, Dict
from collections import defaultdict

import psycopg2
from psycopg2.extras import execute_values
from langchain_core.documents import Document
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_text_splitters import MarkdownHeaderTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_milvus import Milvus
from langchain_ollama import OllamaEmbeddings

# PostgreSQL configuration
# NOTE(review): credentials are hard-coded in source; consider moving them to
# environment variables or a secrets manager.
PG_HOST = "192.168.6.20"
PG_PORT = 5432
PG_USER = "ipp_air"
PG_PASSWORD = "Air@yutu123."
PG_DB = "yutu_ipp_air"


def connect_pg():
    """Open and return a new PostgreSQL connection using the module-level settings."""
    params = {
        "host": PG_HOST,
        "port": PG_PORT,
        "user": PG_USER,
        "password": PG_PASSWORD,
        "dbname": PG_DB,
    }
    return psycopg2.connect(**params)


def ingest_document_chunks(title: str, chunks: List[str]):
    """Bulk-insert text chunks for one document into the full-text-search table.

    Args:
        title: Source document title stored alongside every chunk.
        chunks: Text fragments to insert; when empty, nothing is written
            (the transaction is still committed as a no-op).
    """
    conn = connect_pg()
    try:
        with conn.cursor() as cur:
            rows = [(chunk, title) for chunk in chunks]
            if rows:
                # execute_values expands all rows into a single multi-VALUES
                # statement, batched page_size rows at a time.
                execute_values(
                    cur,
                    "INSERT INTO ipp_full_text_search_docs (text, title) VALUES %s",
                    rows,
                    page_size=100
                )
        conn.commit()
    finally:
        # Guarantee the connection is released even if the insert or commit
        # raises; the original leaked the connection on any exception.
        conn.close()


class KnowledgeIndexer:
    """Index Markdown files into a Milvus vector store and PostgreSQL.

    Pipeline: load every ``*.md`` under ``md_folder`` recursively, split each
    document on level-1 headers, sub-split each section by character length,
    embed the chunks into Milvus, then group the same chunks by source title
    and insert them into the PostgreSQL full-text-search table.
    """

    def __init__(self, md_folder: str):
        self.md_folder = Path(md_folder)
        # Embedding model served by Ollama.
        self.embedder = OllamaEmbeddings(
            model="bge-m3:latest",
            base_url="http://192.168.7.3:11434"
        )
        # Milvus vector-store configuration.
        self.vectorstore = Milvus(
            embedding_function=self.embedder,
            collection_name="test",
            connection_args={
                "uri": "http://192.168.6.20:19530",
                "db_name": "ipp_air_general",
            },
            auto_id=True
        )
        # Splitter: MinerU extraction only produces level-1 headers, so
        # splitting on "#" best preserves paragraph context within a section.
        # The second tuple element is the metadata key the splitter will use
        # for the matched header text.
        self.header_splitter = MarkdownHeaderTextSplitter(
            headers_to_split_on=[
                ("#", "一级标题"),
            ]
        )
        # Within each header section, further split by character length.
        self.chunk_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=50
        )

    def load_all_md_documents(self) -> List[Document]:
        """Load every Markdown file under ``md_folder`` (recursively).

        Returns:
            One Document per file with ``metadata["source_file"]`` set to the
            file name; any loader-provided metadata is replaced.
        """
        all_docs: List[Document] = []
        for file_path in self.md_folder.rglob("*.md"):
            loader = UnstructuredMarkdownLoader(str(file_path), mode="single")
            docs = loader.load()
            for doc in docs:
                doc.metadata = {"source_file": file_path.name}
                all_docs.append(doc)
        return all_docs

    def run(self):
        """Run the full pipeline: load, split, store to Milvus and PostgreSQL."""
        # 1. Load MD documents
        documents = self.load_all_md_documents()
        print(f"📄 共加载 Markdown 文档数：{len(documents)}")

        # 2. Split and build split_docs
        split_docs: List[Document] = []
        for doc in documents:
            title = doc.metadata.get("source_file", "")
            header_docs = self.header_splitter.split_text(doc.page_content)
            for header_doc in header_docs:
                # BUG FIX: MarkdownHeaderTextSplitter stores the matched
                # header under the key supplied in headers_to_split_on
                # ("一级标题"), not under "header" — the original lookup
                # always returned "" and the header was never prepended.
                header_text = header_doc.metadata.get("一级标题", "")
                sub_chunks = self.chunk_splitter.split_text(header_doc.page_content)
                for chunk in sub_chunks:
                    # Prepend the section header so each chunk keeps context.
                    combined_text = f"{header_text}\n{chunk}" if header_text else chunk
                    split_docs.append(
                        Document(page_content=combined_text, metadata={"title": title})
                    )
        print(f"📦 切分后片段数：{len(split_docs)}，准备入库...")

        # 3. Store into the Milvus vector store
        self.vectorstore.add_documents(split_docs)
        print("✅ Milvus 入库完成！")

        # 4. Store into the PostgreSQL table, grouping chunks by title
        grouped: Dict[str, List[str]] = defaultdict(list)
        for doc in split_docs:
            grouped[doc.metadata.get("title", "")].append(doc.page_content)
        for title, chunks in grouped.items():
            ingest_document_chunks(title, chunks)
            print(f"✅ PostgreSQL 入库完成：{title}, 分块数={len(chunks)}")
