import os
import re
import json
import time
import shutil
import argparse
from dataclasses import dataclass, asdict
from typing import List, Dict, Any, Tuple
from datetime import datetime
from pathlib import Path

import paddle
from paddleocr import PPStructureV3
from tqdm import tqdm
from langchain.docstore.document import Document
from langchain.text_splitter import MarkdownHeaderTextSplitter
from langchain_experimental.text_splitter import SemanticChunker
from langchain_huggingface import HuggingFaceEmbeddings

# 引入Milvus相关库
from pymilvus import (
    connections,
    CollectionSchema,
    FieldSchema,
    DataType,
    Collection,
    utility
)


# ==================== Global configuration (single place for paths & params) ====================
# Embedding model (used for semantic chunking + vector generation)
EMBEDDING_MODEL_NAME = "BAAI/bge-small-zh-v1.5"
EMBEDDING_MODEL_LOCAL_DIR = "huggingface_models/bge-small-zh-v1.5"
VECTOR_DIM = 384  # BGE-small-zh always outputs 384-dim vectors

# Markdown chunking configuration
HEADER_SPLIT_RULES = [("#", "Header 1"), ("##", "Header 2"), ("###", "Header 3")]
SEMANTIC_BUFFER_SIZE = 1  # semantic-chunker buffer size (1 = fine-grained, suits industry-report text)
MIN_VALID_CHAR_COUNT = 50  # minimum characters for a chunk to be kept (filters meaningless text)

# Milvus configuration
MILVUS_HOST = "localhost"
MILVUS_PORT = "19530"
MILVUS_COLLECTION_NAME = "industry_report_chunks"
# ==================================================================


# ==================== 1. PDF转Markdown相关定义 ====================
@dataclass
class PDF2MDConfig:
    """Configuration for converting one PDF to Markdown (mirrors source sub-folders)."""
    max_cpu_threads: int = 16  # recommended thread count for an 8-core / 32 GB host
    enable_mkldnn: bool = True  # enable MKLDNN acceleration for CPU inference
    pdf_abs_path: str = ""  # absolute path of the single PDF (assigned at runtime)
    source_root_abs_path: str = ""  # root directory of source PDFs (e.g. /opt/test/行业报告)
    md_output_root_abs_path: str = ""  # root directory for Markdown output (e.g. /opt/test/行业报告全量处理结果)


class PDF2MarkdownProcessor:
    """Convert a single PDF to Markdown (plus extracted images) with PPStructureV3.

    The output directory mirrors the PDF's sub-folder layout relative to the
    source root, so batch runs preserve the original directory tree.
    """

    def __init__(self, config: "PDF2MDConfig"):
        """Validate the environment and build the PPStructureV3 pipeline.

        Args:
            config: paths and CPU settings for this single-PDF conversion.
        """
        self.config = config
        self.ppstructure: "PPStructureV3" = None  # created in _init_ppstructure()
        self.result_save_dir = self._get_result_save_dir()  # Markdown save path mirroring the source tree
        print(f"🔧 初始化PDF转Markdown处理器：源PDF={self.config.pdf_abs_path}")
        print(f"   🔗 Markdown保存目录：{self.result_save_dir}（复刻源文件子目录）")
        self._pre_check()
        self._init_device()
        self._init_ppstructure()

    @staticmethod
    def _version_tuple(version: str) -> Tuple[int, ...]:
        """Parse a dotted version string into an int tuple for correct ordering.

        Plain string comparison is wrong for versions ('3.10.0' < '3.2.0'
        lexicographically); numeric tuples compare correctly. Each component
        keeps only its leading digits ('0rc1' -> 0); a component with no
        leading digits ends parsing.
        """
        parts = []
        for piece in version.split("."):
            match = re.match(r"\d+", piece)
            if not match:
                break
            parts.append(int(match.group()))
        return tuple(parts)

    def _get_result_save_dir(self) -> str:
        """Rebuild the source sub-directory structure under the Markdown output root."""
        pdf_path = Path(self.config.pdf_abs_path)
        source_root_path = Path(self.config.source_root_abs_path)
        md_output_root_path = Path(self.config.md_output_root_abs_path)

        # Path of the PDF relative to the source root (strips the root prefix).
        pdf_rel_path = pdf_path.relative_to(source_root_path)
        # Output dir = Markdown root + source sub-dirs + PDF name without suffix.
        result_dir = md_output_root_path / pdf_rel_path.parent / pdf_rel_path.stem
        return str(result_dir)

    def _pre_check(self):
        """Pre-flight validation: versions, dependencies, thread count, paths.

        Raises:
            RuntimeError: on any failed check, wrapping the cause plus a
                ready-to-run fix command.
        """
        print(f"\n📋 开始PDF转Markdown前置校验...")
        # Defined BEFORE the try so the except handler can always build fix_cmd
        # (previously these were try-locals referenced in the handler).
        required_paddle = "3.1.0"
        required_ocr = "3.2.0"
        try:
            # Version checks. Explicit raises instead of assert: assert
            # statements are stripped when Python runs with -O.
            print(f"  🔍 版本校验：PaddlePaddle={required_paddle}，PaddleOCR≥{required_ocr}")
            if paddle.__version__ != required_paddle:
                raise RuntimeError(f"需PaddlePaddle {required_paddle}，当前：{paddle.__version__}")

            import paddleocr
            # Compare parsed version tuples, not raw strings (string comparison
            # would wrongly report "3.10.0" < "3.2.0").
            if self._version_tuple(paddleocr.__version__) < self._version_tuple(required_ocr):
                raise RuntimeError(f"需PaddleOCR ≥{required_ocr}，当前：{paddleocr.__version__}")
            print(f"  ✅ 版本校验通过：PaddlePaddle={paddle.__version__}，PaddleOCR={paddleocr.__version__}")

            # Dependency checks.
            print(f"  🔍 依赖校验：检查pdf2image、opencv-python、numpy...")
            missing_libs = []
            try:
                from pdf2image import convert_from_path
                print(f"    ✅ pdf2image已安装")
            except ImportError:
                missing_libs.append("pdf2image")
            try:
                import cv2
                import numpy as np
                print(f"    ✅ opencv-python、numpy已安装")
            except ImportError:
                missing_libs.append("opencv-python numpy")

            if missing_libs:
                raise ImportError(
                    f"缺失依赖：{', '.join(missing_libs)}\n"
                    f"安装：pip install {' '.join(missing_libs)} -i https://pypi.tuna.tsinghua.edu.cn/simple"
                )
            print(f"  ✅ 依赖校验通过：所有必需库已安装")

            # Thread-count check.
            print(f"  🔍 线程数校验：当前设置={self.config.max_cpu_threads}（推荐1-32）")
            if not (1 <= self.config.max_cpu_threads <= 32):
                raise ValueError(f"线程数需在1-32之间，当前：{self.config.max_cpu_threads}")
            print(f"  ✅ 线程数校验通过：{self.config.max_cpu_threads}线程（适配8核32G）")

            # Path checks.
            print(f"  🔍 路径校验：源PDF+Markdown保存目录...")
            if not os.path.exists(self.config.pdf_abs_path):
                raise FileNotFoundError(f"源PDF不存在：{self.config.pdf_abs_path}")
            print(f"    ✅ 源PDF路径有效：{self.config.pdf_abs_path}")

            # Create the Markdown save directory (including nested sub-folders).
            Path(self.result_save_dir).mkdir(parents=True, exist_ok=True)
            print(f"    ✅ Markdown保存目录就绪：{self.result_save_dir}（已自动创建）")

            print(f"✅ PDF转Markdown前置校验通过")

        except Exception as e:
            fix_cmd = (
                f"pip install paddlepaddle=={required_paddle} paddleocr>={required_ocr} "
                f"pdf2image opencv-python numpy -i https://pypi.tuna.tsinghua.edu.cn/simple && "
                f"apt-get install -y poppler-utils"
            )
            raise RuntimeError(f"校验失败：{str(e)}\n修复命令：\n{fix_cmd}")

    def _init_device(self):
        """Select the CPU device for Paddle inference."""
        print(f"\n⚙️  初始化CPU设备...")
        paddle.set_device("cpu")
        print(f"✅ CPU设备初始化完成（线程数：{self.config.max_cpu_threads}，MKLDNN加速：{self.config.enable_mkldnn}）")

    def _init_ppstructure(self):
        """Create the PPStructureV3 pipeline (basic parameters only).

        Raises:
            RuntimeError: on failure, including manual model-download commands.
        """
        print(f"\n📦 初始化PPStructureV3...")
        print(f"  🔧 初始化参数：lang=ch，cpu_threads={self.config.max_cpu_threads}，enable_mkldnn={self.config.enable_mkldnn}")
        try:
            self.ppstructure = PPStructureV3(
                lang="ch",
                cpu_threads=self.config.max_cpu_threads,
                enable_mkldnn=self.config.enable_mkldnn
            )
            print(f"✅ PPStructureV3初始化完成")

        except Exception as e:
            # Manual download commands for the detection/recognition/structure
            # models, in case initialisation failed because they are missing.
            model_cmd = (
                "wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar -P ~/.paddlex/official_models/ && "
                "wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar -P ~/.paddlex/official_models/ && "
                "wget https://paddleocr.bj.bcebos.com/PP-StructureV3/chinese/ch_ppstructure_mobile_v3.0_SLANet_infer.tar -P ~/.paddlex/official_models/ && "
                "tar -xvf ~/.paddlex/official_models/ch_PP-OCRv3_det_infer.tar -C ~/.paddlex/official_models/ && "
                "tar -xvf ~/.paddlex/official_models/ch_PP-OCRv3_rec_infer.tar -C ~/.paddlex/official_models/ && "
                "tar -xvf ~/.paddlex/official_models/ch_ppstructure_mobile_v3.0_SLANet_infer.tar -C ~/.paddlex/official_models/"
            )
            raise RuntimeError(f"初始化失败：{str(e)}\n模型缺失可执行：\n{model_cmd}")

    def process_pdf(self) -> tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
        """Run structure analysis on the PDF, page by page.

        Returns:
            tuple: (per-page markdown dicts, per-page image dicts); pages with
            no recognisable content contribute empty dicts to both lists.
        """
        pdf_filename = Path(self.config.pdf_abs_path).name
        print(f"\n📄 [1/2] 处理PDF：{pdf_filename}")
        print(f"  🔧 处理配置：线程数={self.config.max_cpu_threads}，自动识别文本/图片")
        output_results = self.ppstructure.predict(
            input=self.config.pdf_abs_path,
            return_original=True
        )

        markdown_list = []
        markdown_images = []
        total_pages = len(output_results)
        print(f"  📑 PDF总页数：{total_pages}（逐页处理...）")

        for page_idx, res in enumerate(output_results, 1):
            print(f"    📄 处理第 {page_idx}/{total_pages} 页...")
            md_info = res.markdown
            if md_info:
                markdown_list.append(md_info)
                page_images = md_info.get("markdown_images", {})
                markdown_images.append(page_images)
                print(f"      ✅ 第 {page_idx} 页完成：文本块={len(md_info.get('rec_texts', []))}，图片数={len(page_images)}")
            else:
                # Keep both lists page-aligned even for empty pages.
                markdown_list.append({})
                markdown_images.append({})
                print(f"      ⚠️  第 {page_idx} 页无有效内容（跳过）")

        print(f"\n✅ PDF处理完成：有效页数={len(markdown_list)}，总图片数={sum(len(i) for i in markdown_images)}")
        return markdown_list, markdown_images

    def save_results(self, markdown_list, markdown_images):
        """Write the concatenated Markdown file and all extracted images."""
        print(f"\n💾 [2/2] 保存Markdown结果到：{self.result_save_dir}")
        pdf_stem = Path(self.config.pdf_abs_path).stem

        # Save the Markdown file.
        print(f"  📝 保存Markdown文件...")
        md_text = self.ppstructure.concatenate_markdown_pages(markdown_list)
        md_path = Path(self.result_save_dir) / f"{pdf_stem}.md"
        with open(md_path, "w", encoding="utf-8") as f:
            f.write(md_text)
        print(f"  ✅ Markdown保存成功：{md_path}（大小：{os.path.getsize(md_path)} 字节）")

        # Save images under the relative paths referenced by the Markdown.
        print(f"  📷 保存图片（共{sum(len(i) for i in markdown_images)}张）...")
        total_saved = 0
        for page_idx, img_dict in enumerate(markdown_images, 1):
            if not img_dict:
                continue
            for img_idx, (rel_path, img) in enumerate(img_dict.items(), 1):
                img_path = Path(self.result_save_dir) / rel_path
                img_path.parent.mkdir(parents=True, exist_ok=True)
                img.save(img_path)
                total_saved += 1
                print(f"    ✅ 图片 {total_saved}：第{page_idx}页-{img_idx}张 → {img_path}")

        if total_saved > 0:
            print(f"  ✅ 图片保存完成：共 {total_saved} 张")
        else:
            print(f"  ℹ️  无图片需保存")

    def clean_temp(self):
        """Temp-file cleanup notice (PPStructureV3 manages its own temp files)."""
        print(f"\n🧹 清理临时文件...")
        print(f"✅ 临时文件由PPStructureV3自动管理（无需手动清理）")

    def run(self) -> str:
        """Execute the full conversion; return the generated Markdown file path.

        Raises:
            Exception: re-raises any processing error so batch callers can
                catch it and skip the file.
        """
        start_time = time.time()
        pdf_filename = Path(self.config.pdf_abs_path).name
        print("=" * 80)
        print("🎉 PDF转Markdown任务启动")
        print(f"📄 源PDF：{self.config.pdf_abs_path}")
        print(f"💾 Markdown输出目录：{self.result_save_dir}")
        print(f"⏰ 启动时间：{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        print("=" * 80)

        try:
            markdown_list, markdown_images = self.process_pdf()
            self.save_results(markdown_list, markdown_images)
            self.clean_temp()

            cost_time = round(time.time() - start_time, 2)
            md_path = Path(self.result_save_dir) / f"{Path(self.config.pdf_abs_path).stem}.md"
            print(f"\n" + "=" * 80)
            print(f"🎉 PDF转Markdown完成！")
            print(f"⏱️  耗时：{cost_time} 秒")
            print(f"📊 结果：Markdown文件={md_path}")
            print("=" * 80)
            return str(md_path)  # returned for the downstream chunking stage

        except Exception as e:
            self.clean_temp()
            print(f"\n" + "=" * 80)
            print(f"💥 PDF转Markdown失败！")
            print(f"❌ 错误：{str(e)}")
            print("=" * 80)
            raise  # propagate so the batch driver can skip this PDF


# ==================== 2. Markdown清洗与分块相关定义 ====================
@dataclass
class ChunkResult:
    """One cleaned chunk, in the exact output format the caller specified."""
    id: str                      # globally unique chunk ID
    text: str                    # cleaned chunk text
    metadata: Dict[str, Any]     # metadata (includes the industry category)
    char_count: int              # number of characters in the text
    title_path: str              # title path (industry levels + Markdown headers)
    clean_status: str            # cleaning status (success/warning)


class MarkdownCleanChunker:
    """Clean and chunk the Markdown files produced by the PDF→Markdown stage.

    Per file: coarse split on Markdown headers, semantic fine split with a
    BGE embedding model, text cleaning, then emission of ChunkResult records.
    batch_process_md() drives all files and writes one JSON result file.
    """

    def __init__(self, md_output_root: str, final_output_root: str):
        self.md_output_root = Path(md_output_root).resolve()  # Markdown root (output of the PDF stage)
        self.final_output_root = Path(final_output_root).resolve()  # root for the final chunk JSON
        self.embedding_model = None  # embedding model used for semantic chunking
        self.semantic_splitter = None  # semantic chunker instance
        self.global_chunk_counter = 1  # global chunk-ID counter
        self.create_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.total_processed_md = 0  # number of Markdown files processed
        self.total_valid_chunks = 0  # total number of valid chunks

    def _is_valid_model_dir(self, model_dir: str) -> bool:
        """Return True when the local embedding-model directory looks complete."""
        required_files = ["config.json", "modules.json", "pytorch_model.bin", "tokenizer_config.json", "vocab.txt"]
        return all(os.path.exists(os.path.join(model_dir, f)) for f in required_files)

    def _load_embedding_model(self):
        """Load the BGE embedding model, downloading via a mirror if needed.

        Returns:
            str: local directory containing the model files.

        Raises:
            ImportError: when huggingface_hub is not installed.
            RuntimeError: when the download fails.
        """
        print(f"\n🔍 初始化语义分块模型：{EMBEDDING_MODEL_NAME}")
        os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

        # Prefer a complete local copy; wipe and re-download a partial one.
        if os.path.exists(EMBEDDING_MODEL_LOCAL_DIR):
            if self._is_valid_model_dir(EMBEDDING_MODEL_LOCAL_DIR):
                print(f"✅ 加载本地模型：{EMBEDDING_MODEL_LOCAL_DIR}")
                return EMBEDDING_MODEL_LOCAL_DIR
            else:
                print(f"⚠️ 本地模型残缺，清理后重新下载")
                shutil.rmtree(EMBEDDING_MODEL_LOCAL_DIR)

        # Download the model snapshot.
        try:
            from huggingface_hub import snapshot_download
            os.makedirs(Path(EMBEDDING_MODEL_LOCAL_DIR).parent, exist_ok=True)
            snapshot_download(
                repo_id=EMBEDDING_MODEL_NAME,
                local_dir=EMBEDDING_MODEL_LOCAL_DIR,
                local_dir_use_symlinks=False,
                ignore_patterns=["*.onnx", "*.tflite"],
                max_workers=4
            )
            print(f"✅ 模型下载完成：{EMBEDDING_MODEL_LOCAL_DIR}")
            return EMBEDDING_MODEL_LOCAL_DIR
        except ImportError:
            raise ImportError("❌ 缺少依赖：pip install huggingface_hub")
        except Exception as e:
            raise RuntimeError(f"模型下载失败：{str(e)}")

    def initialize(self):
        """Initialise the embedding model and the semantic splitter."""
        print(f"\n" + "=" * 80)
        print("📌 启动Markdown清洗与分块模块")
        print(f"   - Markdown根目录：{self.md_output_root}")
        print(f"   - 最终分块输出目录：{self.final_output_root}")
        print("=" * 80)

        # Load the embedding model (CPU).
        model_path = self._load_embedding_model()
        print("🔄 加载嵌入模型（CPU模式适配）...")
        # model_kwargs are forwarded to SentenceTransformer's constructor;
        # batch_size and normalize_embeddings are encode-time options and must
        # go through encode_kwargs — passing them in model_kwargs makes
        # SentenceTransformer.__init__ fail with an unexpected-keyword error.
        self.embedding_model = HuggingFaceEmbeddings(
            model_name=model_path,
            model_kwargs={"device": "cpu"},
            encode_kwargs={"batch_size": 8, "normalize_embeddings": True}
        )

        # Build the semantic splitter on top of the embedding model.
        self.semantic_splitter = SemanticChunker(
            embeddings=self.embedding_model,
            buffer_size=SEMANTIC_BUFFER_SIZE,
            add_start_index=True  # keep each chunk's start offset in the source Markdown
        )
        print(f"✅ 语义分块器初始化完成（缓冲大小：{SEMANTIC_BUFFER_SIZE}）")

    def _extract_industry_from_md_path(self, md_file_path: str) -> str:
        """Derive the industry category purely from the file's folder hierarchy."""
        md_path = Path(md_file_path).resolve()
        # Path of the file relative to the Markdown output root.
        md_rel_path = md_path.relative_to(self.md_output_root)
        # Join folder levels (e.g. "传媒娱乐/直播") with '-' as the category.
        folder_levels = list(md_rel_path.parent.parts)
        if not folder_levels:
            return "未分类"
        return "-".join(folder_levels)

    def _clean_text(self, text: str) -> tuple[str, str]:
        """Clean one chunk of text.

        Returns:
            tuple[str, str]: (cleaned text, status); cleaned text is "" and the
            status a warning when the input is empty or too short.
        """
        if not isinstance(text, str) or len(text.strip()) == 0:
            return "", "warning: 空文本"
        # Collapse blank lines and whitespace; strip invisible control chars.
        clean_text = re.sub(r'\n+', '\n', text.strip())
        clean_text = re.sub(r'\s+', ' ', clean_text)
        clean_text = re.sub(r'[\x00-\x1F\x7F]', '', clean_text)
        # Drop chunks too short to be meaningful.
        if len(clean_text) < MIN_VALID_CHAR_COUNT:
            return "", f"warning: 文本过短（{len(clean_text)}字符<{MIN_VALID_CHAR_COUNT}字符）"
        return clean_text, "success"

    def _build_title_path(self, industry: str, header_metadata: Dict[str, str]) -> str:
        """Build "industry > header1 > header2 …" from split metadata (no emoji)."""
        # Collect non-empty Markdown headers (#/##/###) in order.
        md_headers = []
        for level in ["Header 1", "Header 2", "Header 3"]:
            if level in header_metadata and header_metadata[level].strip():
                md_headers.append(header_metadata[level].strip())
        if not md_headers:
            return f"{industry} > 未命名章节"
        return f"{industry} > " + " > ".join(md_headers)

    def _get_header_level(self, header_metadata: Dict[str, str]) -> int:
        """Return the Markdown header level in the metadata (1=#, 2=##, 3=###, 0=none).

        Checks Header 1 first, so the shallowest recorded level wins.
        """
        if "Header 1" in header_metadata:
            return 1
        elif "Header 2" in header_metadata:
            return 2
        elif "Header 3" in header_metadata:
            return 3
        else:
            return 0

    def process_single_md(self, md_file_path: str) -> List["ChunkResult"]:
        """Process one Markdown file and return its chunk results.

        Returns an empty list (and logs) on tiny files, files without headers,
        or any processing error — the batch driver keeps going either way.
        """
        md_file = Path(md_file_path)
        md_filename = md_file.name
        chunk_results = []

        try:
            # Read the Markdown file.
            print(f"\n📄 处理Markdown：{md_filename}")
            print(f"   🔗 源路径：{md_file_path}")
            with open(md_file_path, 'r', encoding='utf-8') as f:
                raw_text = f.read()
            if len(raw_text.strip()) < 100:
                print(f"⚠️ 跳过过小文件：{md_filename}（字符数<100）")
                return []

            # 1. Derive the industry category from the path.
            industry_category = self._extract_industry_from_md_path(md_file_path)
            print(f"   📊 路径提取行业：{industry_category}")

            # 2. Coarse split on Markdown headers.
            header_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=HEADER_SPLIT_RULES)
            header_docs = header_splitter.split_text(raw_text)
            print(f"   📑 Markdown标题块数：{len(header_docs)}（开始语义分块）")
            if len(header_docs) == 0:
                print(f"⚠️ 无有效标题块：{md_filename}（无法分块）")
                return []

            # 3. Semantic fine split + cleaning.
            for header_doc in tqdm(header_docs, desc=f"分块进度：{md_filename}", leave=False):
                semantic_docs = self.semantic_splitter.split_documents([header_doc])
                if len(semantic_docs) == 0:
                    continue

                for sem_doc in semantic_docs:
                    # Clean the chunk; skip anything that fails cleaning.
                    raw_chunk = sem_doc.page_content
                    clean_text, clean_status = self._clean_text(raw_chunk)
                    if clean_status != "success":
                        continue

                    # Metadata in the exact format the caller specified.
                    chunk_metadata = {
                        "source_file": md_filename,  # source Markdown file name
                        "source_path": md_file_path,  # full source Markdown path
                        "industry_category": industry_category,  # category derived from the path
                        "header_level": self._get_header_level(header_doc.metadata),  # Markdown header level
                        "start_index": sem_doc.metadata.get("start_index", 0),  # offset in the source file
                        "create_time": self.create_time  # processing timestamp
                    }

                    # Title path: industry + header hierarchy.
                    title_path = self._build_title_path(industry_category, header_doc.metadata)

                    chunk = ChunkResult(
                        id=f"chunk_{self.global_chunk_counter:06d}",  # 6-digit global ID
                        text=clean_text,
                        metadata=chunk_metadata,
                        char_count=len(clean_text),
                        title_path=title_path,
                        clean_status=clean_status
                    )
                    chunk_results.append(chunk)
                    self.global_chunk_counter += 1

            # Per-file statistics.
            self.total_processed_md += 1
            self.total_valid_chunks += len(chunk_results)
            print(f"✅ Markdown处理完成：{md_filename}（有效分块：{len(chunk_results)}）")
            return chunk_results

        except Exception as e:
            print(f"❌ Markdown处理失败：{md_filename}（错误：{str(e)}）")
            return []

    def batch_process_md(self) -> List["ChunkResult"]:
        """Process every Markdown file, write the combined JSON, return all chunks.

        Raises:
            FileNotFoundError: when no .md files exist under the root.
        """
        # Recursively collect all .md files under the Markdown root.
        print(f"\n🔍 扫描Markdown文件：{self.md_output_root}（含所有子文件夹）")
        md_file_list = list(self.md_output_root.rglob("*.md"))
        if len(md_file_list) == 0:
            raise FileNotFoundError(f"❌ 未找到Markdown文件：{self.md_output_root}")
        print(f"✅ 共发现 {len(md_file_list)} 个Markdown文件")

        # Process each file; failures contribute no chunks.
        all_chunks = []
        for md_file in tqdm(md_file_list, desc="总分块进度（Markdown文件数）"):
            file_chunks = self.process_single_md(str(md_file))
            if file_chunks:
                all_chunks.extend(file_chunks)

        # Write the combined JSON result.
        self.final_output_root.mkdir(parents=True, exist_ok=True)
        json_filename = f"industry_report_chunks_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
        json_path = self.final_output_root / json_filename

        # Dataclasses -> plain dicts for JSON serialisation.
        all_chunks_dict = [asdict(chunk) for chunk in all_chunks]
        with open(json_path, 'w', encoding='utf-8') as f:
            json.dump(all_chunks_dict, f, ensure_ascii=False, indent=2)

        # Overall statistics.
        print(f"\n" + "=" * 80)
        print(f"🎉 Markdown清洗分块批量处理完成！")
        print(f"📊 统计：")
        print(f"   - 处理Markdown文件数：{self.total_processed_md}/{len(md_file_list)}")
        print(f"   - 生成有效分块数：{self.total_valid_chunks}")
        print(f"   - 最终JSON路径：{json_path}")
        print("=" * 80)

        return all_chunks


# ==================== 3. Milvus向量存储模块 ====================
class MilvusVectorStore:
    """Wrapper around a Milvus collection that stores chunk vectors.

    Lifecycle: connect() -> create_collection() -> create_index() ->
    insert_vectors() -> search_demo() -> close().
    """

    def __init__(self, host: str = MILVUS_HOST, port: str = MILVUS_PORT, collection_name: str = MILVUS_COLLECTION_NAME):
        self.host = host
        self.port = port
        self.collection_name = collection_name
        self.collection = None  # set by create_collection()
        self.embedding_model = None  # set by insert_vectors()

    def connect(self):
        """Connect to the Milvus server under the 'default' alias.

        Raises:
            RuntimeError: when the server is unreachable.
        """
        try:
            connections.connect(alias="default", host=self.host, port=self.port)
            print(f"✅ 成功连接Milvus（{self.host}:{self.port}），版本：{utility.get_server_version()}")
        except Exception as e:
            raise RuntimeError(f"Milvus连接失败：{str(e)}\n请检查Milvus服务是否启动（默认端口19530）")

    def create_collection(self):
        """(Re)create the collection for report-chunk vectors (drops any existing one)."""
        # chunk_id links each vector row back to the originating ChunkResult.id.
        fields = [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="chunk_id", dtype=DataType.VARCHAR, max_length=20),
            FieldSchema(name="vec", dtype=DataType.FLOAT_VECTOR, dim=VECTOR_DIM)
        ]
        schema = CollectionSchema(fields=fields, description="行业报告分块向量集合")

        # Full rebuild each run: drop a pre-existing collection with this name.
        if utility.has_collection(self.collection_name):
            utility.drop_collection(self.collection_name)
            print(f"ℹ️  已删除现有集合：{self.collection_name}")

        self.collection = Collection(name=self.collection_name, schema=schema)
        print(f"✅ 创建集合：{self.collection_name}（向量维度：{VECTOR_DIM}）")

    def create_index(self):
        """Create an IVF_FLAT index on the vector field (HDD-friendly settings)."""
        if not self.collection:
            raise RuntimeError("请先创建集合")

        index_params = {
            "index_type": "IVF_FLAT",
            "metric_type": "L2",
            "params": {"nlist": 128}  # HDD-friendly; nlist 128-1024 recommended
        }
        self.collection.create_index(field_name="vec", index_params=index_params)
        print(f"✅ 创建IVF_FLAT索引（参数：{index_params}）")

    def insert_vectors(self, chunks: List[ChunkResult], embedding_model: HuggingFaceEmbeddings) -> Tuple[int, int]:
        """Embed each chunk's text and bulk-insert (chunk_id, vector) rows.

        Chunks whose embedding fails or has the wrong dimension are skipped.

        Returns:
            Tuple[int, int]: (vectors inserted, total chunks given).
        """
        if not chunks:
            print("⚠️ 无分块数据可插入")
            return (0, 0)
        # Consistent with create_index/search_demo: require an existing collection.
        if not self.collection:
            raise RuntimeError("请先创建集合")

        self.embedding_model = embedding_model
        total = len(chunks)
        success_count = 0
        vectors = []
        chunk_ids = []

        print(f"\n📥 开始向Milvus插入{total}个分块向量...")
        for chunk in tqdm(chunks, desc="向量生成与插入"):
            try:
                # Embed; skip any vector with an unexpected dimension.
                vec = embedding_model.embed_query(chunk.text)
                if len(vec) != VECTOR_DIM:
                    print(f"⚠️ 向量维度不匹配（预期{VECTOR_DIM}，实际{len(vec)}）：{chunk.id}")
                    continue

                vectors.append(vec)
                chunk_ids.append(chunk.id)
                success_count += 1
            except Exception as e:
                print(f"⚠️ 向量生成失败（{chunk.id}）：{str(e)}")
                continue

        # Column-based bulk insert (auto_id primary key is omitted).
        if vectors:
            insert_data = [chunk_ids, vectors]
            insert_res = self.collection.insert(insert_data)
            # Flush so inserted rows are sealed/persisted rather than left in a
            # growing buffer before the subsequent load/search.
            self.collection.flush()
            print(f"✅ 插入完成：成功{success_count}/{total}条，主键范围：{min(insert_res.primary_keys)}-{max(insert_res.primary_keys)}")
        else:
            print("⚠️ 无有效向量可插入")

        return (success_count, total)

    def search_demo(self, limit: int = 5):
        """Demonstrate a similarity search using one stored vector as the query."""
        if not self.collection:
            raise RuntimeError("请先创建集合并插入数据")

        self.collection.load()
        print(f"\n🔍 随机查询{limit}个相似向量...")

        try:
            # Fetch one stored row to use as the query vector.
            result = self.collection.query(expr="id >= 0", limit=1, output_fields=["vec"])
            if not result:
                print("⚠️ 集合中无数据，无法演示查询")
                return

            query_vec = [result[0]["vec"]]

            # Higher nprobe = higher recall but slower search.
            search_params = {
                "metric_type": "L2",
                "params": {"nprobe": 10}
            }
            search_res = self.collection.search(
                data=query_vec,
                anns_field="vec",
                param=search_params,
                limit=limit,
                output_fields=["chunk_id"]
            )

            # Print the Top-N hits.
            print(f"\n📊 相似查询结果（Top{limit}）：")
            for i, res in enumerate(search_res[0]):
                print(f"  第{i+1}名：分块ID={res.entity.get('chunk_id')}，距离={round(res.distance, 4)}")

        except Exception as e:
            print(f"⚠️ 查询演示失败：{str(e)}")

    def close(self):
        """Disconnect the 'default' Milvus connection."""
        connections.disconnect("default")
        print("\n✅ 已断开Milvus连接")


# ==================== 4. 整体流程调度（PDF→MD→分块→Milvus连贯执行） ====================
def main():
    """End-to-end pipeline: PDF → Markdown → clean/chunk → Milvus vectors."""
    # Record the start time locally. The original read a module-level global
    # that only exists when run through the __main__ guard, so importing and
    # calling main() raised NameError at the final summary.
    start_time = time.time()

    # Command-line arguments (single place for all input/output paths).
    parser = argparse.ArgumentParser(description="PDF→Markdown→清洗分块→Milvus向量存储 完整流程工具")
    # Core path arguments.
    parser.add_argument("--source-root", type=str, required=True,
                      help="源PDF根目录（含所有子文件夹，如'/opt/test/行业报告'）")
    parser.add_argument("--md-output-root", type=str, default="/opt/test/行业报告全量处理结果",
                      help="PDF转Markdown的输出根目录（默认：/opt/test/行业报告全量处理结果）")
    parser.add_argument("--final-output-root", type=str, default="/opt/test/行业报告分块结果",
                      help="最终分块JSON与Milvus向量的输出根目录（默认：/opt/test/行业报告分块结果）")
    # Auxiliary arguments.
    parser.add_argument("--max-threads", type=int, default=16,
                      help="PDF转Markdown的CPU线程数（默认：16，8核推荐16）")
    args = parser.parse_args()

    # Normalise all roots to absolute paths up front.
    source_root = Path(args.source_root).resolve()
    md_output_root = Path(args.md_output_root).resolve()
    final_output_root = Path(args.final_output_root).resolve()

    # Print the overall task configuration.
    print("=" * 100)
    print("🎉 PDF→Markdown→清洗分块→Milvus向量存储 完整流程启动")
    print(f"📌 任务配置：")
    print(f"   - 源PDF根目录：{source_root}")
    print(f"   - Markdown输出目录：{md_output_root}")
    print(f"   - 最终分块输出目录：{final_output_root}")
    print(f"   - CPU线程数：{args.max_threads}")
    print(f"   - Milvus地址：{MILVUS_HOST}:{MILVUS_PORT}")
    print(f"   - Milvus集合：{MILVUS_COLLECTION_NAME}")
    print(f"⏰ 启动时间：{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print("=" * 100)

    try:
        # -------------------------- Stage 1: batch PDF → Markdown --------------------------
        print(f"\n" + "=" * 80)
        print("📝 阶段1：批量PDF转Markdown")
        print("=" * 80)

        # Recursively collect every PDF under the source root.
        print(f"🔍 扫描源PDF：{source_root}（含所有子文件夹）")
        pdf_files = list(source_root.rglob("*.pdf"))
        if len(pdf_files) == 0:
            raise FileNotFoundError(f"❌ 未找到PDF文件：{source_root}")
        print(f"✅ 共发现 {len(pdf_files)} 个PDF文件")

        # Convert each PDF; a failure skips that file, not the whole run.
        for idx, pdf_file in enumerate(pdf_files, 1):
            pdf_abs_path = str(pdf_file.resolve())
            print(f"\n" + "=" * 80)
            print(f"📄 处理第 {idx}/{len(pdf_files)} 个PDF：")
            print(f"   源路径：{pdf_abs_path}")
            print("=" * 80)

            pdf2md_config = PDF2MDConfig(
                pdf_abs_path=pdf_abs_path,
                source_root_abs_path=str(source_root),
                md_output_root_abs_path=str(md_output_root),
                max_cpu_threads=args.max_threads
            )

            try:
                processor = PDF2MarkdownProcessor(pdf2md_config)
                processor.run()
            except Exception as e:
                print(f"⚠️  第 {idx} 个PDF跳过（错误：{str(e)}），继续处理下一个")
                continue

        # -------------------------- Stage 2: batch Markdown cleaning & chunking --------------------------
        print(f"\n" + "=" * 80)
        print("📝 阶段2：批量Markdown清洗与分块")
        print("=" * 80)

        chunker = MarkdownCleanChunker(
            md_output_root=str(md_output_root),
            final_output_root=str(final_output_root)
        )
        chunker.initialize()

        # Chunk every Markdown file and collect the results for stage 3.
        all_chunks = chunker.batch_process_md()

        # -------------------------- Stage 3: store chunk vectors in Milvus --------------------------
        print(f"\n" + "=" * 80)
        print("📝 阶段3：分块向量存入Milvus")
        print("=" * 80)

        milvus_store = MilvusVectorStore(
            host=MILVUS_HOST,
            port=MILVUS_PORT,
            collection_name=MILVUS_COLLECTION_NAME
        )
        milvus_store.connect()
        milvus_store.create_collection()
        milvus_store.create_index()

        # Reuse the chunker's embedding model for vector generation.
        success_count, total_count = milvus_store.insert_vectors(
            chunks=all_chunks,
            embedding_model=chunker.embedding_model
        )

        # Demonstrate a similarity search, then disconnect.
        milvus_store.search_demo(limit=5)
        milvus_store.close()

        # -------------------------- Final summary --------------------------
        print(f"\n" + "=" * 100)
        print("🎉 完整流程全部完成！")
        print(f"📊 最终总统计：")
        print(f"   - 源PDF根目录：{source_root}")
        print(f"   - 处理PDF数：{len(pdf_files)}（成功{chunker.total_processed_md}个）")
        print(f"   - 生成Markdown数：{chunker.total_processed_md}")
        print(f"   - 生成有效分块数：{chunker.total_valid_chunks}")
        print(f"   - 成功入库向量数：{success_count}/{total_count}")
        print(f"   - Markdown目录：{md_output_root}")
        print(f"   - 分块JSON目录：{final_output_root}")
        print(f"   - Milvus集合：{MILVUS_COLLECTION_NAME}")
        print(f"⏰ 整体耗时：{round(time.time() - start_time, 2)} 秒")
        print("=" * 100)

    except Exception as e:
        print(f"\n" + "=" * 100)
        print(f"💥 完整流程失败！")
        print(f"❌ 错误原因：{str(e)}")
        print("=" * 100)
        # Raise SystemExit explicitly instead of relying on the site-injected
        # exit() builtin, which is absent under `python -S`.
        raise SystemExit(1)


if __name__ == "__main__":
    # Kept for backward compatibility: overall wall-clock baseline (main()
    # also records its own start time).
    start_time = time.time()
    # Suppress noisy third-party logging; show errors only.
    import logging
    logging.basicConfig(level=logging.ERROR)
    # NOTE(review): the previous "dependency check" here tried to import `re`
    # and `typing.Tuple` — both stdlib and already imported at the top of the
    # file, so it could never fail — and printed a misleading "pip install
    # regex" hint. Removed as dead code.
    main()