# ingest_documents.py

import os
import argparse
from typing import List

# --- 依赖库导入 ---
# Langchain 和文档加载器
from langchain.docstore.document import Document
# 新增 Markdown 加载器
from langchain_community.document_loaders import UnstructuredWordDocumentLoader, UnstructuredFileLoader, CSVLoader, UnstructuredMarkdownLoader

# 文本处理与向量化
# 假设这些模块在你的项目路径下
from chatchat.server.file_rag.text_splitter.chinese_text_splitter import ChineseTextSplitter
from sentence_transformers import SentenceTransformer

# Milvus 向量数据库
from pymilvus import connections, utility, Collection, FieldSchema, CollectionSchema, DataType

# --- Custom loader implementations ---
# The loader classes are included inline so this script can run standalone.
# In a larger project, move them into a dedicated `loaders` package.
import numpy as np
import tqdm
from PIL import Image
from io import BytesIO
import fitz  # PyMuPDF
from pptx import Presentation
import csv

# OCR 实例获取助手
def get_ocr():
    """Create and return a RapidOCR engine instance.

    Raises:
        ImportError: if the ``rapidocr-onnxruntime`` package is not installed.
    """
    try:
        from rapidocr_onnxruntime import RapidOCR
    except ImportError:
        raise ImportError("请先安装 rapidocr-onnxruntime: pip install rapidocr-onnxruntime")
    return RapidOCR()

class RapidOCRPDFLoader(UnstructuredFileLoader):
    """Loader that extracts text from a PDF, OCR-ing embedded images with RapidOCR.

    Page text is collected via PyMuPDF; every embedded image is decoded and
    run through RapidOCR, and the recognized lines are appended to the page
    text. The combined text is handed to unstructured's text partitioner.
    """

    def _get_elements(self) -> List:
        ocr = get_ocr()
        parts: List[str] = []
        desc = f"处理 PDF: {os.path.basename(self.file_path)}"
        # Use the document as a context manager so the file handle is closed
        # deterministically (the previous version leaked the open document).
        with fitz.open(self.file_path) as doc:
            for page in tqdm.tqdm(doc, desc=desc):
                # Plain text layer of the page.
                parts.append(page.get_text("text"))
                # OCR every embedded image on the page.
                for img_info in page.get_images(full=True):
                    xref = img_info[0]  # first tuple entry is the image xref
                    image_bytes = doc.extract_image(xref)["image"]
                    img = Image.open(BytesIO(image_bytes))
                    result, _ = ocr(np.array(img))
                    if result:
                        # Each OCR result row is (box, text, score); keep the text.
                        parts.append("\n" + "\n".join(line[1] for line in result))
        resp = "".join(parts)
        # Local import mirrors the original lazy-loading of unstructured.
        from unstructured.partition.text import partition_text
        return partition_text(text=resp, **self.unstructured_kwargs)

class RapidOCRLoader(UnstructuredFileLoader):
    """Loader that runs RapidOCR over an image file and partitions the text."""

    def _get_elements(self) -> List:
        from unstructured.partition.text import partition_text

        engine = get_ocr()
        ocr_rows, _ = engine(self.file_path)
        # Each OCR row is (box, text, score); join the text parts line by line.
        extracted = "\n".join(row[1] for row in ocr_rows) if ocr_rows else ""
        return partition_text(text=extracted, **self.unstructured_kwargs)

class RapidOCRPPTLoader(UnstructuredFileLoader):
    """Loader for .pptx files: collects shape text and OCRs embedded pictures."""

    def _get_elements(self) -> List:
        from unstructured.partition.text import partition_text

        engine = get_ocr()
        presentation = Presentation(self.file_path)
        pieces: List[str] = []
        progress_desc = f"处理 PPT: {os.path.basename(self.file_path)}"
        for slide in tqdm.tqdm(presentation.slides, desc=progress_desc):
            for shape in slide.shapes:
                # Text frames expose a `text` attribute.
                if hasattr(shape, "text"):
                    pieces.append(shape.text + "\n")
                # Picture shapes expose an `image` attribute with raw bytes.
                if hasattr(shape, 'image'):
                    picture = Image.open(BytesIO(shape.image.blob))
                    ocr_rows, _ = engine(np.array(picture))
                    if ocr_rows:
                        pieces.append("\n".join(row[1] for row in ocr_rows) + "\n")
        return partition_text(text="".join(pieces), **self.unstructured_kwargs)

class FilteredCSVLoader(CSVLoader):
    """CSV loader that flattens every column of each row into one Document.

    Each row becomes a Document whose content is "column: value" lines, with
    the source path and row index recorded in the metadata.
    """

    def load(self) -> List[Document]:
        """Read the CSV file and return one Document per data row.

        Returns:
            List[Document]: one document per row, in file order.

        Raises:
            RuntimeError: wrapping any error hit while reading the file.
        """
        docs: List[Document] = []
        try:
            # `newline=""` is required by the csv module; fall back to UTF-8
            # when the loader was constructed without an explicit encoding.
            with open(self.file_path, newline="", encoding=self.encoding or 'utf-8') as csvfile:
                for i, row in enumerate(csv.DictReader(csvfile)):
                    content = "\n".join(f"{k}: {v}" for k, v in row.items())
                    metadata = {"source": self.file_path, "row": i}
                    docs.append(Document(page_content=content, metadata=metadata))
        except Exception as e:
            # Chain the original exception so its traceback is preserved.
            raise RuntimeError(f"加载CSV文件失败 {self.file_path}: {e}") from e
        return docs
# --- Core ingestion logic ---

def get_document_loader(file_path: str):
    """Pick a document loader class based on the file's extension.

    Returns a loader instance for supported extensions, or ``None`` so the
    caller can skip unsupported file types.
    """
    suffix = os.path.splitext(file_path)[1].lower()
    if suffix in (".jpg", ".jpeg", ".png", ".bmp"):
        return RapidOCRLoader(file_path)
    if suffix == ".pdf":
        return RapidOCRPDFLoader(file_path)
    if suffix == ".docx":
        return UnstructuredWordDocumentLoader(file_path)
    if suffix == ".pptx":
        return RapidOCRPPTLoader(file_path)
    if suffix == ".csv":
        return FilteredCSVLoader(file_path)
    if suffix == ".md":
        return UnstructuredMarkdownLoader(file_path)
    # Unsupported extension: signal the caller to skip this file.
    return None

def connect_milvus():
    """Open the default connection to the Milvus server from config.py."""
    host, port = config.MILVUS_HOST, config.MILVUS_PORT
    print(f"正在连接 Milvus: {host}:{port}...")
    connections.connect("default", host=host, port=port)
    print("Milvus 连接成功。")

def init_milvus_collection():
    """Return the Milvus collection, creating it (plus vector index) if absent."""
    name = config.MILVUS_COLLECTION
    if utility.has_collection(name):
        print(f"集合 '{name}' 已存在。")
        return Collection(name)

    print(f"正在创建集合: '{name}'...")
    schema = CollectionSchema(
        [
            # auto_id lets Milvus assign primary keys on insert.
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=65535, description="文本块内容"),
            FieldSchema(name="source", dtype=DataType.VARCHAR, max_length=1024, description="文档来源路径"),
            FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=config.VECTOR_DIMENSION, description="文本向量"),
        ],
        description="RAG 文档知识库集合",
    )
    collection = Collection(name, schema)

    print("正在为向量字段创建索引...")
    # IP metric pairs with the normalized embeddings produced at ingest time.
    collection.create_index(
        field_name="vector",
        index_params={"index_type": "IVF_FLAT", "metric_type": "IP", "params": {"nlist": 128}},
    )
    print("索引创建成功。")
    return collection

def process_and_ingest(file_path: str, collection, model, splitter):
    """Load one file, split it, embed the chunks and insert them into Milvus.

    Returns:
        int: number of chunks inserted; 0 when the file was skipped or failed.
    """
    try:
        print(f"\n--- 正在处理文件: {file_path} ---")
        loader = get_document_loader(file_path)
        if not loader:
            print(f"跳过不支持的文件类型: {file_path}")
            return 0

        # Load, merge into one text body, then split into chunks.
        documents = loader.load()
        merged_text = "\n\n".join(doc.page_content for doc in documents)
        chunks = splitter.split_text(merged_text)
        if not chunks:
            print("未从文件中提取出任何文本块，跳过。")
            return 0

        # Normalized embeddings match the collection's IP metric.
        vectors = model.encode(chunks, show_progress_bar=False, normalize_embeddings=True)

        rows = [
            {"text": chunk, "source": file_path, "vector": vec}
            for chunk, vec in zip(chunks, vectors)
        ]
        collection.insert(rows)
        print(f"成功将 {len(chunks)} 个文本块从 {os.path.basename(file_path)} 插入数据库。")
        return len(chunks)

    except Exception as e:
        # Best-effort: report the failure and keep processing other files.
        print(f"处理文件 {file_path} 时发生错误: {e}")
        return 0

def main(root_path):
    """Walk root_path recursively and ingest every supported document into Milvus."""
    if not os.path.isdir(root_path):
        print(f"错误: 提供的路径不是一个目录 {root_path}")
        return

    try:
        # Set up the connection, target collection, embedding model and splitter.
        connect_milvus()
        collection = init_milvus_collection()
        model = SentenceTransformer(config.EMBEDDING_MODEL)
        splitter = ChineseTextSplitter(chunk_size=config.CHUNK_SIZE, chunk_overlap=config.CHUNK_OVERLAP)

        # Recurse through the tree, ingesting file by file.
        total = 0
        for dirpath, _, filenames in os.walk(root_path):
            for name in filenames:
                total += process_and_ingest(os.path.join(dirpath, name), collection, model, splitter)

        # Flush once at the end so inserts become durable/searchable.
        if total > 0:
            print("\n所有文件处理完毕，正在刷新 Milvus 数据...")
            collection.flush()
            print("Milvus 数据刷新完成。")
            print(f"本次运行共计入库 {total} 个文本块。")
            print(f"当前集合中总实体数: {collection.num_entities}")
        else:
            print("\n本次运行没有新的文本块入库。")

    except Exception as e:
        import traceback
        print(f"\n发生严重错误: {e}")
        traceback.print_exc()
    finally:
        # NOTE(review): some pymilvus versions return (alias, conn) tuples from
        # list_connections(), which would make this membership test never match
        # — confirm against the pinned pymilvus version.
        if "default" in connections.list_connections():
            connections.disconnect("default")
            print("\n已断开与 Milvus 的连接。")


if __name__ == "__main__":
    # CLI takes a directory root (--path) rather than a single file.
    parser = argparse.ArgumentParser(description="将指定目录下的所有文档入库到 Milvus 用于 RAG。")
    parser.add_argument("--path", type=str, required=True, help="需要处理的文档所在的根目录路径。")
    args = parser.parse_args()

    try:
        # config must be importable before main() runs: connect_milvus() and
        # friends read MILVUS_* / EMBEDDING_MODEL / CHUNK_* settings from it.
        import config
    except ImportError:
        print("错误: 未找到 config.py 文件。请创建该文件并填入您的配置。")
        # `exit()` is injected by the `site` module and is not guaranteed to
        # exist (e.g. under `python -S`); raise SystemExit directly instead.
        raise SystemExit(1)

    main(args.path)