from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_chroma import Chroma
from langchain_community.document_loaders import Docx2txtLoader
from langchain_huggingface import HuggingFaceEmbeddings
import os
import re
import warnings

# Suppress LangChain deprecation warnings emitted by langchain_core
warnings.filterwarnings("ignore", category=DeprecationWarning, module="langchain_core")

# ================== Configuration ==================
# Directory holding the source documents (Chinese electric-power encyclopedia)
DOCS_DIR = r"E:\2025summerJINLIUelectirc\ElecSolutionBB\data\中国电力知识百科全书"
# Root directory under which one Chroma store per volume is persisted
VECTORDB_DIR = r"E:\2025summerJINLIUelectirc\ElecSolutionBB\VectorDB"
# Local path of the embedding model (path suggests BAAI bge-small-zh-v1.5)
MODEL_PATH = r"E:\BAAIbge-small-zh-v1.5"

# Base file name (without extension) -> logical volume/collection name.
# Note several physical files map onto the same logical volume.
FILE_TO_VOLUME = {
    "elec_basis": "map_electrical_basis",
    "power_system": "map_power_system",
    "nuclear_energy": "map_nuclear_energy",
    "transmission1": "map_transmission",
    "transmission2": "map_transmission",
    "electricity_use1": "map_electricity_use",
    "electricity_use2": "map_electricity_use",
    "comprehensive1": "map_comprehensive",
    "comprehensive2": "map_comprehensive"
}


# ================== Document loading & classification ==================
def load_and_classify_documents(docs_dir=None, file_map=None):
    """Scan a directory and group recognized document files by volume.

    Args:
        docs_dir (str | None): Directory to scan. Defaults to DOCS_DIR.
        file_map (dict | None): Mapping of base file name (no extension)
            to volume name. Defaults to FILE_TO_VOLUME.

    Returns:
        dict[str, list[str]]: Volume name -> list of full file paths.
        Files whose base name is not in the mapping are ignored.
    """
    if docs_dir is None:
        docs_dir = DOCS_DIR
    if file_map is None:
        file_map = FILE_TO_VOLUME

    print(f"📂 扫描目录: {docs_dir}")
    # Case-insensitive extension match (the original endswith check missed
    # upper-case extensions such as .DOCX); skip the combined encyclopedia
    # files whose names start with '中国电力百科全书'.
    all_files = [f for f in os.listdir(docs_dir)
                 if f.lower().endswith(('.docx', '.pdf'))
                 and not f.startswith('中国电力百科全书')]
    print(f"🔍 找到 {len(all_files)} 个文件")

    # Group mapped files by their target volume.
    volume_files = {}
    for file_name in all_files:
        # Base name without extension is the mapping key.
        base_name = os.path.splitext(file_name)[0]
        volume_name = file_map.get(base_name)
        if volume_name is not None:
            file_path = os.path.join(docs_dir, file_name)
            volume_files.setdefault(volume_name, []).append(file_path)

    # Report the classification result.
    print("\n📊 文档分类结果:")
    for volume, files in volume_files.items():
        print(f"  - {volume}: {len(files)} 个文件")
        for file_path in files:
            print(f"    - {os.path.basename(file_path)}")

    return volume_files


# ================== Process a single volume ==================
def process_volume(volume_name, file_paths, embeddings=None):
    """Build and persist the Chroma vector store for one volume.

    Args:
        volume_name (str): Collection name; also used as the sub-directory
            name under VECTORDB_DIR.
        file_paths (list[str]): Full paths of the volume's source files.
        embeddings: Optional pre-built embedding model. When None, a new
            HuggingFaceEmbeddings instance is loaded from MODEL_PATH —
            loading the model is the slow part, so callers processing many
            volumes can build it once and pass it in.

    Returns:
        int: Number of text chunks persisted (0 when nothing was indexed).
    """
    print(f"\n{'=' * 50}")
    print(f"📚 正在处理: {volume_name}")
    print(f"📄 文件数量: {len(file_paths)}")

    # One persistence directory per volume.
    volume_db_dir = os.path.join(VECTORDB_DIR, volume_name)
    os.makedirs(volume_db_dir, exist_ok=True)

    if embeddings is None:
        embeddings = HuggingFaceEmbeddings(
            model_name=MODEL_PATH,
            model_kwargs={"local_files_only": True}
        )

    # The splitter is loop-invariant: build it once instead of per file.
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=500,
        chunk_overlap=100,
        length_function=len,
        separators=["\n\n", "\n", "。", "？", "！", " ", ""]
    )

    all_chunks = []
    for file_path in file_paths:
        if not file_path.lower().endswith('.docx'):
            # load_and_classify_documents also collects .pdf files, but no
            # PDF loader is wired in here — make the skip explicit instead
            # of silently dropping the file.
            print(f"⚠️ 跳过不支持的文件: {os.path.basename(file_path)}")
            continue
        try:
            print(f"  - 加载文件: {os.path.basename(file_path)}")
            docs = Docx2txtLoader(file_path).load()
            chunks = splitter.split_documents(docs)
            all_chunks.extend(chunks)
            print(f"    → 生成 {len(chunks)} 个文本块")
        except Exception as e:
            # Best-effort: report the failure and continue with other files.
            print(f"❌ 错误: {e}")

    if not all_chunks:
        print("⚠️ 未生成内容，跳过向量数据库创建")
        return 0

    # Persist the chunks; cosine distance matches the bge embedding family.
    Chroma.from_documents(
        documents=all_chunks,
        embedding=embeddings,
        persist_directory=volume_db_dir,
        collection_name=volume_name,
        collection_metadata={"hnsw:space": "cosine"}
    )
    print(f"💾 向量数据库已保存: {volume_db_dir}")
    return len(all_chunks)


# ================== Query interface ==================
def query_kb(volume, question, k=3):
    """Run a similarity search against one persisted knowledge base.

    Args:
        volume (str): Knowledge base name (prefixed with "map_").
        question (str): Query text.
        k (int): Number of results to return.

    Returns:
        A list of {"内容": ..., "来源": ...} dicts on success, or a
        single-element list holding an {"error": ...} dict on failure.
    """
    volume_dir = os.path.join(VECTORDB_DIR, volume)
    if not os.path.exists(volume_dir):
        return [{"error": f"知识库 '{volume}' 不存在"}]

    try:
        # The embedding model must match the one used at index time.
        embedder = HuggingFaceEmbeddings(
            model_name=MODEL_PATH,
            model_kwargs={"local_files_only": True}
        )

        # Re-open the persisted Chroma collection for this volume.
        store = Chroma(
            persist_directory=volume_dir,
            embedding_function=embedder,
            collection_name=volume
        )

        hits = store.similarity_search(question, k=k)

        formatted = []
        for hit in hits:
            # Collapse whitespace runs so each chunk reads as one line.
            text = re.sub(r'\s+', ' ', hit.page_content).strip()
            if len(text) > 500:
                text = text[:500] + "..."
            # Report just the file name, not the full path.
            origin = os.path.basename(hit.metadata.get("source", "未知来源"))
            formatted.append({"内容": text, "来源": origin})
        return formatted
    except Exception as e:
        return [{"error": f"查询错误: {str(e)}"}]


# ================== Main pipeline ==================
if __name__ == "__main__":
    print("=" * 50)
    print("🔧 知识库构建系统")
    print("=" * 50)
    print(f"📂 文档目录: {DOCS_DIR}")
    print(f"🤖 向量库目录: {VECTORDB_DIR}")
    print(f"🧠 嵌入模型: {MODEL_PATH}")
    print("=" * 50)

    # Load documents and group them by volume
    volume_files = load_and_classify_documents()

    # Build one vector store per volume, recording chunk counts
    volume_stats = {}
    for volume_name, file_paths in volume_files.items():
        chunk_count = process_volume(volume_name, file_paths)
        volume_stats[volume_name] = chunk_count

    # Final report
    print("\n" + "=" * 50)
    print("✅ 知识库创建成功!")
    print("=" * 50)
    print("📚 知识库列表:")
    for volume, count in volume_stats.items():
        print(f"  - {volume}: {count} 个文本块")

    print("\n💡 API 使用方法:")
    print("query_kb('知识库名称', '查询问题')")
    print("可用知识库: " + ", ".join(sorted(volume_stats.keys())))
    print("=" * 50)

    # Smoke-test query against one of the freshly built stores
    print("\n🧪 测试查询:")
    test_question = "怎么余热发电?"
    test_results = query_kb("map_electricity_use", test_question, k=1)
    print(f"查询问题: {test_question}")

    # Pretty-print the formatted results (or the error entry)
    for i, res in enumerate(test_results, 1):
        if "error" in res:
            print(f"结果 {i}: ❌ 错误 - {res['error']}")
        else:
            print(f"结果 {i}:")
            print(f"  来源: {res['来源']}")
            print(f"  内容: {res['内容']}")
    print("=" * 50)