import os
import pandas as pd
import re
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_chroma import Chroma
from langchain_huggingface import HuggingFaceEmbeddings
import warnings

# Ignore LangChain deprecation warnings
warnings.filterwarnings("ignore", category=DeprecationWarning, module="langchain_core")

# ================== Configuration parameters ==================
# Directory holding the safety-regulation Excel workbooks to ingest.
SAFETY_REGULATIONS_DIR = r"E:\2025summerJINLIUelectirc\ElecSolutionBB\data\电业安全工作规程"
# Root directory under which Chroma vector stores are persisted.
VECTORDB_DIR = r"E:\2025summerJINLIUelectirc\ElecSolutionBB\VectorDB"
# Local filesystem path of the embedding model (BAAI bge-small-zh-v1.5).
MODEL_PATH = r"E:\BAAIbge-small-zh-v1.5"

# Name of the vector-store collection (also used as the on-disk sub-directory).
COLLECTION_NAME = "map_safety_regulations"

# ================== Text-chunking configuration ==================
# Recursive splitter tuned for Chinese text: prefers paragraph/newline breaks,
# then Chinese sentence punctuation, falling back to spaces/characters.
# 500-char chunks with 100-char overlap between adjacent chunks.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,
    chunk_overlap=100,
    length_function=len,
    separators=["\n\n", "\n", "。", "？", "！", " ", ""]
)


# ================== Process the Excel question bank ==================
def _cell(row, col):
    """Return row[col] if the column is mapped, present in the row, and
    non-null; otherwise None.

    Centralizes the NaN guard so missing cells are skipped instead of being
    rendered as the literal string "nan" (the original only guarded option
    columns this way).
    """
    if col and col in row and pd.notna(row[col]):
        return row[col]
    return None


def _map_columns(columns):
    """Identify the semantic role of each dataframe column by keyword match.

    Parameters:
        columns: iterable of (already-stripped, string) column labels.

    Returns:
        Dict with keys question/answer/knowledge/keywords/category/regulation
        (each a column label or None) plus "options" (list of A–I columns).
        The first matching column wins for each role.
    """
    col_map = {
        "question": None,    # question stem
        "options": [],       # option columns (A–I)
        "answer": None,      # answer
        "knowledge": None,   # knowledge category
        "keywords": None,    # question keywords
        "category": None,    # professional sub-category
        "regulation": None,  # regulation name and clause text
    }
    for col in columns:
        col_lower = col.lower()
        if "题干" in col_lower and not col_map["question"]:
            col_map["question"] = col
        if "答案" in col_lower and not col_map["answer"]:
            col_map["answer"] = col
        if "知识类别" in col_lower and not col_map["knowledge"]:
            col_map["knowledge"] = col
        if "试题关键词" in col_lower and not col_map["keywords"]:
            col_map["keywords"] = col
        if "专业小类" in col_lower and not col_map["category"]:
            col_map["category"] = col
        if "制度名称及条款内容" in col and not col_map["regulation"]:
            col_map["regulation"] = col
        # Option columns are single letters A–I (matched case-insensitively).
        if re.match(r"^[a-i]$", col_lower):
            col_map["options"].append(col)
    return col_map


def _build_question_text(sheet_name, row, col_map):
    """Assemble one question row into a single labelled text block.

    NaN / missing cells are skipped entirely, so no "nan" text is ever
    embedded into the vector store.
    """
    question_text = f"题型: {sheet_name}\n"

    # Fields rendered before the options block, in display order.
    for key, label in (("knowledge", "知识类别"),
                       ("category", "专业小类"),
                       ("question", "题干")):
        value = _cell(row, col_map[key])
        if value is not None:
            question_text += f"{label}: {value}\n"

    # Options: one "X. text" line per non-empty option column, A first.
    option_text = ""
    for option_col in sorted(col_map["options"]):
        if option_col in row and pd.notna(row[option_col]) and row[option_col] != "":
            option_text += f"{option_col}. {row[option_col]}\n"
    if option_text:
        question_text += f"选项:\n{option_text}"

    # Fields rendered after the options block.
    for key, label in (("answer", "答案"),
                       ("keywords", "试题关键词"),
                       ("regulation", "制度名称及条款内容")):
        value = _cell(row, col_map[key])
        if value is not None:
            question_text += f"{label}: {value}\n"

    return question_text


def process_safety_regulations_excel(file_path):
    """Read every sheet of an exam-bank Excel workbook and split each
    question row into overlapping text chunks ready for embedding.

    Parameters:
        file_path: path to an .xlsx workbook; real column headers are on
            spreadsheet row 2 (header=1), with a title row above them.

    Returns:
        List of {"text": chunk, "metadata": {...}} dicts. Metadata carries a
        "source" key of the form "<file>|<sheet>|q<row>". Returns an empty
        list when the workbook cannot be read.
    """
    print(f"📂 开始处理Excel文件: {file_path}")

    try:
        # sheet_name=None loads every worksheet into a {name: DataFrame} dict.
        all_sheets = pd.read_excel(file_path, sheet_name=None, header=1)
        print(f"✅ 成功读取Excel文件，包含 {len(all_sheets)} 个工作表")
    except Exception as e:
        # Best-effort: a broken workbook should not abort the whole run.
        print(f"❌ Excel文件读取失败: {e}")
        return []

    all_chunks = []
    total_questions = 0

    for sheet_name, df in all_sheets.items():
        print(f"\n📊 处理工作表: {sheet_name} ({len(df)} 行)")

        # Normalize column labels to stripped strings before role matching.
        df.columns = [str(col).strip() for col in df.columns]
        col_map = _map_columns(df.columns)
        print(f"  列映射: {col_map}")

        for idx, row in df.iterrows():
            # Progress indicator for large sheets only.
            if idx % 50 == 0 and len(df) > 100:
                print(f"  处理进度: {idx + 1}/{len(df)}")

            question_text = _build_question_text(sheet_name, row, col_map)

            # Split each assembled question into overlapping chunks and tag
            # every chunk with its provenance for later retrieval display.
            for chunk in text_splitter.split_text(question_text):
                all_chunks.append({
                    "text": chunk,
                    "metadata": {
                        "source": f"{os.path.basename(file_path)}|{sheet_name}|q{idx + 1}",
                        "row": idx + 1,
                        "sheet": sheet_name,
                        "file": os.path.basename(file_path)
                    }
                })

            total_questions += 1

    print(f"✅ 文件处理完成，共提取 {total_questions} 道题目，生成 {len(all_chunks)} 个文本块")
    return all_chunks


# ================== Build the vector store ==================
def build_vector_db(chunks, collection_name):
    """Embed prepared text chunks and persist them as a named Chroma collection.

    Parameters:
        chunks: list of {"text": str, "metadata": dict} items.
        collection_name: Chroma collection name, also used as the on-disk
            sub-directory under VECTORDB_DIR.

    Returns:
        The populated Chroma vector store.
    """
    # Ensure the persistence directory exists before writing.
    vector_db_path = os.path.join(VECTORDB_DIR, collection_name)
    os.makedirs(vector_db_path, exist_ok=True)

    # Load the embedding model strictly from the local filesystem.
    embeddings = HuggingFaceEmbeddings(
        model_name=MODEL_PATH,
        model_kwargs={"local_files_only": True}
    )

    # Split the chunk dicts into parallel text / metadata lists.
    documents = []
    metadatas = []
    for chunk in chunks:
        documents.append(chunk["text"])
        metadatas.append(chunk["metadata"])

    # Embed all texts and persist the collection in a single call.
    vector_db = Chroma.from_texts(
        texts=documents,
        embedding=embeddings,
        metadatas=metadatas,
        persist_directory=vector_db_path,
        collection_name=collection_name
    )

    print(f"💾 向量数据库已保存到: {vector_db_path}")
    return vector_db


# ================== Query interface ==================
def query_kb2(question, collection_name, k=3):
    """Run a similarity search against a persisted safety-regulation KB.

    Parameters:
        question: natural-language query string.
        collection_name: name of the Chroma collection (and its directory
            under VECTORDB_DIR).
        k: number of nearest chunks to return (default 3).

    Returns:
        A list of formatted result dicts, or a single-element list containing
        an {"error": ...} dict when the KB is missing or the query fails.
    """
    vector_db_path = os.path.join(VECTORDB_DIR, collection_name)

    # Guard clause: bail out early when the collection was never built.
    if not os.path.exists(vector_db_path):
        print(f"❌ 向量库 {collection_name} 不存在")
        return [{"error": f"知识库 '{collection_name}' 不存在"}]

    try:
        # Must use the same local embedding model that built the store.
        embeddings = HuggingFaceEmbeddings(
            model_name=MODEL_PATH,
            model_kwargs={"local_files_only": True}
        )

        # Re-open the persisted collection read-only for querying.
        vector_db = Chroma(
            persist_directory=vector_db_path,
            embedding_function=embeddings,
            collection_name=collection_name
        )

        hits = vector_db.similarity_search(question, k=k)

        # Flatten each hit into a display-ready dict; collapse newlines and
        # truncate long content to 300 characters.
        formatted_results = []
        for rank, doc in enumerate(hits, start=1):
            content = doc.page_content.replace("\n", "  ")
            if len(content) > 300:
                content = content[:300] + "..."
            formatted_results.append({
                "序号": rank,
                "内容": content,
                "来源": doc.metadata.get("source", "未知来源"),
                "文件": doc.metadata.get("file", "未知文件")
            })

        return formatted_results

    except Exception as e:
        return [{"error": f"查询错误: {str(e)}"}]


# ================== Main pipeline ==================
if __name__ == "__main__":
    print("=" * 50)
    print("🔧 安全规程知识库构建系统")
    print("=" * 50)
    print(f"📂 安全规程文档目录: {SAFETY_REGULATIONS_DIR}")
    print(f"🤖 向量库目录: {VECTORDB_DIR}")
    print(f"🧠 嵌入模型: {MODEL_PATH}")
    print("=" * 50)

    # Collect Excel workbooks recursively. Match the extension
    # case-insensitively and skip Office lock/temp files ("~$foo.xlsx"),
    # which exist while a workbook is open and are unreadable by pandas.
    excel_files = []
    for root, _dirs, files in os.walk(SAFETY_REGULATIONS_DIR):
        for file in files:
            if file.lower().endswith('.xlsx') and not file.startswith('~$'):
                excel_files.append(os.path.join(root, file))

    print(f"📊 找到 {len(excel_files)} 个Excel文件")

    # Extract text chunks from every workbook.
    all_chunks = []
    for file_path in excel_files:
        print(f"\n{'=' * 40}")
        print(f"🔍 处理文件: {os.path.basename(file_path)}")
        chunks = process_safety_regulations_excel(file_path)
        all_chunks.extend(chunks)
        print(f"📦 当前总文本块数: {len(all_chunks)}")

    # Build the vector store only when something was actually extracted.
    if all_chunks:
        print(f"\n{'=' * 40}")
        print(f"🛠️ 开始构建向量库，共 {len(all_chunks)} 个文本块")
        build_vector_db(all_chunks, COLLECTION_NAME)

    print("=" * 50)
    print(f"✅ 知识库构建完成! 共处理 {len(excel_files)} 个文件，生成 {len(all_chunks)} 个文本块")