from qac_generater import qac_generate
from create_base_v3 import create_q_a_base, read_qa_json, delete_qa_base
from es import (
    create_es_connection,
    load_mapping,
    create_index,
    store_chunks,
    delete_es_index,
)
from QueryProcessor import QueryProcessor
import os
import json
import yaml
import torch
import urllib3
from pathlib import Path
from typing import Dict, Any, Tuple, List, Optional
from enum import Enum


class PipelineMode(Enum):
    """Operating modes accepted by ``Pipeline.run_pipeline``."""

    # Full pipeline: generate QAC -> build knowledge base -> create index -> query.
    FULL = "full"
    # Query-only mode (reuses whatever is already built).
    QUERY_ONLY = "query_only"
    # Build the knowledge base and query, without generating new QAC data.
    BUILD_AND_QUERY = "build_and_query"
    # Delete the knowledge base and the ES index.
    DELETE_ALL = "delete_all"


class Pipeline:
    """End-to-end RAG pipeline: QAC generation, FAISS knowledge-base
    building, Elasticsearch indexing, and query processing."""

    def __init__(self, config_path: str):
        """Initialize the pipeline.

        Args:
            config_path: Path to the YAML configuration file.
        """
        self.configs = self._load_config(config_path)
        self.device = self._setup_device(self.configs["USE_GPU_NUM"])
        self.faiss_database_path = Path("SimpleRAG/data/results")

        # Lazily created by get_query_processor() so query-only callers
        # do not pay the model-loading cost up front.
        self.query_processor = None
        # Silence TLS-verification warnings; presumably the ES host uses a
        # self-signed certificate -- confirm against the deployment.
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    def _load_config(self, config_path: str) -> Dict[str, Any]:
        """Load and parse the YAML configuration file.

        Fix: the original ``yaml.safe_load(open(config_path))`` leaked the
        file handle; a context manager closes it deterministically.
        """
        with open(config_path, "r", encoding="utf-8") as f:
            return yaml.safe_load(f)

    def _setup_device(self, num_str: str) -> torch.device:
        """Restrict visible GPUs and pick the compute device.

        Args:
            num_str: Value for ``CUDA_VISIBLE_DEVICES``, e.g. ``"0"`` or ``"0,1"``.

        Returns:
            ``cuda:0`` (first visible GPU) when CUDA is available, else CPU.
        """
        os.environ["CUDA_VISIBLE_DEVICES"] = num_str
        return torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    def generate_qac(self, file_path: str, output_path: str) -> Dict[str, Any]:
        """Generate QAC data for a source document.

        Fix: the original was annotated ``-> None`` although it always
        returned the result of ``qa_process``; the annotation now matches.

        Args:
            file_path: Input document path.
            output_path: Destination for the processed QAC JSON.

        Returns:
            The result of ``qa.qa_process`` (structure is defined by the
            ``qac_generater`` module).
        """
        qa = qac_generate(
            self.configs["TOKENIZER_CONFIG"],
            self.configs["OPENAI_CONFIG"],
            self.configs["GENERATE_CONFIG"],
            self.configs["CHUNK_SIZE"],
            self.configs["MAX_SEQ_LEN"],
        )
        return qa.qa_process(file_path, output_path)

    def create_knowledge_base(
        self, processed_file_path: str, qac_data: Optional[Dict[str, Any]] = None
    ) -> None:
        """Build the FAISS knowledge base from a processed QA file.

        Args:
            processed_file_path: Path to the processed QA JSON file.
            qac_data: Optional in-memory QAC data (as returned by
                :meth:`generate_qac`); ``None`` when only the file is used.
        """
        create_q_a_base(
            processed_file_path,
            qac_data,
            self.configs["EMBEDDING_MODEL_PATH"],
            self.device,
        )

    def setup_es_index(self, processed_file_path: str) -> None:
        """Create (or update) the ES index and store the document chunks.

        Args:
            processed_file_path: Path to the processed QA JSON file.
        """
        es = create_es_connection(
            self.configs["ES_HOST"],
            (self.configs["ES_USER"], self.configs["ES_PASSWORD"]),
        )

        mapping = load_mapping(self.configs["MAPPING_FILE"])
        create_return = create_index(es, self.configs["ES_INDEX"], mapping)
        if create_return:
            # Typo fixed: "all ready" -> "already".
            print("Index already exists, updating...")
        _, _, sources, chunks = read_qa_json(processed_file_path)
        store_chunks(sources, chunks, es, self.configs["ES_INDEX"])

    def delete_databases(self, faiss_database_path) -> Tuple[bool, bool]:
        """Delete the FAISS knowledge base and the ES index.

        Args:
            faiss_database_path: Location of the FAISS knowledge base.
                Note: ``run_pipeline`` passes a ``pathlib.Path``, so the
                original ``str`` annotation was dropped as inaccurate.

        Returns:
            Tuple[bool, bool]: (knowledge base deleted, ES index deleted).
        """
        # Delete the FAISS knowledge base.
        qa_deleted = delete_qa_base(faiss_database_path)

        # Delete the ES index.
        es = create_es_connection(
            self.configs["ES_HOST"],
            (self.configs["ES_USER"], self.configs["ES_PASSWORD"]),
        )
        es_deleted = delete_es_index(es, self.configs["ES_INDEX"])

        print(f"Knowledge base deletion: {'success' if qa_deleted else 'failed'}")
        print(f"ES index deletion: {'success' if es_deleted else 'failed'}")

        return qa_deleted, es_deleted

    def get_query_processor(self) -> QueryProcessor:
        """Return the shared QueryProcessor, creating it on first use."""
        if self.query_processor is None:
            self.query_processor = QueryProcessor(
                self.faiss_database_path,
                self.configs["ES_HOST"],
                self.configs["ES_USER"],
                self.configs["ES_PASSWORD"],
                self.configs["ES_INDEX"],
                reranker_path=self.configs["RERANK_MODEL_PATH"],
                emb_model_path=self.configs["EMBEDDING_MODEL_PATH"],
                device=self.device,
            )
        return self.query_processor

    def process_queries(
        self, query_list: List[str], output_file: Optional[str] = None
    ) -> Dict[str, Any]:
        """Run the retrieval pipeline over a list of queries.

        Args:
            query_list: Queries to process.
            output_file: Optional path; when given, results are dumped as
                JSON with non-ASCII characters preserved.

        Returns:
            Dict with keys ``q_q``, ``q_a``, ``q_c``, ``e_s_result`` and
            ``rag_output`` as produced by ``QueryProcessor.muti_linkup``.
        """
        query_processor = self.get_query_processor()
        q_q, q_a, q_c, e_s_result, rag_output = query_processor.muti_linkup(
            query_list, top_k=self.configs["TOP_K"]
        )

        results = {
            "q_q": q_q,
            "q_a": q_a,
            "q_c": q_c,
            "e_s_result": e_s_result,
            "rag_output": rag_output,
        }

        if output_file:
            # Fix: with ensure_ascii=False the dump may contain non-ASCII
            # text, which would raise under a non-UTF-8 default locale
            # encoding -- pin the file encoding explicitly.
            with open(output_file, "w", encoding="utf-8") as f:
                json.dump(results, f, ensure_ascii=False, indent=4)

        return results

    def run_pipeline(
        self,
        queries: List[str],
        mode: PipelineMode = PipelineMode.QUERY_ONLY,
        input_file: Optional[str] = None,
        processed_file: Optional[str] = None,
        output_file: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Run the pipeline in the requested mode.

        Args:
            queries: Query list (ignored in DELETE_ALL mode).
            mode: Pipeline run mode.
            input_file: Source document path (FULL mode only).
            processed_file: Processed QA file path (FULL and BUILD_AND_QUERY).
            output_file: Optional output path for query results.

        Returns:
            Query results, or a deletion-status dict in DELETE_ALL mode.

        Raises:
            ValueError: When a file path required by the mode is missing.
        """
        if mode == PipelineMode.FULL:
            if not input_file or not processed_file:
                raise ValueError(
                    "FULL mode requires both input_file and processed_file"
                )
            qac_data = self.generate_qac(input_file, processed_file)
            self.create_knowledge_base(processed_file, qac_data)
            self.setup_es_index(processed_file)

        elif mode == PipelineMode.BUILD_AND_QUERY:
            if not processed_file:
                raise ValueError("BUILD_AND_QUERY mode requires processed_file")
            self.create_knowledge_base(processed_file)
            self.setup_es_index(processed_file)

        # Fix: was a bare `if`, breaking the mutually-exclusive mode chain;
        # `elif` makes the dispatch uniform and keeps the early return from
        # ever being evaluated after a build branch.
        elif mode == PipelineMode.DELETE_ALL:
            qa_deleted, es_deleted = self.delete_databases(self.faiss_database_path)
            return {
                "status": "completed",
                "qa_deleted": qa_deleted,
                "es_deleted": es_deleted,
            }

        # Every non-deletion mode ends with a query pass.
        return self.process_queries(queries, output_file)


def main():
    """Example driver showing the supported pipeline modes."""
    pipeline = Pipeline("/root/data/navy_poc/config.yaml")

    # Example 1: query-only mode.
    query_list = ["中华人民共和国国家标准GB/T43347—2023的主要内容是什么？"]
    # results = pipeline.run_pipeline(queries=query_list, mode=PipelineMode.QUERY_ONLY)
    # print("Query results:", results["rag_output"])

    # # Example 2: full pipeline.
    # input_file = "/root/data/navy_poc/qac_data/test_sample_data/GB 15146.3-2008.md"
    # processed_file = os.path.join(input_file.split(".")[0] + "_processed.json")
    # results = pipeline.run_pipeline(
    #     queries=query_list,
    #     mode=PipelineMode.FULL,
    #     input_file=input_file,
    #     processed_file=processed_file,
    #     output_file="output.json",
    # )
    # print("Full pipeline results:", results["rag_output"])

    # Example 3: delete the knowledge base and the ES index.
    deletion_results = pipeline.run_pipeline(
        queries=[],  # deletion mode needs no queries
        mode=PipelineMode.DELETE_ALL,
    )
    print("Deletion results:", deletion_results)


# Script entry point: run the example driver only when executed directly.
if __name__ == "__main__":
    main()
