# 模型加载器 - 单例模式实现懒加载

from config import (
    API_TYPE,
    LOCAL_EM_NAME,
    LOCAL_EM_PATH,
    LOCAL_LLM_NAME,
    LOCAL_LLM_PATH,
    OPENAI_API_KEY,
    OPENAI_EM,
    OPENAI_LLM,
    ARK_API_KEY,
    ARK_EM,
    ARK_LLM,
)


class ModelLoader:
    """模型加载器 - 单例模式，实现懒加载"""

    _instance = None
    _em = None
    _llm_tokenizer = None
    _llm = None
    _openai_client = None
    _ark_client = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super(ModelLoader, cls).__new__(cls)
        return cls._instance

    def init_em(self):

        if API_TYPE != "local":
            return

        if self._em is None:

            from sentence_transformers import SentenceTransformer

            # 加载本地嵌入模型
            print(f"正在加载嵌入模型：{LOCAL_EM_NAME}")
            print(f"本地路径：{LOCAL_EM_PATH}")
            self._em = SentenceTransformer(
                model_name_or_path=LOCAL_EM_PATH,
                trust_remote_code=True,
                local_files_only=True,
            )
            print(f"本地模型{LOCAL_EM_NAME}加载成功！")

    def get_em(self):
        """获取嵌入模型实例"""
        return self._em

    def generate_embedding(self, text: str) -> list:
        """生成文本的嵌入向量"""
        match API_TYPE:
            case "local":  # 本地模型生成嵌入
                embedding = self._em.encode(
                    text,
                    normalize_embeddings=True,
                )
                return embedding.tolist()
            case "openai":  # OpenAI API生成嵌入
                response = self._openai_client.embeddings.create(
                    model=OPENAI_EM,
                    input=text,
                )
                return response.data[0].embedding
            case "ark":  # 火山方舟API生成嵌入
                response = self._ark_client.embeddings.create(
                    model=ARK_EM,
                    input=text,
                )
                return response.data[0].embedding
            case _:
                raise ValueError(f"不支持的LLM类型: {API_TYPE}")

    def init_llm(self):
        """初始化本地大语言模型"""
        if API_TYPE != "local":
            return

        if self._llm_tokenizer is None and self._llm is None and LOCAL_LLM_PATH:
            import torch
            from transformers import AutoTokenizer, AutoModelForCausalLM

            print(f"正在加载大语言模型：{LOCAL_LLM_NAME}")
            print(f"本地路径：{LOCAL_LLM_PATH}")

            # 首先加载分词器
            self._llm_tokenizer = AutoTokenizer.from_pretrained(
                pretrained_model_name_or_path=LOCAL_LLM_PATH,
                trust_remote_code=True,
                local_files_only=True,
            )
            print("✅ 分词器加载成功")

            # 加载模型，设置torch_dtype为bfloat16以减少内存使用
            self._llm = AutoModelForCausalLM.from_pretrained(
                pretrained_model_name_or_path=LOCAL_LLM_PATH,
                trust_remote_code=True,
                local_files_only=True,
                dtype=torch.bfloat16,
                low_cpu_mem_usage=True,
            )
            print("✅ 大语言模型加载成功")

            # 如果GPU可用，尝试将模型移至GPU
            if torch.cuda.is_available():
                print("尝试将大语言模型移至GPU...")
                self._llm = self._llm.to("cuda")
                print("✅ 成功：大语言模型已成功移至GPU")

    def get_llm_tokenizer(self):
        return self._llm_tokenizer

    def get_llm(self):
        return self._llm

    def generate_llm_response(
        self,
        prompt,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.7,
        top_p=0.95,
    ):
        """生成大语言模型的响应"""
        match API_TYPE:
            case "local":
                if not self._llm or not self._llm_tokenizer:
                    self.init_llm()

                import torch

                # 使用分词器编码输入
                inputs = self._llm_tokenizer(prompt, return_tensors="pt")

                # 如果GPU可用，将输入移至GPU
                if torch.cuda.is_available():
                    inputs = {k: v.to("cuda") for k, v in inputs.items()}

                with torch.no_grad():
                    # 生成回复
                    outputs = self._llm.generate(
                        **inputs,
                        max_new_tokens=max_new_tokens,
                        do_sample=do_sample,
                        temperature=temperature,
                        top_p=top_p,
                    )

                # 解码生成的文本
                generated_text = self._llm_tokenizer.decode(
                    outputs[0],
                    skip_special_tokens=True,
                )

                # 返回生成的文本（不包含输入的提示词）
                return generated_text[len(prompt) :].strip()
            case "openai":
                if not self._openai_client:
                    self.init_api_client()
                response = self._openai_client.chat.completions.create(
                    model=OPENAI_EM,
                    messages=[{"role": "user", "content": prompt}],
                )
                return response.choices[0].message.content.strip()
            case "ark":
                if not self._ark_client:
                    self.init_api_client()
                response = self._ark_client.chat.completions.create(
                    model=ARK_LLM,
                    messages=[{"role": "user", "content": prompt}],
                )
                return response.choices[0].message.content.strip()
            case _:
                raise ValueError(f"不支持的API类型: {API_TYPE}")

    def init_api_client(self):
        """获取API客户端实例"""
        match API_TYPE:  # 根据LLM类型检查相应的API密钥
            case "local":
                return
            case "openai":
                if not OPENAI_API_KEY:
                    raise ValueError("OPENAI API密钥未设置，请配置OPENAI_API_KEY")
                if self._openai_client is None:
                    # 导入OpenAI SDK
                    from openai import OpenAI

                    # 初始化OpenAI客户端
                    print(f"正在加载API客户端：{API_TYPE}")
                    print(f"API模型名称：{OPENAI_EM}")
                    self._openai_client = OpenAI(api_key=OPENAI_API_KEY)
            case "ark":
                if not ARK_API_KEY:
                    raise ValueError("ARK API密钥未设置，请配置ARK_API_KEY")
                if self._ark_client is None:
                    # 导入火山方舟SDK
                    from volcenginesdkarkruntime import Ark

                    # 初始化火山方舟客户端
                    print(f"正在加载API客户端：{API_TYPE}")
                    self._ark_client = Ark(api_key=ARK_API_KEY)
            case _:
                raise ValueError(f"不支持的API类型: {API_TYPE}")

    def get_openai_client(self):
        """获取OpenAI客户端实例"""
        return self._openai_client

    def get_ark_client(self):
        """获取火山方舟客户端实例"""
        return self._ark_client

    def init_model(self):
        """初始化所有模型"""
        match API_TYPE:
            case "local":  # 使用本地模型
                try:
                    self.init_em()  # 初始化嵌入模型
                    self.init_llm()  # 初始化大语言模型
                except Exception as e:
                    print(f"初始化本地嵌入模型时出错: {e}")
                    print("\n可能的解决方案:")
                    print(f"1. 确认模型已正确下载: python download_models.py")
                    print(f"2. 检查本地嵌入模型{LOCAL_EM_NAME}路径: {LOCAL_EM_PATH}")
                    print("3. 尝试重启应用程序")
                    print("4. 执行 python download_models.py 下载模型")
                    raise
            case _:  # 使用API
                try:
                    # 初始化API客户端
                    self.init_api_client()
                except Exception as e:
                    print(f"初始化API客户端失败: {e}")
                    print("\n可能的解决方案:")
                    print("1. 检查API密钥是否正确设置")
                    print("2. 确认网络连接正常")
                    print("3. 切换到本地模型")


# Global model loader instance — the singleton means every importer of this
# module shares the same loaded models/clients.
model_loader = ModelLoader()
model_loader.init_model()  # eagerly initialize the configured backend at import time
