"""优化器模块，用于选择最优的模型和配置参数"""

from .utils.display import display
from .utils.constants import MODEL_SIZES


class Optimizer:
    """Select the optimal model and runtime parameters for the detected hardware.

    :param config: application configuration mapping; an optional
        ``default_model`` entry overrides all hardware-based selection.
    """

    def __init__(self, config):
        self.config = config
        # Minimum CPU benchmark score required to run each model in CPU-only mode.
        self.model_score_thresholds = {
            "deepseek-r1:671b": 350,
            "deepseek-r1:70b": 180,
            "deepseek-r1:32b": 100,
            "deepseek-r1:14b": 50,
            "deepseek-r1:8b": 30,
            "deepseek-r1:7b": 20,
            "deepseek-r1:1.5b": 8,
            "deepseek-r1:1.5b-qwen-distill-q4_K_M": 5,
        }

    def optimize_params(self, hardware):
        """Compute the full set of optimal parameters for the given hardware.

        :param hardware: dict with hardware info (``cpu``, ``gpu``, ``disk``,
            ``memory`` keys)
        :return: dict with ``model``, ``threads``, ``gpu_layers`` and
            ``ctx_size`` entries
        """
        params = {
            "model": self.select_model(hardware),
            "threads": self.calculate_threads(hardware),
            "gpu_layers": self.calculate_gpu_layers(hardware),
            "ctx_size": self.calculate_ctx_size(hardware),
        }
        return params

    def select_model(self, hardware):
        """Choose a model tag appropriate for the hardware's capabilities.

        :param hardware: dict with hardware info
        :return: selected model tag
        """
        # A configured default model always wins over hardware heuristics.
        default_model = self.config.get("default_model")
        if default_model:
            return default_model

        cpu = hardware.get("cpu", {})
        gpu = hardware.get("gpu")
        disk = hardware.get("disk", {})
        gpu_vram = gpu.get("vram") if gpu else 0
        # Fix: read free space through the already-fetched `disk` mapping so a
        # missing "free_gb" key degrades to 0 instead of raising KeyError.
        disk_free_gb = disk.get("free_gb", 0)

        if not gpu:
            return self.select_cpu_model(cpu, disk)

        # GPU selection: pick the largest model the VRAM supports, then fall
        # back to a smaller one if there is not enough disk space to store it.
        if gpu_vram >= 32:  # 32 GB VRAM
            return "deepseek-r1:32b" if disk_free_gb >= 140 else "deepseek-r1:14b"
        if gpu_vram >= 16:
            return "deepseek-r1:14b" if disk_free_gb >= 28 else "deepseek-r1:7b"
        if gpu_vram >= 8:
            return "deepseek-r1:7b" if disk_free_gb >= 14 else "deepseek-r1:1.5b"
        return "deepseek-r1:1.5b"

    def fallback_model(self, disk_free_gb):
        """Pick a fallback model when disk space is constrained.

        :param disk_free_gb: available disk space in GB
        :return: fallback model tag
        """
        if disk_free_gb >= 5:
            return "deepseek-r1:1.5b"
        # Below 5 GB only the minimal model is viable.
        return self.minimal_model()

    def minimal_model(self):
        """Return the smallest usable model tag."""
        return "qwen:0.5b"

    def calculate_threads(self, hardware):
        """Compute the recommended number of CPU threads.

        Leaves one thread free for the OS/other processes; never below 1.

        :param hardware: dict with hardware info (requires ``cpu.threads``)
        :return: recommended CPU thread count
        """
        return max(1, hardware["cpu"]["threads"] - 1)

    def calculate_gpu_layers(self, hardware):
        """Compute the recommended number of GPU-accelerated layers.

        :param hardware: dict with hardware info
        :return: recommended GPU layer count (0 when no GPU is present)
        """
        # Fix: use .get so a missing "gpu" key means "no GPU" instead of
        # raising KeyError (consistent with select_model/calculate_ctx_size).
        gpu = hardware.get("gpu")
        if not gpu:
            return 0
        # One layer per GB of VRAM, capped at 99 (i.e. "all layers").
        return min(99, int(gpu["vram"]))

    def calculate_ctx_size(self, hardware):
        """Compute the recommended context window size.

        :param hardware: dict with hardware info
        :return: recommended context size in tokens
        """
        gpu = hardware.get("gpu")
        gpu_vram = gpu.get("vram") if gpu else 0
        if gpu_vram > 0:
            # 512 tokens per GB of VRAM, capped at 8192.
            return min(8192, int(gpu_vram * 512))
        # CPU-only mode: size the context from system memory instead.
        return min(4096, int(hardware["memory"] * 256))

    def select_cpu_model(self, cpu, disk):
        """Select a model for CPU-only mode based on score and disk space.

        :param cpu: dict with CPU info (``score`` key)
        :param disk: dict with disk info (``free_gb`` key)
        :return: selected model tag
        :raises OSError: if disk space cannot even hold the minimal model
        """
        cpu_score = cpu.get("score", 1)
        disk_free = disk.get("free_gb", 0)
        # Try models from the highest score threshold downwards; require 20%
        # headroom on disk beyond the model's nominal size.
        for model, min_score in sorted(
            self.model_score_thresholds.items(), key=lambda x: x[1], reverse=True
        ):
            # Model sizes come from the shared constants table.
            model_size = MODEL_SIZES.get(model, 0)
            if cpu_score >= min_score:
                if disk_free >= model_size * 1.2:
                    return model
                # Fix: the second fragment was missing its f-prefix, so the
                # placeholder was printed literally instead of interpolated.
                display(
                    f"⚠️ CPU足够但磁盘空间不足({disk_free}"
                    f"GB < {model_size*1.2}GB)，尝试更小模型"
                )

        min_model = "qwen:0.5b"
        # Shared constants table also provides the minimal model's size.
        min_size = MODEL_SIZES.get(min_model, 1)
        if disk_free < min_size * 1.2:
            display(
                "🛑🛑🛑🛑🛑🛑🛑🛑🛑🛑🛑🛑🛑🛑🛑🛑"
                f" 磁盘空间不足({disk_free}GB)，无法运行最小模型({min_model})"
            )
            raise OSError("磁盘空间不足，无法运行任何模型")
        return min_model
