"""
模型配置管理

负责管理本地和云端模型的配置。
"""

import os
import torch
from typing import Dict, Any

from .base_config import BaseConfig
from .enums import Mode


class ModelConfig(BaseConfig):
    """Configuration manager for local and cloud model settings.

    All values are read lazily from environment variables through the
    helpers inherited from ``BaseConfig`` (``_get_env``, ``_get_env_int``,
    ``_get_env_float``, ``validate_path``), so properties always reflect
    the current environment.
    """

    @property
    def mode(self) -> Mode:
        """Run mode parsed from the ``MODE`` env var (default: ``local``).

        Raises:
            ValueError: if ``MODE`` is neither ``local`` nor ``cloud``.
        """
        mode_str = self._get_env("MODE", "local").lower()
        try:
            return Mode(mode_str)
        except ValueError:
            # Re-raise with a user-facing message; suppress the original
            # traceback (``from None``) since it adds no information.
            raise ValueError(f"无效的运行模式: {mode_str}，请选择 'local' 或 'cloud'") from None

    @property
    def is_local_mode(self) -> bool:
        """True when running in local mode."""
        return self.mode == Mode.LOCAL

    @property
    def is_cloud_mode(self) -> bool:
        """True when running in cloud mode."""
        return self.mode == Mode.CLOUD

    @property
    def device(self) -> str:
        """Compute device.

        Honors the ``DEVICE`` env var (default ``cuda``) only when CUDA is
        actually available; otherwise forces ``cpu`` regardless of ``DEVICE``.
        """
        if torch.cuda.is_available():
            return self._get_env("DEVICE", "cuda")
        else:
            return "cpu"

    # --- Local model configuration ---

    @property
    def local_model_path(self) -> str:
        """Path to the local embedding model.

        Raises:
            ValueError: in local mode when ``LOCAL_MODEL_PATH`` is unset.

        NOTE(review): in cloud mode with the env var unset this returns
        whatever ``_get_env`` yields with no default — possibly ``None``,
        not ``str``; confirm against ``BaseConfig._get_env``.
        """
        path = self._get_env("LOCAL_MODEL_PATH")
        if not path and self.is_local_mode:
            raise ValueError("本地模式下必须设置 LOCAL_MODEL_PATH")
        return path

    @property
    def ollama_model(self) -> str:
        """Ollama model name (local mode only; empty string otherwise)."""
        if self.is_local_mode:
            return self._get_env("OLLAMA_MODEL", "qwen3:0.6b")
        return ""

    @property
    def ollama_timeout(self) -> float:
        """Ollama request timeout in seconds (default: 360.0)."""
        return self._get_env_float("OLLAMA_TIMEOUT", 360.0)

    @property
    def embed_batch_size(self) -> int:
        """Embedding batch size, tuned per device type.

        Uses ``startswith("cuda")`` so explicit device indices such as
        ``cuda:0`` (settable via the ``DEVICE`` env var) still select the
        GPU batch size.
        """
        if self.device.startswith("cuda"):
            return self._get_env_int("EMBED_BATCH_SIZE_CUDA", 16)
        else:
            return self._get_env_int("EMBED_BATCH_SIZE_CPU", 4)

    # --- Cloud model configuration ---

    @property
    def openai_api_key(self) -> str:
        """OpenAI API key (required in cloud mode; empty string otherwise)."""
        if self.is_cloud_mode:
            return self._get_env("OPENAI_API_KEY", required=True)
        return ""

    @property
    def openai_base_url(self) -> str:
        """OpenAI API base URL (cloud mode only; empty string otherwise)."""
        if self.is_cloud_mode:
            return self._get_env("OPENAI_BASE_URL", "https://api.openai.com/v1")
        return ""

    @property
    def openai_embedding_model(self) -> str:
        """Cloud embedding model name (cloud mode only; empty string otherwise)."""
        if self.is_cloud_mode:
            return self._get_env("OPENAI_EMBEDDING_MODEL", "text-embedding-3-small")
        return ""

    @property
    def openai_chat_model(self) -> str:
        """Cloud chat model name (cloud mode only; empty string otherwise)."""
        if self.is_cloud_mode:
            return self._get_env("OPENAI_CHAT_MODEL", "gpt-3.5-turbo")
        return ""

    def get_embedding_config(self) -> Dict[str, Any]:
        """Return the embedding-model configuration for the current mode."""
        if self.is_local_mode:
            return {
                "mode": "local",
                "model_path": self.local_model_path,
                "device": self.device,
                "batch_size": self.embed_batch_size
            }
        else:
            return {
                "mode": "cloud",
                "api_key": self.openai_api_key,
                "base_url": self.openai_base_url,
                "model": self.openai_embedding_model
            }

    def get_llm_config(self) -> Dict[str, Any]:
        """Return the LLM configuration for the current mode."""
        if self.is_local_mode:
            return {
                "mode": "local",
                "model": self.ollama_model,
                "timeout": self.ollama_timeout
            }
        else:
            return {
                "mode": "cloud",
                "api_key": self.openai_api_key,
                "base_url": self.openai_base_url,
                "model": self.openai_chat_model
            }

    def validate_paths(self) -> None:
        """Validate that the local model path exists (local mode only).

        Raises:
            ValueError: propagated from ``local_model_path`` when unset.
        """
        if self.is_local_mode:
            self.validate_path(self.local_model_path, "本地嵌入模型路径")