import json
import os
from typing import List, Optional, Union

from langchain_core.language_models import BaseChatModel
from langchain_openai.chat_models.base import BaseChatOpenAI
from pydantic import BaseModel, Field
from loguru import logger
from enum import Enum
from ..config.agents import LLMType
from ..config.env import get_model_config

# Fallback sampling temperature applied when a model entry does not specify one.
default_llm_temperature: float = 0.01
# Fallback maximum model/context length when a model entry does not specify one
# (presumably measured in tokens — confirm against get_model_config entries).
default_llm_max_model_len: int = 64000

class LLMContentModality(str, Enum):
    """Content modalities an LLM may accept or produce.

    Subclasses ``str`` so members compare equal to their plain string
    values and serialize naturally.
    """

    TEXT = "text"
    IMAGE = "image"
    AUDIO = "audio"
    VIDEO = "video"

class LLMConfig(BaseModel):
    """Configuration for a single LLM endpoint.

    Holds connection parameters (model name, base URL, API key), sampling
    settings, the modalities the model supports, and pass-through options
    used when the chat model is instantiated.
    """

    model: str
    base_url: str | None = None
    api_key: str | None = None
    temperature: float = default_llm_temperature
    max_model_len: int = default_llm_max_model_len
    # Modalities accepted as input / produced as output; text-only by default.
    input_modality: List[LLMContentModality] = Field(default_factory=lambda: [LLMContentModality.TEXT])
    output_modality: List[LLMContentModality] = Field(default_factory=lambda: [LLMContentModality.TEXT])
    # Extra keyword arguments forwarded to the chat-model constructor.
    kwargs: dict = Field(default_factory=dict)
    type: LLMType = LLMType.LLM_CHAT
    # Provider-specific options, parsed from a JSON string in the raw config.
    options: dict = Field(default_factory=dict)
    ext: dict = Field(default_factory=dict)

    @staticmethod
    def _split_options(kwargs: dict | None) -> tuple:
        """Extract the JSON-encoded ``options`` entry from *kwargs*.

        Returns ``(remaining_kwargs, options)``. A missing, empty, or
        malformed ``options`` value degrades to ``{}`` rather than raising,
        and a ``None`` *kwargs* is treated as an empty dict (the original
        code crashed later on ``None.pop`` in that case).
        """
        kwargs = dict(kwargs) if kwargs else {}
        options: dict = {}
        raw = kwargs.pop("options", None)
        if raw:
            try:
                options = json.loads(raw)
            except (json.JSONDecodeError, TypeError) as e:
                # Best-effort: continue with empty options, but leave a trace.
                logger.warning(f"Failed to parse LLM options: {e}")
        return kwargs, options

    @classmethod
    def build_from_type(cls, llm_type: LLMType) -> Optional["LLMConfig"]:
        """Build an ``LLMConfig`` for *llm_type* from the environment.

        The environment variable named by ``llm_type.value`` selects the
        model entry to resolve via ``get_model_config``.

        Returns:
            The built config, or ``None`` when the environment variable is
            unset or empty.
        """
        name = os.getenv(llm_type.value)
        if not name:
            return None
        model, base_url, api_key, kwargs = get_model_config(name=name)
        kwargs, options = cls._split_options(kwargs)
        return cls(
            model=model,
            base_url=base_url,
            api_key=api_key,
            type=llm_type,
            temperature=kwargs.pop("temperature", default_llm_temperature),
            max_model_len=kwargs.pop("max_model_len", default_llm_max_model_len),
            kwargs=kwargs,
            options=options,
        )

    @classmethod
    def build_from_name(cls, model_name: str) -> "LLMConfig":
        """Build an ``LLMConfig`` from a model name.

        Args:
            model_name: Name of the model entry resolved via ``get_model_config``.

        Returns:
            An ``LLMConfig`` with the default type (``LLMType.LLM_CHAT``).
        """
        model, base_url, api_key, kwargs = get_model_config(name=model_name)
        kwargs, options = cls._split_options(kwargs)
        return cls(
            model=model,
            base_url=base_url,
            api_key=api_key,
            temperature=kwargs.pop("temperature", default_llm_temperature),
            max_model_len=kwargs.pop("max_model_len", default_llm_max_model_len),
            kwargs=kwargs,
            options=options,
        )

    @classmethod
    def build(cls,
              model: str,
              base_url: str | None = None,
              api_key: str | None = None,
              llm_type: LLMType = LLMType.LLM_CHAT,
              temperature: float = default_llm_temperature,
              max_model_len: int = default_llm_max_model_len,
              input_modality: List[LLMContentModality] | LLMContentModality | str | List[str] | None = None,
              output_modality: List[LLMContentModality] | LLMContentModality | str | List[str] | None = None,
              options: dict | None = None,
              kwargs: dict | None = None,
              ext: dict | None = None) -> "LLMConfig":
        """Build an ``LLMConfig`` from explicit parameters.

        ``input_modality`` / ``output_modality`` accept a single modality
        (enum member or its string value) or a list of either; ``None``
        defaults to text-only. A non-positive ``max_model_len`` falls back
        to ``default_llm_max_model_len``.
        """

        def normalize_modality(modality) -> List[LLMContentModality]:
            # Coerce str / enum member / mixed list into a list of enum members.
            if modality is None:
                return [LLMContentModality.TEXT]
            if isinstance(modality, (str, LLMContentModality)):
                modality = [modality]
            return [item if isinstance(item, LLMContentModality) else LLMContentModality(item)
                    for item in modality]

        return cls(
            model=model,
            base_url=base_url,
            api_key=api_key,
            type=llm_type,
            temperature=temperature,
            max_model_len=max_model_len if max_model_len > 0 else default_llm_max_model_len,
            input_modality=normalize_modality(input_modality),
            output_modality=normalize_modality(output_modality),
            kwargs=kwargs or {},
            options=options or {},
            ext=ext or {},
        )


class InsCodeModel(BaseModel):
    """Wrapper binding an ``LLMConfig`` to its instantiated chat model.

    Exposes the model's input/output modality capabilities, context length,
    and accepted image sizes, mirrored from the underlying config.
    """

    config: LLMConfig
    chat_model: Optional[BaseChatModel] = None
    # Modalities the wrapped model accepts / produces; mirrored from config.
    input_modality: List[LLMContentModality] = Field(default_factory=lambda: [LLMContentModality.TEXT])
    output_modality: List[LLMContentModality] = Field(default_factory=lambda: [LLMContentModality.TEXT])
    max_model_len: int = default_llm_max_model_len
    # Image sizes the model accepts, from config.kwargs['valid_image_sizes'].
    valid_image_sizes: List[str] = Field(default_factory=list)

    class Config:
        # BaseChatModel is not a pydantic-native type.
        arbitrary_types_allowed = True

    def __init__(self, **data):
        super().__init__(**data)
        # Instantiate the chat model lazily when one was not supplied.
        if not self.chat_model and self.config:
            self.chat_model = create_openai_llm(self.config)

        # Mirror capability fields from the config; non-empty config values
        # take precedence over anything passed directly in *data*.
        if self.config:
            if self.config.input_modality:
                self.input_modality = self.config.input_modality
            if self.config.output_modality:
                self.output_modality = self.config.output_modality
            if self.config.max_model_len:
                self.max_model_len = self.config.max_model_len
            if self.config.kwargs:
                self.valid_image_sizes = self.config.kwargs.get('valid_image_sizes', [])

    @property
    def model_name(self) -> str:
        """Name of the underlying model, preferring the chat model's own name."""
        if self.chat_model and hasattr(self.chat_model, 'model_name'):
            return self.chat_model.model_name
        if self.config:
            return self.config.model
        return "unknown"

    @classmethod
    def build(cls,
              config: Union["LLMConfig", "InsCodeModel"],
              input_modality: Optional[List[LLMContentModality]] = None,
              output_modality: Optional[List[LLMContentModality]] = None,
              valid_image_sizes: Optional[List[str]] = None) -> Optional["InsCodeModel"]:
        """Create an ``InsCodeModel`` from an ``LLMConfig`` or pass one through.

        Args:
            config: Source ``LLMConfig``, or an already-built ``InsCodeModel``
                (returned unchanged).
            input_modality: Optional override for the input modalities.
            output_modality: Optional override for the output modalities.
            valid_image_sizes: Optional list of accepted image sizes.

        Returns:
            An ``InsCodeModel``, or ``None`` when *config* is falsy or of an
            unsupported type.
        """
        if not config:
            return None

        if isinstance(config, InsCodeModel):
            return config
        if isinstance(config, LLMConfig):
            return cls(
                config=config,
                input_modality=input_modality or config.input_modality,
                output_modality=output_modality or config.output_modality,
                max_model_len=config.max_model_len,
                # Default is None (not a shared [] literal) to avoid the
                # mutable-default-argument pitfall.
                valid_image_sizes=valid_image_sizes or [],
            )

        return None

    def supports_input_modality(self, modality: LLMContentModality) -> bool:
        """Return True if the model accepts *modality* as input."""
        return modality in self.input_modality

    def supports_output_modality(self, modality: LLMContentModality) -> bool:
        """Return True if the model can produce *modality* as output."""
        return modality in self.output_modality

    async def close(self):
        """Close the underlying chat model's network clients, if any."""
        if self.chat_model and isinstance(self.chat_model, BaseChatOpenAI):
            await close_openai_llm(self.chat_model, self.model_name or "InsCodeModel")


def create_openai_llm(config: LLMConfig, **kwargs) -> BaseChatModel:
    """Instantiate a ``BaseChatOpenAI`` chat model from *config*.

    Keyword arguments override values from ``config.kwargs``. Parameters that
    may arrive as JSON strings are decoded, parameters the constructor does
    not accept are dropped, and ``stream_options`` is routed through
    ``model_kwargs``.

    Args:
        config: Endpoint configuration (model, base_url, api_key, kwargs...).
        **kwargs: Per-call overrides merged over ``config.kwargs``.

    Returns:
        A configured ``BaseChatOpenAI`` instance.
    """
    combined_kwargs = {**config.kwargs, **kwargs}

    llm_kwargs = {"model": config.model, "temperature": config.temperature}
    if config.base_url:
        llm_kwargs["base_url"] = config.base_url
    if config.api_key:
        llm_kwargs["api_key"] = config.api_key

    llm_kwargs.update(combined_kwargs)
    # Drop explicit Nones so constructor defaults apply.
    filtered_kwargs = {k: v for k, v in llm_kwargs.items() if v is not None}

    # These parameters may arrive as JSON-encoded strings from the raw config.
    for param in ("extra_body", "stream_options"):
        if param in filtered_kwargs and isinstance(filtered_kwargs[param], str):
            try:
                filtered_kwargs[param] = json.loads(filtered_kwargs[param])
            except (json.JSONDecodeError, TypeError):
                # Use the module logger instead of print for consistency.
                logger.warning(f"Error parsing {param}: {filtered_kwargs[param]}")
                filtered_kwargs[param] = {}

    # stream_options is not a first-class constructor argument; it must be
    # forwarded via model_kwargs.
    model_kwargs = {}
    if "stream_options" in filtered_kwargs:
        model_kwargs["stream_options"] = filtered_kwargs.pop("stream_options")

    # Whitelist of parameters accepted by the BaseChatOpenAI constructor.
    allowed_params = {"model", "temperature", "max_tokens", "logprobs", "use_responses_api", "request_timeout",
                      "timeout", "max_retries", "api_key", "base_url", "extra_body", "max_completion_tokens",
                      "reasoning_effort"}
    final_kwargs = {k: v for k, v in filtered_kwargs.items() if k in allowed_params}

    if model_kwargs:
        final_kwargs["model_kwargs"] = model_kwargs

    llm = BaseChatOpenAI(**final_kwargs)
    if config.options:
        # NOTE(review): this mutates the pydantic model_config mapping at
        # runtime, which is shared at class level — confirm this is intended.
        llm.model_config.update(config.options)
    return llm


async def close_openai_llm(llm_client: BaseChatOpenAI, client_name: str = "LLM") -> None:
    """Best-effort shutdown of the network clients held by *llm_client*.

    Closes the async client first, then the sync client. Failures are
    logged as warnings and never propagated; a falsy *llm_client* is a
    no-op.

    Args:
        llm_client: The chat model whose underlying clients should close.
        client_name: Label used in warning messages.
    """
    if not llm_client:
        return

    try:
        async_client = getattr(llm_client, 'root_async_client', None)
        if async_client:
            await async_client.close()
    except Exception as e:
        logger.warning(f"Error closing {client_name} async client: {e}")

    try:
        sync_client = getattr(llm_client, 'root_client', None)
        if sync_client:
            sync_client.close()
    except Exception as e:
        logger.warning(f"Error closing {client_name} sync client: {e}")
