import json
import logging
import os

from langchain_core.language_models import BaseChatModel
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field

from ..config.agents import LLMType
from ..config.env import get_model_config

# Fallback sampling temperature used when a model config does not specify one.
default_llm_temperature: float = 0.01
# Fallback maximum model context length (tokens) when a config does not specify one.
default_llm_max_model_len: int = 64000


class LLMConfig(BaseModel):
    """Configuration for constructing an LLM client.

    Holds the connection parameters (model name, endpoint, credentials),
    sampling settings, and pass-through keyword arguments / options for
    the underlying client.
    """

    model: str
    base_url: str | None = None
    api_key: str | None = None
    temperature: float = default_llm_temperature
    max_model_len: int = default_llm_max_model_len
    kwargs: dict = Field(default_factory=dict)
    type: LLMType = LLMType.LLM_CHAT
    options: dict = Field(default_factory=dict)

    @classmethod
    def build_from_type(cls, llm_type: LLMType) -> "LLMConfig":
        """
        Build LLMConfig based on LLM type using predefined configurations.

        The model name is read from the environment variable named by
        ``llm_type.value`` and resolved through ``get_model_config``.
        """
        model, base_url, api_key, kwargs = get_model_config(name=os.getenv(llm_type.value))
        # get_model_config may return None/empty kwargs; normalize so the
        # pops below cannot raise AttributeError on None.
        kwargs = dict(kwargs) if kwargs else {}
        # "options" may be stored as a JSON string or already be a dict.
        raw_options = kwargs.pop("options", "{}")
        if isinstance(raw_options, dict):
            options = raw_options
        else:
            try:
                options = json.loads(raw_options)
            except (json.JSONDecodeError, TypeError):
                # Malformed options in the config: fall back to no options.
                options = {}
        return LLMConfig(
            model=model,
            base_url=base_url,
            api_key=api_key,
            type=llm_type,
            temperature=kwargs.pop("temperature", default_llm_temperature),
            max_model_len=kwargs.pop("max_model_len", default_llm_max_model_len),
            kwargs=kwargs,
            options=options,
        )

    @classmethod
    def build(cls,
              model: str,
              base_url: str | None = None,
              api_key: str | None = None,
              llm_type: LLMType = LLMType.LLM_CHAT,
              temperature: float = default_llm_temperature,
              max_model_len: int = default_llm_max_model_len,
              options: dict | None = None,
              kwargs: dict | None = None) -> "LLMConfig":
        """
        Build LLMConfig with explicit custom parameters.

        A non-positive ``max_model_len`` is treated as "unspecified" and
        replaced by the module default.
        """
        return LLMConfig(
            model=model,
            base_url=base_url,
            api_key=api_key,
            type=llm_type,
            temperature=temperature,
            max_model_len=max_model_len if max_model_len > 0 else default_llm_max_model_len,
            kwargs=kwargs or {},
            options=options or {},
        )


def create_openai_llm(config: LLMConfig, **kwargs) -> BaseChatModel:
    """Create a ``ChatOpenAI`` client from an ``LLMConfig``.

    Call-site ``kwargs`` take precedence over ``config.kwargs``; values of
    ``None`` are dropped so they do not override client defaults. Only a
    whitelisted set of constructor parameters is forwarded to ``ChatOpenAI``.

    Args:
        config: Connection and sampling configuration.
        **kwargs: Per-call overrides merged on top of ``config.kwargs``.

    Returns:
        A configured ``ChatOpenAI`` instance.
    """
    # Call-site overrides win over the config's stored kwargs.
    combined_kwargs = {**config.kwargs, **kwargs}

    llm_kwargs = {"model": config.model, "temperature": config.temperature}

    if config.base_url:
        llm_kwargs["base_url"] = config.base_url

    if config.api_key:
        llm_kwargs["api_key"] = config.api_key

    llm_kwargs.update(combined_kwargs)
    # Drop None values so they don't clobber ChatOpenAI defaults.
    filtered_kwargs = {k: v for k, v in llm_kwargs.items() if v is not None}

    # These parameters may arrive as JSON strings (e.g. from env-sourced
    # config); decode them into dicts before handing them to the client.
    for param in ("extra_body", "stream_options"):
        value = filtered_kwargs.get(param)
        if isinstance(value, str):
            try:
                filtered_kwargs[param] = json.loads(value)
            except (json.JSONDecodeError, TypeError):
                logging.getLogger(__name__).warning("Error parsing %s: %s", param, value)
                filtered_kwargs[param] = {}

    # stream_options is not a ChatOpenAI constructor argument; it must be
    # forwarded through model_kwargs instead.
    model_kwargs = {}
    if "stream_options" in filtered_kwargs:
        model_kwargs["stream_options"] = filtered_kwargs.pop("stream_options")

    allowed_params = {"model", "temperature", "max_tokens", "logprobs", "use_responses_api",
                      "timeout", "max_retries", "api_key", "base_url", "extra_body"}
    final_kwargs = {k: v for k, v in filtered_kwargs.items() if k in allowed_params}

    if model_kwargs:
        final_kwargs["model_kwargs"] = model_kwargs

    llm = ChatOpenAI(**final_kwargs)
    if config.options:
        # NOTE(review): on pydantic v2, model_config is class-level state, so
        # this mutation is shared by ALL ChatOpenAI instances — confirm intent.
        llm.model_config.update(config.options)
    return llm
