import json
from typing import Annotated, Any, Literal

from pydantic import BaseModel, BeforeValidator, Field

from weave import Model, op
from weave.prompt.prompt import format_message_with_template_vars
from weave.trace.context.weave_client_context import WeaveInitError, get_weave_client
from weave.trace_server.interface.builtin_object_classes import base_object_def
from weave.trace_server.trace_server_interface import (
    CompletionsCreateReq,
    CompletionsCreateRequestInputs,
)
from weave.utils.project_id import to_project_id

# Response formats accepted by the completions endpoint. Note that only
# "json_object" and "text" are currently handled end-to-end; "json_schema"
# support is a planned follow-up (see the TODO on response_format_schema below).
ResponseFormat = Literal["json_object", "json_schema", "text"]


def is_response_format(value: Any) -> bool:
    """Return True if *value* is a response-format string we can parse.

    Only "json_object" and "text" are accepted; "json_schema" is part of the
    ``ResponseFormat`` literal but is intentionally excluded here because it is
    not yet supported by the parsing path.
    """
    return isinstance(value, str) and value in {"json_object", "text"}


class Message(BaseModel):
    """A message in a conversation with an LLM.

    Loosely follows litellm's message format
    (https://docs.litellm.ai/docs/completion/input#properties-of-messages).

    Attributes:
        role: The role of the message's author. Can be: system, user, assistant, function or tool.
        content: The contents of the message. Required for all messages, but may be null for assistant messages with function calls.
            May also be a list of content-part dicts (e.g. for multimodal messages) — TODO confirm expected part schema.
        name: The name of the author of the message. Required if role is "function". Must match the name of the function represented in content.
              Can contain characters (a-z, A-Z, 0-9), and underscores, with a maximum length of 64 characters.
        function_call: The name and arguments of a function that should be called, as generated by the model.
        tool_call_id: Tool call that this message is responding to.
    """

    role: str
    content: str | list[dict] | None = None
    name: str | None = None
    function_call: dict | None = None
    tool_call_id: str | None = None


class LLMStructuredCompletionModelDefaultParams(BaseModel):
    """Default parameters for LLMStructuredCompletionModel.

    Attributes:
        messages_template: A list of Messages to use as a template. Messages can contain
            template variables using {variable_name} syntax. These will be substituted
            when predict() is called with template_vars.
        prompt: A reference string to a MessagesPrompt object. If provided, this takes
            precedence over messages_template. The referenced prompt's format() method will
            be used to generate messages with template variable substitution.
            Example: "weave:///entity/project/object/my_prompt:latest"
    """

    # This is a list of Messages, loosely following litellm's message format
    # https://docs.litellm.ai/docs/completion/input#properties-of-messages
    messages_template: list[Message] | None = None
    prompt: base_object_def.RefStr | None = None

    # Standard sampling / decoding parameters. Unset (None) fields are dropped
    # before the request is built (see parse_params_to_litellm_params), so the
    # provider's own defaults apply.
    temperature: float | None = None
    top_p: float | None = None
    max_tokens: int | None = None
    presence_penalty: float | None = None
    frequency_penalty: float | None = None
    stop: list[str] | None = None
    # Number of completions to request; translated to litellm's `n` parameter.
    n_times: int | None = None
    # Function-calling definitions, forwarded as-is when non-empty.
    functions: list[dict] | None = None

    # Either json, text, or json_schema
    response_format: ResponseFormat | None = None

    # TODO: Currently not used. Fast follow up with json_schema
    # if default_params.response_format is set to JSON_SCHEMA, this will be used
    # response_format_schema: dict | None = None


def cast_to_message_list(obj: Any) -> list[Message]:
    """Coerce *obj* into a list of ``Message`` objects.

    Lists are converted element-wise; a single Message, dict, or string is
    wrapped into a one-element list (strings become user-role messages).

    Raises:
        TypeError: If *obj* (or a list element) cannot be converted.
    """
    if isinstance(obj, list):
        return [cast_to_message(element) for element in obj]
    # Every non-list case is the single-message conversion, wrapped in a list.
    return [cast_to_message(obj)]


def cast_to_message(obj: Any) -> Message:
    """Coerce *obj* into a single ``Message``.

    Strings become user-role messages; dicts are validated into the model;
    existing ``Message`` instances pass through unchanged.

    Raises:
        TypeError: If *obj* is none of str, dict, or Message.
    """
    if isinstance(obj, str):
        return Message(content=obj, role="user")
    if isinstance(obj, Message):
        return obj
    if isinstance(obj, dict):
        return Message.model_validate(obj)
    raise TypeError("Unable to cast to Message")


def cast_to_llm_structured_model_params(
    obj: Any,
) -> LLMStructuredCompletionModelDefaultParams:
    """Coerce *obj* into ``LLMStructuredCompletionModelDefaultParams``.

    Existing instances pass through unchanged; dicts are validated into the
    model.

    Raises:
        TypeError: If *obj* is neither a dict nor an instance of the model.
    """
    if isinstance(obj, dict):
        return LLMStructuredCompletionModelDefaultParams.model_validate(obj)
    if isinstance(obj, LLMStructuredCompletionModelDefaultParams):
        return obj
    raise TypeError("Unable to cast to LLMStructuredCompletionModelDefaultParams")


# Pydantic-validated aliases: these accept "message-like" inputs (str, dict,
# Message, or lists thereof) and normalize them via the cast_* helpers before
# field validation runs.
MessageListLike = Annotated[list[Message], BeforeValidator(cast_to_message_list)]
MessageLike = Annotated[Message, BeforeValidator(cast_to_message)]
LLMStructuredModelParamsLike = Annotated[
    LLMStructuredCompletionModelDefaultParams,
    BeforeValidator(cast_to_llm_structured_model_params),
]


class LLMStructuredCompletionModel(Model):
    """A Weave Model that produces completions via the trace server's
    ``completions_create`` endpoint.

    Configured with an LLM identifier and default completion parameters;
    ``predict`` merges template/prompt messages with caller input and issues a
    single completions request through the active weave client.
    """

    # Either a "<provider>/<model>" string or a ref to a provider model object.
    llm_model_id: str | base_object_def.RefStr

    # Per-model defaults; individual calls may override them via `config`.
    default_params: LLMStructuredCompletionModelDefaultParams = Field(
        default_factory=LLMStructuredCompletionModelDefaultParams
    )

    @op
    def predict(
        self,
        user_input: MessageListLike | None = None,
        config: LLMStructuredModelParamsLike | None = None,
        **template_vars: Any,
    ) -> Message | str | dict[str, Any]:
        """Generates a prediction by preparing messages (template + user_input)
        and calling the LLM completions endpoint with overridden config, using the provided client.

        Messages are prepared in one of two ways:
        1. If default_params.prompt is set, the referenced MessagesPrompt object is
           loaded and its format() method is called with template_vars to generate messages.
        2. If default_params.messages_template is set (and prompt is not), the template
           messages are used with template variable substitution.

        Note: If both prompt and messages_template are provided, prompt takes precedence.

        Args:
            user_input: The user input messages to append after template messages
            config: Optional configuration to override default parameters
            **template_vars: Variables to substitute in the messages template using {variable_name} syntax

        Returns:
            The parsed completion: a dict when response_format is "json_object",
            or the raw content string when it is "text".

        Raises:
            WeaveInitError: If no weave client has been initialized.
            RuntimeError: If the completions call fails or the response payload
                cannot be parsed.
        """
        if user_input is None:
            user_input = []

        # A completions request needs an initialized client for entity/project
        # resolution and server access.
        current_client = get_weave_client()
        if current_client is None:
            raise WeaveInitError(
                "You must call `weave.init(<project_name>)` first, to predict with a LLMStructuredCompletionModel"
            )

        req = self.prepare_completion_request(
            project_id=to_project_id(current_client.entity, current_client.project),
            user_input=user_input,
            config=config,
            **template_vars,
        )

        # Call the LLM completions endpoint via the trace server.
        try:
            api_response = current_client.server.completions_create(req=req)
        except Exception as e:
            raise RuntimeError("Failed to call LLM completions endpoint.") from e

        # Extract the message from the API response.
        try:
            # The 'response' attribute of CompletionsCreateRes is a dict
            response_payload = api_response.response
            # The request stores response_format as {"type": ...} (litellm
            # shape); recover the plain type string for parsing.
            response_format = (
                req.inputs.response_format.get("type")
                if req.inputs.response_format is not None
                else None
            )
            return parse_response(response_payload, response_format)
        except (
            KeyError,
            IndexError,
            TypeError,
            AttributeError,
            json.JSONDecodeError,
        ) as e:
            # NOTE(review): parse_response raises a plain ValueError when
            # response_format is None or unsupported; that is NOT caught here
            # (json.JSONDecodeError is the only ValueError subclass in the
            # tuple), so it propagates to the caller — confirm this is intended.
            raise RuntimeError(
                f"Failed to extract message from LLM response payload. Response: {api_response.response}"
            ) from e

    def prepare_completion_request(
        self,
        project_id: str,
        user_input: MessageListLike,
        config: LLMStructuredModelParamsLike | None,
        **template_vars: Any,
    ) -> CompletionsCreateReq:
        """Build the ``CompletionsCreateReq`` for a predict call.

        Merges template messages (when ``default_params.messages_template`` is
        set and ``default_params.prompt`` is not) with *user_input*, layers
        *config* overrides on top of the model's default parameters, and packs
        everything into the request object.

        Args:
            project_id: The "entity/project"-derived project identifier.
            user_input: Messages to append after any template messages.
            config: Optional parameter overrides for this call.
            **template_vars: Variables substituted into the messages template.

        Returns:
            The fully-populated completions request.
        """
        # Ensure user_input is properly converted to a list of Message objects
        # This is needed because the @op decorator might interfere with Pydantic validation
        if not isinstance(user_input, list) or (
            user_input and not isinstance(user_input[0], Message)
        ):
            user_input = cast_to_message_list(user_input)

        # 1. Prepare messages from messages_template (if no prompt is set)
        # Note: If prompt is set, we don't prepare messages here - we pass the prompt
        # reference to the completions endpoint which will resolve and substitute it
        template_msgs = None

        # Only use messages_template if prompt is NOT set
        if (
            self.default_params
            and self.default_params.messages_template
            and not self.default_params.prompt
        ):
            template_msgs = self.default_params.messages_template
            if template_vars:
                # Convert Message objects to dicts, apply template vars, convert back
                formatted_dicts = [
                    format_message_with_template_vars(
                        msg.model_dump(exclude_none=True), **template_vars
                    )
                    for msg in template_msgs
                ]
                template_msgs = [Message.model_validate(d) for d in formatted_dicts]

        prepared_messages_dicts = _prepare_llm_messages(template_msgs, user_input)

        # 2. Prepare completion parameters, starting with defaults from LLMStructuredCompletionModel
        completion_params: dict[str, Any] = {}
        default_p_model = self.default_params
        if default_p_model:
            completion_params = parse_params_to_litellm_params(default_p_model)

        # 3. Override parameters with the provided config dictionary
        if config:
            completion_params = {
                **completion_params,
                **parse_params_to_litellm_params(config),
            }

        # 4. Create the completion inputs
        model_id_str = str(self.llm_model_id)

        # Include template_vars if they exist (the server needs them when a
        # prompt ref is resolved remotely)
        if template_vars:
            completion_params["template_vars"] = template_vars

        completion_inputs = CompletionsCreateRequestInputs(
            model=model_id_str, messages=prepared_messages_dicts, **completion_params
        )
        req = CompletionsCreateReq(
            project_id=project_id,
            inputs=completion_inputs,
        )

        return req


def parse_response(
    response_payload: dict, response_format: ResponseFormat | None
) -> Message | str | dict[str, Any]:
    """Extract the completion from an OpenAI-style response payload.

    Args:
        response_payload: The raw response dict (expects ``choices[0].message``).
        response_format: How to interpret the message content: "text" returns
            the content string, "json_object" JSON-decodes it.

    Returns:
        The content string for "text", or the decoded dict for "json_object".

    Raises:
        RuntimeError: If the payload carries an "error" field.
        ValueError: If *response_format* is anything else (including None).
    """
    if response_payload.get("error"):
        # Or handle more gracefully depending on desired behavior
        raise RuntimeError(f"LLM API returned an error: {response_payload['error']}")

    # Assuming OpenAI-like structure: a list of choices, first choice has the message
    message = response_payload["choices"][0]["message"]

    if response_format == "json_object":
        return json.loads(message["content"])
    if response_format == "text":
        return message["content"]
    raise ValueError(f"Invalid response_format: {response_format}")


def _prepare_llm_messages(
    template_messages: list[Message] | None,
    user_input: list[Message],
) -> list[dict[str, Any]]:
    """Flatten template and user messages into API-ready dictionaries.

    Template messages (if any) come first, followed by the user input, each
    dumped with None-valued fields excluded.

    Returns:
        A list of message dictionaries in conversation order.
    """
    ordered_messages = [*(template_messages or []), *user_input]
    return [message.model_dump(exclude_none=True) for message in ordered_messages]


def parse_params_to_litellm_params(
    params_source: LLMStructuredCompletionModelDefaultParams,
) -> dict[str, Any]:
    """Translate model default params into litellm-style completion kwargs.

    Unset (None) fields are dropped. ``n_times`` is renamed to ``n``,
    ``messages_template`` is skipped (it is rendered separately),
    ``functions``/``stop`` are forwarded only when non-empty lists, and
    ``response_format`` is normalized into litellm's ``{"type": ...}`` dict
    (invalid/unsupported formats are silently dropped).

    Returns:
        A dict of kwargs suitable for CompletionsCreateRequestInputs.
    """
    litellm_params: dict[str, Any] = {}

    for field_name, field_value in params_source.model_dump(
        exclude_none=True
    ).items():
        if field_name == "messages_template":
            # Rendered elsewhere; never forwarded directly.
            continue
        if field_name == "n_times":
            litellm_params["n"] = field_value
        elif field_name == "response_format":
            if isinstance(field_value, str) and is_response_format(field_value):
                litellm_params["response_format"] = {"type": field_value}
            elif isinstance(field_value, dict) and is_response_format(
                field_value.get("type")
            ):
                # Pre-formed dict with a valid type: pass through unchanged.
                litellm_params["response_format"] = field_value
        elif field_name in ("functions", "stop"):
            if isinstance(field_value, list) and field_value:
                litellm_params[field_name] = field_value
        else:
            litellm_params[field_name] = field_value

    return litellm_params
