import pydantic
from typing import Optional, Dict, Any
from . import base_original_provider_usage


class OpenAIResponsesUsage(base_original_provider_usage.BaseOriginalProviderUsage):
    """OpenAI Responses API calls token usage data. Updated 11.03.2025"""

    output_tokens: int
    """Number of tokens in the generated response."""

    input_tokens: int
    """Number of tokens in the prompt."""

    total_tokens: int
    """Total number of tokens used in the request (prompt + completion)."""

    output_tokens_details: Optional["OutputTokensDetails"] = None
    """Breakdown of tokens used in a response."""

    input_tokens_details: Optional["InputTokensDetails"] = None
    """Breakdown of tokens used in the prompt."""

    def to_backend_compatible_flat_dict(self, parent_key_prefix: str) -> Dict[str, int]:
        """Flatten this usage object into a single-level dict for the backend.

        Nested detail models are converted to plain dicts first, then the
        inherited helper flattens everything (and appends any extra fields
        the provider sent) under ``parent_key_prefix``.
        """
        flat = dict(self.__dict__)

        # Replace nested pydantic models with plain dicts so they can be
        # flattened; a missing (None) detail block is passed through as-is.
        for detail_field in ("output_tokens_details", "input_tokens_details"):
            detail_model = getattr(self, detail_field)
            if detail_model is not None:
                flat[detail_field] = detail_model.model_dump()

        return self.flatten_result_and_add_model_extra(
            result=flat, parent_key_prefix=parent_key_prefix
        )

    @classmethod
    def from_original_usage_dict(
        cls, usage_dict: Dict[str, Any]
    ) -> "OpenAIResponsesUsage":
        """Build an instance from the raw usage dict returned by the provider.

        Detail sub-dicts are parsed into their dedicated models; anything
        that is absent or not a dict becomes None. The input dict is copied
        and never mutated.
        """
        remaining = dict(usage_dict)
        raw_output_details = remaining.pop("output_tokens_details", None)
        raw_input_details = remaining.pop("input_tokens_details", None)

        output_details: Optional[OutputTokensDetails] = None
        if isinstance(raw_output_details, dict):
            output_details = OutputTokensDetails(**raw_output_details)

        input_details: Optional[InputTokensDetails] = None
        if isinstance(raw_input_details, dict):
            input_details = InputTokensDetails(**raw_input_details)

        return cls(
            **remaining,
            output_tokens_details=output_details,
            input_tokens_details=input_details,
        )


class OutputTokensDetails(pydantic.BaseModel):
    # Breakdown of output tokens for an OpenAI Responses API call.
    # Fields not modeled explicitly (e.g. audio_tokens,
    # accepted_prediction_tokens, rejected_prediction_tokens) are still
    # retained on the instance thanks to extra="allow".
    model_config = pydantic.ConfigDict(extra="allow")

    reasoning_tokens: Optional[int] = None
    """Tokens generated by the model for reasoning."""


class InputTokensDetails(pydantic.BaseModel):
    # Breakdown of input (prompt) tokens for an OpenAI Responses API call.
    # Fields not modeled explicitly (e.g. audio_tokens) are still retained
    # on the instance thanks to extra="allow".
    model_config = pydantic.ConfigDict(extra="allow")

    cached_tokens: Optional[int] = None
    """Cached tokens present in the prompt."""