import os
import sys

sys.path.insert(
    0, os.path.abspath("../../..")
)  # Adds the parent directory to the system path

from litellm.responses.litellm_completion_transformation.transformation import (
    LiteLLMCompletionResponsesConfig,
)
from litellm.types.llms.openai import (
    ChatCompletionResponseMessage,
    ChatCompletionToolMessage,
)
from litellm.types.utils import (
    Choices,
    CompletionTokensDetailsWrapper,
    Message,
    ModelResponse,
    PromptTokensDetailsWrapper,
    Usage,
)


class TestLiteLLMCompletionResponsesConfig:
    """Tests for LiteLLMCompletionResponsesConfig.

    Covers two directions of transformation:
    - Responses API input items (input_file / input_image) -> Chat Completion
      content items.
    - Chat Completion responses (ModelResponse) -> Responses API responses,
      including reasoning-content handling and status mapping.
    """

    def test_transform_input_file_item_to_file_item_with_file_id(self):
        """Test transformation of input_file item with file_id to Chat Completion file format"""
        # Setup
        input_item = {"type": "input_file", "file_id": "file-abc123xyz"}

        # Execute
        result = (
            LiteLLMCompletionResponsesConfig._transform_input_file_item_to_file_item(
                input_item
            )
        )

        # Assert
        expected = {"type": "file", "file": {"file_id": "file-abc123xyz"}}
        assert result == expected
        assert result["type"] == "file"
        assert result["file"]["file_id"] == "file-abc123xyz"

    def test_transform_input_file_item_to_file_item_with_file_data(self):
        """Test transformation of input_file item with file_data to Chat Completion file format"""
        # Setup
        file_data = "base64encodeddata"
        input_item = {"type": "input_file", "file_data": file_data}

        # Execute
        result = (
            LiteLLMCompletionResponsesConfig._transform_input_file_item_to_file_item(
                input_item
            )
        )

        # Assert
        expected = {"type": "file", "file": {"file_data": file_data}}
        assert result == expected
        assert result["type"] == "file"
        assert result["file"]["file_data"] == file_data

    def test_transform_input_file_item_to_file_item_with_both_fields(self):
        """Test transformation of input_file item with both file_id and file_data"""
        # Setup
        input_item = {
            "type": "input_file",
            "file_id": "file-abc123xyz",
            "file_data": "base64encodeddata",
        }

        # Execute
        result = (
            LiteLLMCompletionResponsesConfig._transform_input_file_item_to_file_item(
                input_item
            )
        )

        # Assert: both fields are carried through together
        expected = {
            "type": "file",
            "file": {"file_id": "file-abc123xyz", "file_data": "base64encodeddata"},
        }
        assert result == expected
        assert result["type"] == "file"
        assert result["file"]["file_id"] == "file-abc123xyz"
        assert result["file"]["file_data"] == "base64encodeddata"

    def test_transform_input_file_item_to_file_item_empty_file_fields(self):
        """Test transformation of input_file item with no file_id or file_data"""
        # Setup
        input_item = {"type": "input_file"}

        # Execute
        result = (
            LiteLLMCompletionResponsesConfig._transform_input_file_item_to_file_item(
                input_item
            )
        )

        # Assert: an empty "file" dict is produced rather than raising
        expected = {"type": "file", "file": {}}
        assert result == expected
        assert result["type"] == "file"
        assert result["file"] == {}

    def test_transform_input_file_item_to_file_item_ignores_other_fields(self):
        """Test that transformation only includes file_id and file_data, ignoring other fields"""
        # Setup
        input_item = {
            "type": "input_file",
            "file_id": "file-abc123xyz",
            "extra_field": "should_be_ignored",
            "another_field": 123,
        }

        # Execute
        result = (
            LiteLLMCompletionResponsesConfig._transform_input_file_item_to_file_item(
                input_item
            )
        )

        # Assert
        expected = {"type": "file", "file": {"file_id": "file-abc123xyz"}}
        assert result == expected
        assert "extra_field" not in result["file"]
        assert "another_field" not in result["file"]

    def test_transform_input_image_item_to_image_item_with_image_url(self):
        """Test transformation of input_image item with image_url to Chat Completion image format"""
        # Setup
        image_url = "https://example.com/image.png"
        input_item = {"type": "input_image", "image_url": image_url, "detail": "high"}

        # Execute
        result = (
            LiteLLMCompletionResponsesConfig._transform_input_image_item_to_image_item(
                input_item
            )
        )

        # Assert: an explicit detail value is preserved
        expected = {"type": "image_url", "image_url": {"url": image_url, "detail": "high"}}
        assert result == expected
        assert result["type"] == "image_url"
        assert result["image_url"]["url"] == image_url
        assert result["image_url"]["detail"] == "high"

    def test_transform_input_image_item_to_image_item_with_image_data(self):
        """Test transformation of input_image item with an empty image_url string.

        The empty string must be passed through unchanged (not dropped or
        replaced), while the explicit detail value is preserved.
        """
        # Setup
        image_url = ""
        input_item = {"type": "input_image", "image_url": image_url, "detail": "high"}

        # Execute
        result = (
            LiteLLMCompletionResponsesConfig._transform_input_image_item_to_image_item(
                input_item
            )
        )

        # Assert
        expected = {"type": "image_url", "image_url": {"url": image_url, "detail": "high"}}
        assert result == expected
        assert result["type"] == "image_url"
        assert result["image_url"]["url"] == image_url
        assert result["image_url"]["detail"] == "high"

    def test_transform_input_image_item_to_image_item_without_detail(self):
        """Test transformation of input_image item with no detail"""
        # Setup
        image_url = "https://example.com/image.png"
        input_item = {"type": "input_image", "image_url": image_url}

        # Execute
        result = (
            LiteLLMCompletionResponsesConfig._transform_input_image_item_to_image_item(
                input_item
            )
        )

        # Assert: missing detail defaults to "auto"
        expected = {"type": "image_url", "image_url": {"url": image_url, "detail": "auto"}}
        assert result == expected
        assert result["type"] == "image_url"
        assert result["image_url"]["url"] == image_url
        assert result["image_url"]["detail"] == "auto"

    def test_transform_input_image_item_to_image_item_empty_image_fields(self):
        """Test transformation of input_image item with no image_url or detail"""
        # Setup
        input_item = {"type": "input_image"}

        # Execute
        result = (
            LiteLLMCompletionResponsesConfig._transform_input_image_item_to_image_item(
                input_item
            )
        )

        # Assert: defaults are empty url and "auto" detail
        expected = {"type": "image_url", "image_url": {"url": "", "detail": "auto"}}
        assert result == expected
        assert result["type"] == "image_url"
        assert result["image_url"]["url"] == ""
        assert result["image_url"]["detail"] == "auto"

    def test_transform_input_image_item_to_image_item_ignores_other_fields(self):
        """Test transformation of input_image item with other fields"""
        # Setup
        input_item = {
            "type": "input_image",
            "image_url": "https://example.com/image.png",
            "extra_field": "should_be_ignored",
            "another_field": 123,
        }

        # Execute
        result = (
            LiteLLMCompletionResponsesConfig._transform_input_image_item_to_image_item(
                input_item
            )
        )

        # Assert
        expected = {"type": "image_url", "image_url": {"url": "https://example.com/image.png", "detail": "auto"}}
        assert result == expected
        assert result["type"] == "image_url"
        assert result["image_url"]["url"] == "https://example.com/image.png"
        assert result["image_url"]["detail"] == "auto"
        # NOTE(review): these membership checks are against the top-level result,
        # unlike the input_file test above which checks the nested dict — the
        # equality assert above already rules out extras either way.
        assert "extra_field" not in result
        assert "another_field" not in result

    def test_transform_chat_completion_response_with_reasoning_content(self):
        """Test that reasoning content is preserved in the full transformation pipeline"""
        # Setup
        chat_completion_response = ModelResponse(
            id="test-response-id",
            created=1234567890,
            model="test-model",
            object="chat.completion",
            choices=[
                Choices(
                    finish_reason="stop",
                    index=0,
                    message=Message(
                        content="The answer is 42.",
                        role="assistant",
                        reasoning_content="Let me think about this step by step. The question asks for the meaning of life, and according to The Hitchhiker's Guide to the Galaxy, the answer is 42.",
                    ),
                )
            ],
        )

        # Execute
        responses_api_response = LiteLLMCompletionResponsesConfig.transform_chat_completion_response_to_responses_api_response(
            request_input="What is the meaning of life?",
            responses_api_request={},
            chat_completion_response=chat_completion_response,
        )

        # Assert: expect at least a reasoning item plus a message item
        assert hasattr(responses_api_response, "output")
        assert (
            len(responses_api_response.output) >= 2
        )

        reasoning_items = [
            item for item in responses_api_response.output if item.type == "reasoning"
        ]
        assert len(reasoning_items) == 1, "Should have exactly one reasoning item"

        reasoning_item = reasoning_items[0]
        # Note: ID auto-generation was disabled, so reasoning items may not have IDs
        # Only assert ID format if an ID is present
        if hasattr(reasoning_item, 'id') and reasoning_item.id:
            assert reasoning_item.id.startswith("rs_"), f"Expected ID to start with 'rs_', got: {reasoning_item.id}"
        assert reasoning_item.status == "completed"
        assert reasoning_item.role == "assistant"
        assert len(reasoning_item.content) == 1
        assert reasoning_item.content[0].type == "output_text"
        assert "step by step" in reasoning_item.content[0].text
        assert "42" in reasoning_item.content[0].text

        message_items = [
            item for item in responses_api_response.output if item.type == "message"
        ]
        assert len(message_items) == 1, "Should have exactly one message item"

        message_item = message_items[0]
        assert message_item.content[0].text == "The answer is 42."

    def test_transform_chat_completion_response_without_reasoning_content(self):
        """Test that transformation works normally when no reasoning content is present"""
        # Setup
        chat_completion_response = ModelResponse(
            id="test-response-id",
            created=1234567890,
            model="test-model",
            object="chat.completion",
            choices=[
                Choices(
                    finish_reason="stop",
                    index=0,
                    message=Message(
                        content="Just a regular answer.",
                        role="assistant",
                    ),
                )
            ],
        )

        # Execute
        responses_api_response = LiteLLMCompletionResponsesConfig.transform_chat_completion_response_to_responses_api_response(
            request_input="A simple question?",
            responses_api_request={},
            chat_completion_response=chat_completion_response,
        )

        # Assert: no reasoning item should be synthesized
        reasoning_items = [
            item for item in responses_api_response.output if item.type == "reasoning"
        ]
        assert len(reasoning_items) == 0, "Should have no reasoning items"

        message_items = [
            item for item in responses_api_response.output if item.type == "message"
        ]
        assert len(message_items) == 1, "Should have exactly one message item"
        assert message_items[0].content[0].text == "Just a regular answer."

    def test_transform_chat_completion_response_multiple_choices_with_reasoning(self):
        """Test that only reasoning from first choice is included when multiple choices exist"""
        # Setup
        chat_completion_response = ModelResponse(
            id="test-response-id",
            created=1234567890,
            model="test-model",
            object="chat.completion",
            choices=[
                Choices(
                    finish_reason="stop",
                    index=0,
                    message=Message(
                        content="First answer.",
                        role="assistant",
                        reasoning_content="First reasoning process.",
                    ),
                ),
                Choices(
                    finish_reason="stop",
                    index=1,
                    message=Message(
                        content="Second answer.",
                        role="assistant",
                        reasoning_content="Second reasoning process.",
                    ),
                ),
            ],
        )

        # Execute
        responses_api_response = LiteLLMCompletionResponsesConfig.transform_chat_completion_response_to_responses_api_response(
            request_input="A question with multiple answers?",
            responses_api_request={},
            chat_completion_response=chat_completion_response,
        )

        # Assert: only the first choice's reasoning survives; all messages do
        reasoning_items = [
            item for item in responses_api_response.output if item.type == "reasoning"
        ]
        assert len(reasoning_items) == 1, "Should have exactly one reasoning item"
        assert reasoning_items[0].content[0].text == "First reasoning process."

        message_items = [
            item for item in responses_api_response.output if item.type == "message"
        ]
        assert len(message_items) == 2, "Should have two message items"

    def test_transform_chat_completion_response_status_with_stop(self):
        """
        Test that transforming a chat completion response with 'stop' finish_reason
        results in 'completed' status in the responses API response.

        This is the main test case for GitHub issue #15714.
        """
        chat_completion_response = ModelResponse(
            id="test-response-id",
            created=1234567890,
            model="gemini-2.5-flash-preview-09-2025",
            object="chat.completion",
            choices=[
                Choices(
                    finish_reason="stop",
                    index=0,
                    message=Message(
                        content="That's completely fine! How can I help you with your test?",
                        role="assistant",
                    ),
                )
            ],
        )

        responses_api_response = (
            LiteLLMCompletionResponsesConfig.transform_chat_completion_response_to_responses_api_response(
                request_input="this is a test",
                responses_api_request={},
                chat_completion_response=chat_completion_response,
            )
        )

        assert responses_api_response.status == "completed"
        # Redundant with the assert above, but documents the full set of
        # status values the Responses API considers valid.
        assert responses_api_response.status in [
            "completed",
            "failed",
            "in_progress",
            "cancelled",
            "queued",
            "incomplete",
        ]

    def test_transform_chat_completion_response_output_item_status(self):
        """
        Test that output items in the transformed response also have valid status values.

        This verifies the fix for GitHub issue #15714.
        """
        chat_completion_response = ModelResponse(
            id="test-response-id",
            created=1234567890,
            model="gemini-2.5-flash-preview-09-2025",
            object="chat.completion",
            choices=[
                Choices(
                    finish_reason="stop",
                    index=0,
                    message=Message(
                        content="Test message",
                        role="assistant",
                    ),
                )
            ],
        )

        responses_api_response = (
            LiteLLMCompletionResponsesConfig.transform_chat_completion_response_to_responses_api_response(
                request_input="this is a test",
                responses_api_request={},
                chat_completion_response=chat_completion_response,
            )
        )

        message_items = [
            item for item in responses_api_response.output if item.type == "message"
        ]
        assert len(message_items) > 0

        for item in message_items:
            # Status must be a valid Responses API value, never the raw
            # chat-completion finish_reason ("stop").
            assert item.status in [
                "completed",
                "failed",
                "in_progress",
                "cancelled",
                "queued",
                "incomplete",
            ]
            assert item.status != "stop"


class TestFunctionCallTransformation:
    """Tests covering the transformation of Responses API function_call input
    items into Chat Completion assistant/tool messages."""

    def test_function_call_detection(self):
        """function_call items are detected; output items and plain messages are not."""
        call_item = {
            "type": "function_call",
            "name": "get_weather",
            "arguments": '{"location": "test"}',
            "call_id": "test_id",
        }
        output_item = {
            "type": "function_call_output",
            "call_id": "test_id",
            "output": "result",
        }
        plain_message = {"type": "message", "role": "user", "content": "Hello"}

        is_call = LiteLLMCompletionResponsesConfig._is_input_item_function_call
        is_output = LiteLLMCompletionResponsesConfig._is_input_item_tool_call_output

        # Only the function_call item classifies as a function call.
        assert is_call(call_item)
        assert not is_call(output_item)
        assert not is_call(plain_message)

        # Only the function_call_output item classifies as tool-call output.
        assert is_output(output_item)
        assert not is_output(call_item)
        assert not is_output(plain_message)

    def test_function_call_transformation(self):
        """A function_call item becomes one assistant message carrying a single tool call."""
        call_item = {
            "type": "function_call",
            "name": "get_weather",
            "arguments": '{"location": "São Paulo, Brazil"}',
            "call_id": "call_123",
            "id": "call_123",
            "status": "completed",
        }

        messages = LiteLLMCompletionResponsesConfig._transform_responses_api_function_call_to_chat_completion_message(
            function_call=call_item
        )

        assert len(messages) == 1
        assistant_msg = messages[0]

        # Assistant message with no textual content — the call carries it all.
        assert assistant_msg.get("role") == "assistant"
        assert assistant_msg.get("content") is None

        tool_calls = assistant_msg.get("tool_calls", [])
        assert len(tool_calls) == 1

        first_call = tool_calls[0]
        assert first_call.get("id") == "call_123"
        assert first_call.get("type") == "function"

        fn = first_call.get("function", {})
        assert fn.get("name") == "get_weather"
        assert fn.get("arguments") == '{"location": "São Paulo, Brazil"}'

    def test_complete_input_transformation_with_function_calls(self):
        """End-to-end input transform using the exact payload from the reported issue."""
        call_id = "call_1fe70e2a-a596-45ef-b72c-9b8567c460e5"
        test_input = [
            {
                "type": "message",
                "role": "user",
                "content": "How is the weather in São Paulo today ?",
            },
            {
                "type": "function_call",
                "arguments": '{"location": "São Paulo, Brazil"}',
                "call_id": call_id,
                "name": "get_weather",
                "id": call_id,
                "status": "completed",
            },
            {
                "type": "function_call_output",
                "call_id": call_id,
                "output": "Rainy",
            },
        ]

        # Previously raised: "Invalid content type: <class 'NoneType'>"
        messages = LiteLLMCompletionResponsesConfig._transform_response_input_param_to_chat_completion_message(
            input=test_input
        )

        assert len(messages) == 3
        user_msg, assistant_msg, tool_msg = messages

        # User message passes through unchanged.
        assert user_msg.get("role") == "user"
        assert user_msg.get("content") == "How is the weather in São Paulo today ?"

        # function_call becomes an assistant message with one tool call.
        assert assistant_msg.get("role") == "assistant"
        assert assistant_msg.get("tool_calls") is not None
        assert len(assistant_msg.get("tool_calls", [])) == 1
        assert (
            assistant_msg.get("tool_calls")[0].get("function", {}).get("name")
            == "get_weather"
        )

        # function_call_output becomes a tool message bound to the same call id.
        assert tool_msg.get("role") == "tool"
        assert tool_msg.get("content") == "Rainy"
        assert tool_msg.get("tool_call_id") == call_id

    def test_complete_request_transformation_with_function_calls(self):
        """Full request transform used by the responses API works for non-OpenAI models."""
        call_id = "call_1fe70e2a-a596-45ef-b72c-9b8567c460e5"
        test_input = [
            {
                "type": "message",
                "role": "user",
                "content": "How is the weather in São Paulo today ?",
            },
            {
                "type": "function_call",
                "arguments": '{"location": "São Paulo, Brazil"}',
                "call_id": call_id,
                "name": "get_weather",
                "id": call_id,
                "status": "completed",
            },
            {
                "type": "function_call_output",
                "call_id": call_id,
                "output": "Rainy",
            },
        ]

        weather_tool = {
            "type": "function",
            "name": "get_weather",
            "description": "Get current temperature for a given location.",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "City and country e.g. Bogotá, Colombia",
                    }
                },
                "required": ["location"],
                "additionalProperties": False,
            },
        }

        # Must succeed without errors for non-OpenAI models.
        result = LiteLLMCompletionResponsesConfig.transform_responses_api_request_to_chat_completion_request(
            model="gemini/gemini-2.0-flash",
            input=test_input,
            responses_api_request={"store": False, "tools": [weather_tool]},
            extra_headers={"X-Test-Header": "test-value"},
        )

        for key in ("messages", "model", "tools"):
            assert key in result

        messages = result["messages"]
        assert len(messages) == 3
        assert result["model"] == "gemini/gemini-2.0-flash"

        # Resulting chat-completion order: user, assistant (tool call), tool.
        assert messages[0]["role"] == "user"
        assert messages[1]["role"] == "assistant"
        assert "tool_calls" in messages[1]
        assert messages[2]["role"] == "tool"

        assert result["extra_headers"] == {"X-Test-Header": "test-value"}

    def test_function_call_without_call_id_fallback_to_id(self):
        """When 'call_id' is absent the transform falls back to the 'id' field."""
        call_item = {
            "type": "function_call",
            "name": "get_weather",
            "arguments": '{"location": "test"}',
            "id": "fallback_id",  # deliberately no 'call_id'
        }

        messages = LiteLLMCompletionResponsesConfig._transform_responses_api_function_call_to_chat_completion_message(
            function_call=call_item
        )

        assert len(messages) == 1
        tool_calls = messages[0].get("tool_calls", [])
        assert len(tool_calls) == 1
        assert tool_calls[0].get("id") == "fallback_id"


class TestUsageTransformation:
    """Tests for transforming Chat Completion usage objects into the
    Responses API usage shape (input/output token details included)."""

    @staticmethod
    def _response_with_usage(model, usage):
        # Minimal ModelResponse fixture carrying the given usage object;
        # only the usage is relevant to these tests.
        return ModelResponse(
            id="test-response-id",
            created=1234567890,
            model=model,
            object="chat.completion",
            usage=usage,
            choices=[
                Choices(
                    finish_reason="stop",
                    index=0,
                    message=Message(content="Hello!", role="assistant"),
                )
            ],
        )

    def test_transform_usage_with_cached_tokens_anthropic(self):
        """Anthropic-style cached tokens surface in input_tokens_details."""
        usage = Usage(
            prompt_tokens=13,
            completion_tokens=27,
            total_tokens=40,
            prompt_tokens_details=PromptTokensDetailsWrapper(
                cached_tokens=5,  # maps from Anthropic cache_read_input_tokens
                text_tokens=8,
            ),
        )

        transformed = LiteLLMCompletionResponsesConfig._transform_chat_completion_usage_to_responses_usage(
            chat_completion_response=self._response_with_usage("claude-sonnet-4", usage)
        )

        assert transformed.input_tokens == 13
        assert transformed.output_tokens == 27
        assert transformed.total_tokens == 40
        in_details = transformed.input_tokens_details
        assert in_details is not None
        assert in_details.cached_tokens == 5
        assert in_details.text_tokens == 8

    def test_transform_usage_with_cached_tokens_gemini(self):
        """Gemini-style cached tokens surface in input_tokens_details."""
        usage = Usage(
            prompt_tokens=9,
            completion_tokens=27,
            total_tokens=36,
            prompt_tokens_details=PromptTokensDetailsWrapper(
                cached_tokens=3,  # maps from Gemini cachedContentTokenCount
                text_tokens=6,
            ),
        )

        transformed = LiteLLMCompletionResponsesConfig._transform_chat_completion_usage_to_responses_usage(
            chat_completion_response=self._response_with_usage("gemini-2.0-flash", usage)
        )

        assert transformed.input_tokens == 9
        assert transformed.output_tokens == 27
        assert transformed.total_tokens == 36
        in_details = transformed.input_tokens_details
        assert in_details is not None
        assert in_details.cached_tokens == 3
        assert in_details.text_tokens == 6

    def test_transform_usage_with_reasoning_tokens_gemini(self):
        """Gemini-style reasoning tokens surface in output_tokens_details."""
        usage = Usage(
            prompt_tokens=10,
            completion_tokens=100,
            total_tokens=110,
            completion_tokens_details=CompletionTokensDetailsWrapper(
                reasoning_tokens=50,  # maps from Gemini thoughtsTokenCount
                text_tokens=50,
            ),
        )

        transformed = LiteLLMCompletionResponsesConfig._transform_chat_completion_usage_to_responses_usage(
            chat_completion_response=self._response_with_usage("gemini-2.0-flash", usage)
        )

        assert transformed.output_tokens == 100
        out_details = transformed.output_tokens_details
        assert out_details is not None
        assert out_details.reasoning_tokens == 50
        assert out_details.text_tokens == 50

    def test_transform_usage_with_cached_and_reasoning_tokens(self):
        """Cached (input) and reasoning (output) details are both preserved."""
        usage = Usage(
            prompt_tokens=13,
            completion_tokens=100,
            total_tokens=113,
            prompt_tokens_details=PromptTokensDetailsWrapper(
                cached_tokens=5,  # Anthropic cache_read_input_tokens
                text_tokens=8,
            ),
            completion_tokens_details=CompletionTokensDetailsWrapper(
                reasoning_tokens=50,  # Gemini thoughtsTokenCount
                text_tokens=50,
            ),
        )

        transformed = LiteLLMCompletionResponsesConfig._transform_chat_completion_usage_to_responses_usage(
            chat_completion_response=self._response_with_usage("claude-sonnet-4", usage)
        )

        assert transformed.input_tokens == 13
        assert transformed.output_tokens == 100
        assert transformed.total_tokens == 113

        # Input side carries the cache details.
        in_details = transformed.input_tokens_details
        assert in_details is not None
        assert in_details.cached_tokens == 5
        assert in_details.text_tokens == 8

        # Output side carries the reasoning details.
        out_details = transformed.output_tokens_details
        assert out_details is not None
        assert out_details.reasoning_tokens == 50
        assert out_details.text_tokens == 50

    def test_transform_usage_with_zero_cached_tokens(self):
        """A cache miss (cached_tokens=0) is still reported explicitly."""
        usage = Usage(
            prompt_tokens=9,
            completion_tokens=27,
            total_tokens=36,
            prompt_tokens_details=PromptTokensDetailsWrapper(
                cached_tokens=0,  # no cache hit
                text_tokens=9,
            ),
        )

        transformed = LiteLLMCompletionResponsesConfig._transform_chat_completion_usage_to_responses_usage(
            chat_completion_response=self._response_with_usage("claude-sonnet-4", usage)
        )

        # Zero must not be dropped from input_tokens_details.
        in_details = transformed.input_tokens_details
        assert in_details is not None
        assert in_details.cached_tokens == 0
        assert in_details.text_tokens == 9

    def test_transform_usage_without_details(self):
        """Basic usage transforms cleanly when no token-detail wrappers exist."""
        usage = Usage(
            prompt_tokens=9,
            completion_tokens=27,
            total_tokens=36,
        )

        transformed = LiteLLMCompletionResponsesConfig._transform_chat_completion_usage_to_responses_usage(
            chat_completion_response=self._response_with_usage("gpt-4o", usage)
        )

        # Totals carry over; both detail objects stay None.
        assert transformed.input_tokens == 9
        assert transformed.output_tokens == 27
        assert transformed.total_tokens == 36
        assert transformed.input_tokens_details is None
        assert transformed.output_tokens_details is None