#!/usr/bin/env python3
# coding=utf-8

"""
Basic functionality tests for Coordinator server
Using FastAPI TestClient for testing
"""
import json
import pytest
from typing import Dict, Any
from fastapi.testclient import TestClient
from unittest.mock import patch, MagicMock

from motor.coordinator.api_server.coordinator_server import (
    CoordinatorServer
)
from motor.config.coordinator import CoordinatorConfig


class TestCoordinatorServer:
    """HTTP-level tests for CoordinatorServer using FastAPI's TestClient.

    InstanceManager is patched out, so these tests exercise the server's
    request handling (routing, body parsing, API-key auth, rate limiting)
    on both the management app and the OpenAI-compatible inference app
    without requiring any real backend instances.
    """

    def setup_method(self):
        """Create a CoordinatorServer backed by a mocked InstanceManager."""
        # Patch InstanceManager so the server always sees healthy instances.
        self._im_patcher = patch('motor.coordinator.api_server.coordinator_server.InstanceManager')
        im_mock_cls = self._im_patcher.start()
        im_instance = MagicMock()
        im_instance.is_available.return_value = True
        im_instance.refresh_instances.return_value = None
        im_mock_cls.return_value = im_instance

        # The unified configuration is the only argument the server takes.
        coordinator_config = CoordinatorConfig()
        coordinator_config.init()

        coordinator_server = CoordinatorServer(
            coordinator_config=coordinator_config
        )

        # _handle_openai_request reads this attribute directly.
        coordinator_server.instance_manager = im_instance

        coordinator_server.setup_rate_limiting()

        # Separate clients: management probes vs. OpenAI-compatible inference.
        self.mgmt_client = TestClient(coordinator_server.management_app)
        self.openai_client = TestClient(coordinator_server.inference_app)
        self.valid_api_key = "sk-test123456789"

    def teardown_method(self):
        """Stop the InstanceManager patch; never let teardown mask a failure."""
        try:
            if hasattr(self, '_im_patcher'):
                self._im_patcher.stop()
        except Exception:
            pass

    # ------------------------------------------------------------------ #
    # Helpers
    # ------------------------------------------------------------------ #

    def _post_json(self, path, payload, api_key=None):
        """POST *payload* to the inference app, optionally with a Bearer key."""
        headers = {"Content-Type": "application/json"}
        if api_key is not None:
            headers["Authorization"] = f"Bearer {api_key}"
        return self.openai_client.post(path, json=payload, headers=headers)

    def _assert_accepted(self, response, label):
        """Assert *response* is a well-formed 200 envelope; return its JSON."""
        assert response.status_code == 200, f"{label} failed: {response.status_code}"
        data = response.json()
        for field in ("request_id", "status", "data"):
            assert field in data, f"Response missing {field}"
        # The envelope echoes how the request body was parsed.
        for field in ("input_data", "is_stream", "request_type"):
            assert field in data["data"], f"Response data missing {field}"
        return data

    # ------------------------------------------------------------------ #
    # Tests
    # ------------------------------------------------------------------ #

    def test_health_endpoints(self):
        """All probe endpoints on the management app report healthy."""
        for path in ("/health", "/startup", "/readiness"):
            response = self.mgmt_client.get(path)
            assert response.status_code == 200, f"{path} probe failed: {response.status_code}"
            data = response.json()
            assert data["status"] == "ok", f"{path} probe status abnormal: {data}"

        # /metrics has a different payload: it only guarantees a status field.
        response = self.mgmt_client.get("/metrics")
        assert response.status_code == 200, f"Metrics endpoint failed: {response.status_code}"
        assert "status" in response.json(), "Metrics response missing status field"

    def test_openai_completions_api(self):
        """The /v1/completions endpoint accepts a range of request shapes."""
        test_cases = [
            {
                "name": "Basic completion request",
                "data": {
                    "model": "text-davinci-003",
                    "prompt": "Write a poem about spring",
                    "max_tokens": 100,
                    "temperature": 0.7
                }
            },
            {
                "name": "Completion request with stop tokens",
                "data": {
                    "model": "text-davinci-003",
                    "prompt": "Differences between Python lists and tuples:",
                    "max_tokens": 200,
                    "temperature": 0.8,
                    "stop": ["\n\n", "Summary"]
                }
            },
            {
                "name": "Minimal parameter completion request",
                "data": {
                    "model": "text-davinci-003",
                    "prompt": "Hello"
                }
            }
        ]

        for test_case in test_cases:
            response = self._post_json(
                "/v1/completions", test_case["data"], api_key=self.valid_api_key
            )
            self._assert_accepted(response, f"Completions API ({test_case['name']})")

    def test_openai_chat_completions_api(self):
        """The /v1/chat/completions endpoint accepts a range of request shapes."""
        test_cases = [
            {
                "name": "Basic chat completion request",
                "data": {
                    "model": "gpt-3.5-turbo",
                    "messages": [
                        {
                            "role": "user",
                            "content": "Hello, please introduce yourself"
                        }
                    ],
                    "max_tokens": 100,
                    "temperature": 0.7
                }
            },
            {
                "name": "Multi-turn conversation chat completion request",
                "data": {
                    "model": "gpt-3.5-turbo",
                    "messages": [
                        {
                            "role": "user",
                            "content": "What is machine learning?"
                        },
                        {
                            "role": "assistant",
                            "content": "Machine learning is a branch of artificial intelligence..."
                        },
                        {
                            "role": "user",
                            "content": "Can you give an example?"
                        }
                    ],
                    "max_tokens": 200,
                    "temperature": 0.8
                }
            },
            {
                "name": "Chat completion request with system message",
                "data": {
                    "model": "gpt-3.5-turbo",
                    "messages": [
                        {
                            "role": "system",
                            "content": "You are a professional Python programming assistant"
                        },
                        {
                            "role": "user",
                            "content": "Please write a Python implementation of quicksort"
                        }
                    ],
                    "max_tokens": 500,
                    "temperature": 0.5
                }
            },
            {
                "name": "Minimal parameter chat completion request",
                "data": {
                    "model": "gpt-3.5-turbo",
                    "messages": [
                        {
                            "role": "user",
                            "content": "Hello"
                        }
                    ]
                }
            }
        ]

        for test_case in test_cases:
            response = self._post_json(
                "/v1/chat/completions", test_case["data"], api_key=self.valid_api_key
            )
            self._assert_accepted(response, f"Chat Completions API ({test_case['name']})")

    def test_streaming_requests(self):
        """stream=True is reflected in the parsed request envelope."""
        # Streaming text completion.
        completion_stream_data = {
            "model": "text-davinci-003",
            "prompt": "Write a Python function to calculate the Fibonacci sequence",
            "max_tokens": 200,
            "temperature": 0.7,
            "stream": True
        }
        response = self._post_json(
            "/v1/completions", completion_stream_data, api_key=self.valid_api_key
        )
        data = self._assert_accepted(response, "Streaming completion request")
        assert data["data"]["is_stream"] is True, "Stream flag not set correctly"

        # Streaming chat completion.
        chat_stream_data = {
            "model": "gpt-3.5-turbo",
            "messages": [
                {
                    "role": "user",
                    "content": "Please explain the basic concepts of deep learning in detail"
                }
            ],
            "max_tokens": 300,
            "temperature": 0.7,
            "stream": True
        }
        response = self._post_json(
            "/v1/chat/completions", chat_stream_data, api_key=self.valid_api_key
        )
        data = self._assert_accepted(response, "Streaming chat completion request")
        assert data["data"]["is_stream"] is True, "Stream flag not set correctly"

    def test_error_handling(self):
        """Malformed bodies are rejected with a 4xx/500 rather than crashing."""
        # Raw non-JSON body under a JSON content type.
        response = self.openai_client.post(
            "/v1/completions",
            data="invalid json",  # NOTE(review): httpx prefers content= for raw bodies
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.valid_api_key}"
            }
        )
        assert response.status_code in [400, 422, 500], f"Invalid JSON handling exception: {response.status_code}"

        # Missing the required "model" field.
        response = self._post_json(
            "/v1/completions", {"prompt": "test"}, api_key=self.valid_api_key
        )
        assert response.status_code in [400, 422, 500], f"Missing field handling exception: {response.status_code}"

        # "messages" must be an array, not a string.
        invalid_chat_data = {
            "model": "gpt-3.5-turbo",
            "messages": "invalid messages"
        }
        response = self._post_json(
            "/v1/chat/completions", invalid_chat_data, api_key=self.valid_api_key
        )
        assert response.status_code in [400, 422, 500], f"Invalid chat completion handling exception: {response.status_code}"

    def test_rate_limiting(self):
        """The inference app stays well-behaved under burst load.

        Whether 429s actually appear depends on the configured threshold,
        so this test only requires that every response is either accepted
        (200) or explicitly rate limited (429) — never a 5xx or other
        failure.
        """
        observed_statuses = set()
        for i in range(150):  # Attempt to exceed the rate limit threshold
            test_data = {
                "model": "gpt-3.5-turbo",
                "messages": [
                    {
                        "role": "user",
                        "content": f"This is the {i+1}th rate limiting test request"
                    }
                ],
                "max_tokens": 10
            }
            response = self._post_json(
                "/v1/chat/completions", test_data, api_key=self.valid_api_key
            )
            observed_statuses.add(response.status_code)
            if response.status_code == 429:
                break  # Rate limit confirmed; no need to keep hammering.

        assert observed_statuses <= {200, 429}, (
            f"Unexpected status codes under load: {observed_statuses}"
        )

    def test_api_key_validation(self):
        """API-key middleware: 401 without a key, 403 with a bad key, 200 with a good one."""
        # Valid API Keys (from api_key_config.json)
        valid_api_keys = ["sk-test123456789", "sk-coordinator2024"]
        invalid_api_key = "sk-invalid-key"

        test_data = {
            "model": "gpt-3.5-turbo",
            "messages": [{"role": "user", "content": "test"}],
            "max_tokens": 10
        }

        # Test 1: No Authorization header at all -> 401.
        response = self._post_json("/v1/chat/completions", test_data)
        assert response.status_code == 401, f"Expected 401, got: {response.status_code}"
        assert "detail" in response.json(), "Error response missing detail field"

        # Test 2: Unknown key -> 403.
        response = self._post_json(
            "/v1/chat/completions", test_data, api_key=invalid_api_key
        )
        assert response.status_code == 403, f"Expected 403, got: {response.status_code}"
        assert "detail" in response.json(), "Error response missing detail field"

        # Test 3: Every configured key is accepted.
        for valid_key in valid_api_keys:
            response = self._post_json(
                "/v1/chat/completions", test_data, api_key=valid_key
            )
            assert response.status_code == 200, f"Valid API Key request failed: {response.status_code}"
            assert "request_id" in response.json(), "Response missing request_id"

        # Test 4: Key sent without the "Bearer " prefix — implementation-defined.
        response = self.openai_client.post(
            "/v1/chat/completions",
            json=test_data,
            headers={
                "Content-Type": "application/json",
                "Authorization": valid_api_keys[0]  # Without Bearer prefix
            }
        )
        assert response.status_code in [200, 401, 403], f"Unexpected status code: {response.status_code}"

        # Test 5: Probe/metrics paths on the management app skip auth entirely.
        for path in ("/startup", "/readiness", "/metrics"):
            response = self.mgmt_client.get(path)
            assert response.status_code == 200, f"Skip path {path} returned non-200 status code: {response.status_code}"

        # Test 6: The completions endpoint enforces the same auth rules.
        completion_data = {
            "model": "text-davinci-003",
            "prompt": "test",
            "max_tokens": 10
        }
        response = self._post_json("/v1/completions", completion_data)
        assert response.status_code == 401, f"Expected 401, got: {response.status_code}"

        response = self._post_json(
            "/v1/completions", completion_data, api_key=valid_api_keys[0]
        )
        assert response.status_code == 200, f"Completions API with valid API Key request failed: {response.status_code}"

class TestFastAPIMiddleware:
    """Unit tests for the simple rate-limit middleware, its config dataclass,
    and the config loader (env-var and file based)."""

    # Env vars the config loader reads; used for save/restore in the tests.
    _RATE_LIMIT_ENV_VARS = [
        "RATE_LIMIT_ENABLED",
        "RATE_LIMIT_MAX_REQUESTS",
        "RATE_LIMIT_WINDOW_SIZE",
        "RATE_LIMIT_SCOPE",
        "RATE_LIMIT_SKIP_PATHS",
    ]

    def setup_method(self):
        """Import the middleware pieces lazily and stash them on the instance."""
        from motor.coordinator.middleware.fastapi_middleware import (
            SimpleRateLimitMiddleware,
            SimpleRateLimitConfig,
            load_rate_limit_config,
            create_simple_rate_limit_middleware
        )
        from motor.coordinator.middleware.rate_limiter import SimpleRateLimiter
        from fastapi import FastAPI
        from fastapi.testclient import TestClient

        self.app = FastAPI()
        self.SimpleRateLimitMiddleware = SimpleRateLimitMiddleware
        self.SimpleRateLimitConfig = SimpleRateLimitConfig
        self.load_rate_limit_config = load_rate_limit_config
        self.create_simple_rate_limit_middleware = create_simple_rate_limit_middleware
        self.SimpleRateLimiter = SimpleRateLimiter
        self.TestClient = TestClient

    @staticmethod
    def _restore_env(name, original):
        """Put env var *name* back to *original*; delete it if it was unset.

        Compares against None rather than truthiness so a variable that was
        originally set to the empty string is restored, not silently dropped.
        """
        import os
        if original is not None:
            os.environ[name] = original
        else:
            os.environ.pop(name, None)

    def test_simple_rate_limit_config(self):
        """SimpleRateLimitConfig provides sane defaults."""
        config = self.SimpleRateLimitConfig()
        assert config.enabled == True, "Default enabled should be True"
        assert config.max_requests == 100, "Default max_requests should be 100"
        assert config.window_size == 60, "Default window_size should be 60"
        assert config.scope == "per_ip", "Default scope should be per_ip"
        assert config.skip_paths is not None, "skip_paths should be initialized"
        assert "/health" in config.skip_paths, "/health should be in skip_paths"

    def test_load_rate_limit_config_default(self):
        """load_rate_limit_config falls back to defaults when env vars are absent."""
        import os
        names = ["RATE_LIMIT_ENABLED", "RATE_LIMIT_MAX_REQUESTS", "RATE_LIMIT_WINDOW_SIZE"]
        saved = {name: os.getenv(name) for name in names}
        try:
            # Remove the vars so the loader cannot see them.
            for name in names:
                os.environ.pop(name, None)

            config = self.load_rate_limit_config()
            assert config.enabled == True, "Should use default enabled=True"
            assert config.max_requests == 100, "Should use default max_requests=100"
            assert config.window_size == 60, "Should use default window_size=60"
        finally:
            for name in names:
                self._restore_env(name, saved[name])

    def test_load_rate_limit_config_from_env(self):
        """load_rate_limit_config reads every supported setting from the environment."""
        import os
        saved = {name: os.getenv(name) for name in self._RATE_LIMIT_ENV_VARS}
        try:
            os.environ["RATE_LIMIT_ENABLED"] = "false"
            os.environ["RATE_LIMIT_MAX_REQUESTS"] = "200"
            os.environ["RATE_LIMIT_WINDOW_SIZE"] = "30"
            os.environ["RATE_LIMIT_SCOPE"] = "global"
            os.environ["RATE_LIMIT_SKIP_PATHS"] = "/health,/metrics"

            config = self.load_rate_limit_config()
            assert config.enabled == False, "Should load enabled from env"
            assert config.max_requests == 200, "Should load max_requests from env"
            assert config.window_size == 30, "Should load window_size from env"
            assert config.scope == "global", "Should load scope from env"
            assert "/health" in config.skip_paths, "Should load skip_paths from env"
            assert "/metrics" in config.skip_paths, "Should load skip_paths from env"
        finally:
            for name in self._RATE_LIMIT_ENV_VARS:
                self._restore_env(name, saved[name])

    def test_load_rate_limit_config_from_file(self):
        """load_rate_limit_config reads settings from a JSON config file."""
        import os
        import json
        import tempfile

        config_data = {
            "enabled": False,
            "max_requests": 300,
            "window_size": 45,
            "scope": "per_ip",
            "error_message": "Custom error message",
            "error_status_code": 429
        }

        # delete=False so the file survives the context exit for the loader.
        with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
            json.dump(config_data, f)
            config_file = f.name

        try:
            config = self.load_rate_limit_config(config_file=config_file)
            assert config.enabled == False, "Should load enabled from file"
            assert config.max_requests == 300, "Should load max_requests from file"
            assert config.window_size == 45, "Should load window_size from file"
            assert config.error_message == "Custom error message", "Should load error_message from file"
        finally:
            os.unlink(config_file)

    def test_rate_limit_middleware_skip_paths(self):
        """Paths in skip_paths bypass rate limiting entirely."""
        @self.app.get("/test")
        async def test_endpoint():
            return {"status": "ok"}

        @self.app.get("/health")
        async def health_endpoint():
            return {"status": "healthy"}

        # max_requests=1 means the second non-skipped request may be limited.
        rate_limiter = self.SimpleRateLimiter(max_requests=1, window_size=60)
        middleware = self.SimpleRateLimitMiddleware(
            app=self.app,
            rate_limiter=rate_limiter,
            skip_paths=["/health"]
        )

        # Middleware itself is an ASGI app (inherits from BaseHTTPMiddleware).
        client = self.TestClient(middleware)

        # /health is skipped: repeated hits never get limited.
        for _ in range(5):
            response = client.get("/health")
            assert response.status_code == 200, "Health endpoint should not be rate limited"

        # /test is subject to the limiter.
        response1 = client.get("/test")
        assert response1.status_code == 200, "First request should succeed"

        response2 = client.get("/test")
        # May be rate limited depending on timing.
        assert response2.status_code in [200, 429], "Second request may be rate limited"

    def test_rate_limit_middleware_error_handling(self):
        """A failing limiter fails open: the request is still allowed."""
        # Limiter whose is_allowed always raises.
        rate_limiter = MagicMock()
        rate_limiter.is_allowed = MagicMock(side_effect=Exception("Test error"))

        @self.app.get("/test")
        async def test_endpoint():
            return {"status": "ok"}

        middleware = self.SimpleRateLimitMiddleware(
            app=self.app,
            rate_limiter=rate_limiter,
            skip_paths=[]
        )

        # Middleware itself is an ASGI app (inherits from BaseHTTPMiddleware).
        client = self.TestClient(middleware)

        response = client.get("/test")
        assert response.status_code == 200, "Should allow request when error occurs"
        assert middleware.stats["allowed_requests"] > 0, "Should increment allowed_requests on error"

    def test_create_simple_rate_limit_middleware(self):
        """The factory wires limiter parameters and default skip paths."""
        middleware = self.create_simple_rate_limit_middleware(
            app=self.app,
            max_requests=50,
            window_size=30
        )

        assert middleware is not None, "Middleware should be created"
        assert middleware.rate_limiter.max_requests == 50, "Should set max_requests"
        assert middleware.rate_limiter.window_size == 30, "Should set window_size"
        assert middleware.skip_paths is not None, "Should set skip_paths"

    def test_rate_limit_middleware_stats(self):
        """The middleware tracks per-request statistics."""
        @self.app.get("/test")
        async def test_endpoint():
            return {"status": "ok"}

        rate_limiter = self.SimpleRateLimiter(max_requests=10, window_size=60)
        middleware = self.SimpleRateLimitMiddleware(
            app=self.app,
            rate_limiter=rate_limiter,
            skip_paths=[]
        )

        # Middleware itself is an ASGI app (inherits from BaseHTTPMiddleware).
        client = self.TestClient(middleware)

        for _ in range(5):
            client.get("/test")

        assert middleware.stats["total_requests"] >= 5, "Should track total requests"
        assert middleware.stats["allowed_requests"] >= 5, "Should track allowed requests"
        assert "start_time" in middleware.stats, "Should track start time"


class TestCoordinatorServerAdvanced:
    """Advanced functionality test class for Coordinator server.

    Covers the management refresh endpoint, unified-app construction,
    OpenAI request validation/handling, stream detection, and rate-limit
    setup edge cases. External collaborators (InstanceManager,
    InstanceHealthChecker, ControllerClient) are patched out in
    setup_method so tests exercise only CoordinatorServer logic.
    """
    
    def setup_method(self):
        """Setup test fixtures"""
        # Mock InstanceManager
        self._im_patcher = patch('motor.coordinator.api_server.coordinator_server.InstanceManager')
        im_mock_cls = self._im_patcher.start()
        im_instance = MagicMock()
        im_instance.is_available.return_value = True
        im_instance.refresh_instances.return_value = None
        im_mock_cls.return_value = im_instance
        
        # Mock InstanceHealthChecker
        self._hc_patcher = patch('motor.coordinator.api_server.coordinator_server.InstanceHealthChecker')
        hc_mock_cls = self._hc_patcher.start()
        hc_instance = MagicMock()
        hc_instance.check_state_alarm.return_value = True
        hc_mock_cls.return_value = hc_instance
        
        # Mock ControllerClient
        self._cc_patcher = patch('motor.coordinator.api_server.coordinator_server.ControllerClient')
        cc_mock_cls = self._cc_patcher.start()
        cc_instance = MagicMock()
        cc_mock_cls.return_value = cc_instance
        
        # Create unified configuration
        coordinator_config = CoordinatorConfig()
        coordinator_config.init()
        
        # Create CoordinatorServer instance
        coordinator_server = CoordinatorServer(
            coordinator_config=coordinator_config
        )
        
        # Set instance_manager attribute (required by _handle_openai_request)
        coordinator_server.instance_manager = im_instance
        
        # Setup rate limiting
        coordinator_server.setup_rate_limiting()
        
        self.coordinator_server = coordinator_server
        self.mgmt_client = TestClient(coordinator_server.management_app)
        self.valid_api_key = "sk-test123456789"

    def teardown_method(self):
        """Teardown test fixtures"""
        # Best-effort patcher cleanup: setup_method may have failed midway,
        # so stop only the patchers that were actually created.
        try:
            if hasattr(self, '_im_patcher'):
                self._im_patcher.stop()
            if hasattr(self, '_hc_patcher'):
                self._hc_patcher.stop()
            if hasattr(self, '_cc_patcher'):
                self._cc_patcher.stop()
        except Exception:
            pass
    
    def test_refresh_instances_valid_request(self):
        """Test refresh_instances with valid request"""
        valid_body = {
            "event": "add",
            "instances": [
                {
                    "job_name": "test-job",
                    "model_name": "test-model",
                    "id": 1,
                    "role": "prefill",
                    "endpoints": {
                        "192.168.1.1": {
                            "0": {
                                "id": 0,
                                "ip": "192.168.1.1",
                                "port": "8080"
                            }
                        }
                    }
                }
            ]
        }
        
        response = self.mgmt_client.post(
            "/v1/instances/refresh",
            json=valid_body
        )
        
        assert response.status_code == 200, f"Refresh instances failed: {response.status_code}"
        data = response.json()
        assert data["status"] == "success", f"Refresh instances status abnormal: {data}"
        assert "request_id" in data, "Response missing request_id"
        assert "data" in data, "Response missing data field"
    
    def test_refresh_instances_empty_body(self):
        """Test refresh_instances with empty body"""
        response = self.mgmt_client.post(
            "/v1/instances/refresh",
            json={}
        )
        
        # Should return 400 for empty body
        assert response.status_code == 400, f"Expected 400 for empty body, got: {response.status_code}"
    
    def test_refresh_instances_invalid_json(self):
        """Test refresh_instances with invalid JSON"""
        response = self.mgmt_client.post(
            "/v1/instances/refresh",
            data="invalid json",
            headers={"Content-Type": "application/json"}
        )
        
        # Should return 400 or 422 for invalid JSON
        assert response.status_code in [400, 422, 500], f"Expected 400/422/500 for invalid JSON, got: {response.status_code}"
    
    def test_refresh_instances_with_endpoints_conversion(self):
        """Test refresh_instances with endpoint ID conversion"""
        # Endpoint keys are string indices ("0", "1"); the server is
        # expected to convert them while registering the instance.
        valid_body = {
            "event": "add",
            "instances": [
                {
                    "job_name": "test-job",
                    "model_name": "test-model",
                    "id": 2,
                    "role": "prefill",
                    "endpoints": {
                        "192.168.1.2": {
                            "0": {
                                "id": 0,
                                "ip": "192.168.1.2",
                                "port": "8080"
                            },
                            "1": {
                                "id": 1,
                                "ip": "192.168.1.2",
                                "port": "8081"
                            }
                        }
                    }
                }
            ]
        }
        
        response = self.mgmt_client.post(
            "/v1/instances/refresh",
            json=valid_body
        )
        
        assert response.status_code == 200, f"Refresh instances failed: {response.status_code}"
        data = response.json()
        assert data["status"] == "success", f"Refresh instances status abnormal: {data}"
    
    def test_refresh_instances_invalid_event_msg(self):
        """Test refresh_instances with invalid event message format"""
        invalid_body = {
            "event": "INVALID_EVENT",
            "instances": "not a list"  # Invalid format
        }
        
        response = self.mgmt_client.post(
            "/v1/instances/refresh",
            json=invalid_body
        )
        
        # Should return 400 for invalid format
        assert response.status_code == 400, f"Expected 400 for invalid format, got: {response.status_code}"
    
    def test_refresh_instances_no_body(self):
        """Test refresh_instances with no body"""
        response = self.mgmt_client.post(
            "/v1/instances/refresh",
            content=None
        )
        
        # Should return 400 for no body
        assert response.status_code == 400, f"Expected 400 for no body, got: {response.status_code}"
    
    def test_create_unified_app(self):
        """Test create_unified_app method"""
        unified_app = self.coordinator_server.create_unified_app()
        
        assert unified_app is not None, "Unified app should be created"
        
        # Test that unified app has routes from both management and inference apps
        unified_client = TestClient(unified_app)
        
        # Test management route
        response = unified_client.get("/health")
        assert response.status_code == 200, "Health endpoint should be available in unified app"
        
        # Test inference route
        response = unified_client.post(
            "/v1/chat/completions",
            json={
                "model": "gpt-3.5-turbo",
                "messages": [{"role": "user", "content": "test"}]
            },
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.valid_api_key}"
            }
        )
        assert response.status_code == 200, "Chat completions endpoint should be available in unified app"
    
    def test_create_unified_app_with_rate_limit_disabled(self):
        """Test create_unified_app with rate limit disabled"""
        # Create a config with rate limit disabled
        coordinator_config = CoordinatorConfig()
        coordinator_config.init()
        coordinator_config.rate_limit_config.enabled = False
        
        coordinator_server = CoordinatorServer(
            coordinator_config=coordinator_config
        )
        coordinator_server.instance_manager = MagicMock()
        
        unified_app = coordinator_server.create_unified_app()
        assert unified_app is not None, "Unified app should be created even with rate limit disabled"
    
    def test_create_unified_app_with_custom_rate_limit_config(self):
        """Test create_unified_app with custom rate limit config"""
        from motor.config.coordinator import RateLimitConfig
        
        custom_rate_limit_config = RateLimitConfig()
        custom_rate_limit_config.enabled = True
        custom_rate_limit_config.max_requests = 50
        custom_rate_limit_config.window_size = 30
        
        unified_app = self.coordinator_server.create_unified_app(
            rate_limit_config=custom_rate_limit_config
        )
        
        assert unified_app is not None, "Unified app should be created with custom rate limit config"
    
    def test_copy_routes_skip_paths(self):
        """Test _copy_routes with skip paths"""
        from fastapi import FastAPI
        
        src_app = FastAPI()
        
        @src_app.get("/test")
        async def test():
            return {"status": "ok"}
        
        @src_app.get("/docs")
        async def docs():
            return {"status": "docs"}
        
        # Create dst_app with docs disabled to avoid FastAPI auto-generated docs
        dst_app = FastAPI(docs_url=None, redoc_url=None, openapi_url=None)
        
        self.coordinator_server._copy_routes(src_app, dst_app, skip_paths=["/docs"])
        
        dst_client = TestClient(dst_app)
        
        # /test should be copied
        response = dst_client.get("/test")
        assert response.status_code == 200, "/test route should be copied"
        
        # /docs should be skipped (not copied from src_app, and FastAPI docs disabled)
        response = dst_client.get("/docs")
        assert response.status_code == 404, "/docs route should be skipped"
    
    def test_validate_openai_request_invalid_model(self):
        """Test _validate_openai_request with missing model"""
        # This tests the validation logic indirectly through the endpoint
        invalid_data = {
            "messages": [{"role": "user", "content": "test"}]
            # Missing model field
        }
        
        inference_client = TestClient(self.coordinator_server.inference_app)
        response = inference_client.post(
            "/v1/chat/completions",
            json=invalid_data,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.valid_api_key}"
            }
        )
        
        assert response.status_code == 400, f"Expected 400 for missing model, got: {response.status_code}"
    
    def test_validate_openai_request_invalid_messages(self):
        """Test _validate_openai_request with invalid messages"""
        invalid_data = {
            "model": "gpt-3.5-turbo",
            "messages": "not a list"  # Invalid format
        }
        
        inference_client = TestClient(self.coordinator_server.inference_app)
        response = inference_client.post(
            "/v1/chat/completions",
            json=invalid_data,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.valid_api_key}"
            }
        )
        
        assert response.status_code == 400, f"Expected 400 for invalid messages, got: {response.status_code}"
    
    def test_validate_openai_request_empty_messages(self):
        """Test _validate_openai_request with empty messages list"""
        invalid_data = {
            "model": "gpt-3.5-turbo",
            "messages": []
        }
        
        inference_client = TestClient(self.coordinator_server.inference_app)
        response = inference_client.post(
            "/v1/chat/completions",
            json=invalid_data,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.valid_api_key}"
            }
        )
        
        assert response.status_code == 400, f"Expected 400 for empty messages, got: {response.status_code}"
    
    def test_validate_openai_request_invalid_message_format(self):
        """Test _validate_openai_request with invalid message format"""
        invalid_data = {
            "model": "gpt-3.5-turbo",
            "messages": [
                "not a dict"  # Invalid message format
            ]
        }
        
        inference_client = TestClient(self.coordinator_server.inference_app)
        response = inference_client.post(
            "/v1/chat/completions",
            json=invalid_data,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.valid_api_key}"
            }
        )
        
        assert response.status_code == 400, f"Expected 400 for invalid message format, got: {response.status_code}"
    
    def test_validate_openai_request_missing_role_or_content(self):
        """Test _validate_openai_request with missing role or content"""
        invalid_data = {
            "model": "gpt-3.5-turbo",
            "messages": [
                {"role": "user"}  # Missing content
            ]
        }
        
        inference_client = TestClient(self.coordinator_server.inference_app)
        response = inference_client.post(
            "/v1/chat/completions",
            json=invalid_data,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.valid_api_key}"
            }
        )
        
        assert response.status_code == 400, f"Expected 400 for missing content, got: {response.status_code}"
    
    def test_validate_openai_request_invalid_role(self):
        """Test _validate_openai_request with invalid role"""
        invalid_data = {
            "model": "gpt-3.5-turbo",
            "messages": [
                {"role": "invalid_role", "content": "test"}
            ]
        }
        
        inference_client = TestClient(self.coordinator_server.inference_app)
        response = inference_client.post(
            "/v1/chat/completions",
            json=invalid_data,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.valid_api_key}"
            }
        )
        
        assert response.status_code == 400, f"Expected 400 for invalid role, got: {response.status_code}"
    
    def test_handle_openai_request_unavailable_instances(self):
        """Test _handle_openai_request when instances are unavailable"""
        # Mock instance_manager to return False for is_available
        self.coordinator_server.instance_manager.is_available = MagicMock(return_value=False)
        
        inference_client = TestClient(self.coordinator_server.inference_app)
        response = inference_client.post(
            "/v1/chat/completions",
            json={
                "model": "gpt-3.5-turbo",
                "messages": [{"role": "user", "content": "test"}]
            },
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.valid_api_key}"
            }
        )
        
        assert response.status_code == 503, f"Expected 503 for unavailable instances, got: {response.status_code}"
    
    def test_handle_openai_request_with_prompt(self):
        """Test _handle_openai_request with prompt field (completions API)"""
        inference_client = TestClient(self.coordinator_server.inference_app)
        response = inference_client.post(
            "/v1/completions",
            json={
                "model": "text-davinci-003",
                "prompt": "Hello world"
            },
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.valid_api_key}"
            }
        )
        
        assert response.status_code == 200, f"Completions API failed: {response.status_code}"
        data = response.json()
        assert data["data"]["input_data"] == "Hello world", "Prompt should be extracted correctly"
    
    def test_handle_openai_request_empty_input(self):
        """Test _handle_openai_request with empty input"""
        inference_client = TestClient(self.coordinator_server.inference_app)
        response = inference_client.post(
            "/v1/completions",
            json={
                "model": "text-davinci-003"
                # Missing prompt and messages
            },
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.valid_api_key}"
            }
        )
        
        # Should return 400 for missing required fields
        assert response.status_code == 400, f"Expected 400 for missing prompt/messages, got: {response.status_code}"
    
    def test_openai_is_stream(self):
        """Test _openai_is_stream method"""
        # Identity comparisons (is True / is False) instead of == True /
        # == False: the method must return real booleans, and `is`
        # pins that down while keeping the assertions idiomatic (E712).
        # Test with stream=True
        assert self.coordinator_server._openai_is_stream({"stream": True}) is True
        
        # Test with stream=False
        assert self.coordinator_server._openai_is_stream({"stream": False}) is False
        
        # Test without stream field
        assert self.coordinator_server._openai_is_stream({}) is False
        
        # Test with stream as string: any truthy value is coerced to True
        assert self.coordinator_server._openai_is_stream({"stream": "true"}) is True  # Truthy value
    
    def test_refresh_instances_with_complex_endpoints(self):
        """Test refresh_instances with complex endpoint structures"""
        # Multiple hosts, multiple endpoints per host.
        complex_body = {
            "event": "add",
            "instances": [
                {
                    "job_name": "test-job",
                    "model_name": "test-model",
                    "id": 3,
                    "role": "prefill",
                    "endpoints": {
                        "192.168.1.3": {
                            "0": {
                                "id": 0,
                                "ip": "192.168.1.3",
                                "port": "8080"
                            },
                            "1": {
                                "id": 1,
                                "ip": "192.168.1.3",
                                "port": "8081"
                            }
                        },
                        "192.168.1.4": {
                            "2": {
                                "id": 2,
                                "ip": "192.168.1.4",
                                "port": "9000"
                            }
                        }
                    }
                }
            ]
        }
        
        response = self.mgmt_client.post(
            "/v1/instances/refresh",
            json=complex_body
        )
        
        assert response.status_code == 200, f"Refresh instances failed: {response.status_code}"
        data = response.json()
        assert data["status"] == "success", f"Refresh instances status abnormal: {data}"
    
    def test_refresh_instances_with_non_dict_endpoints(self):
        """Test refresh_instances with non-dict endpoints value"""
        invalid_body = {
            "event": "add",
            "instances": [
                {
                    "job_name": "test-job",
                    "model_name": "test-model",
                    "id": 4,
                    "role": "prefill",
                    "endpoints": "not a dict"  # Invalid endpoints format
                }
            ]
        }
        
        response = self.mgmt_client.post(
            "/v1/instances/refresh",
            json=invalid_body
        )
        
        # Should return 400 for invalid format (endpoints must be a dict)
        assert response.status_code == 400, f"Expected 400 for invalid endpoints format, got: {response.status_code}"
    
    def test_refresh_instances_with_non_dict_endpoint_data(self):
        """Test refresh_instances with non-dict endpoint data"""
        invalid_body = {
            "event": "add",
            "instances": [
                {
                    "job_name": "test-job",
                    "model_name": "test-model",
                    "id": 5,
                    "role": "prefill",
                    "endpoints": {
                        "192.168.1.5": "not a dict"  # Invalid endpoint data format
                    }
                }
            ]
        }
        
        response = self.mgmt_client.post(
            "/v1/instances/refresh",
            json=invalid_body
        )
        
        # Should return 400 for invalid format (endpoint data must be a dict)
        assert response.status_code == 400, f"Expected 400 for invalid endpoint data format, got: {response.status_code}"
    
    def test_timeout_handler(self):
        """Test timeout handler decorator"""
        # This tests the timeout handler indirectly through endpoints
        # The timeout handler should not raise errors for normal requests
        inference_client = TestClient(self.coordinator_server.inference_app)
        response = inference_client.post(
            "/v1/chat/completions",
            json={
                "model": "gpt-3.5-turbo",
                "messages": [{"role": "user", "content": "test"}]
            },
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.valid_api_key}"
            }
        )
        
        assert response.status_code == 200, "Timeout handler should not block normal requests"
    
    def test_verify_api_key_skip_paths(self):
        """Test verify_api_key with skip paths"""
        # TODO(review): this test originally had no assertions and could
        # never fail. The actual skip paths are configured in
        # api_key_config; a real test should request one of them without
        # an Authorization header and assert it is not rejected with 401.
        # Until the skip paths are known here, at least smoke-check that
        # the inference app can be wrapped in a client.
        inference_client = TestClient(self.coordinator_server.inference_app)
        assert inference_client is not None, "Inference app should be testable"
    
    def test_lifespan_context_manager(self):
        """Test lifespan context manager"""
        # Entering the TestClient runs startup; leaving it runs shutdown.
        from fastapi import FastAPI
        
        app = FastAPI(lifespan=self.coordinator_server._lifespan)
        client = TestClient(app)
        
        # The app has no routes, so "/" must 404 — what matters is that
        # the request completes without the lifespan raising.
        response = client.get("/")
        assert response.status_code == 404, "Lifespan context manager should not break request handling"
    
    def test_setup_rate_limiting_with_disabled_config(self):
        """Test setup_rate_limiting with disabled config"""
        from motor.config.coordinator import RateLimitConfig
        
        # Create a config with rate limit disabled
        disabled_config = RateLimitConfig()
        disabled_config.enabled = False
        
        # NOTE(review): unlike other fixtures, this config is not
        # .init()-ed before use — confirm whether that is intentional.
        coordinator_server = CoordinatorServer(
            coordinator_config=CoordinatorConfig()
        )
        coordinator_server.instance_manager = MagicMock()
        
        # Reaching the end without an exception is the assertion here.
        coordinator_server.setup_rate_limiting(rate_limit_config=disabled_config)
    
    def test_setup_rate_limiting_with_exception(self):
        """Test setup_rate_limiting exception handling"""
        # Mock create_simple_rate_limit_middleware to raise exception
        with patch('motor.coordinator.api_server.coordinator_server.create_simple_rate_limit_middleware') as mock_create:
            mock_create.side_effect = Exception("Test exception")
            
            # NOTE(review): config is not .init()-ed here — confirm
            # whether that is intentional.
            coordinator_server = CoordinatorServer(
                coordinator_config=CoordinatorConfig()
            )
            coordinator_server.instance_manager = MagicMock()
            
            # Should handle the injected exception gracefully; not
            # raising is the assertion.
            coordinator_server.setup_rate_limiting()
    
    def test_create_unified_app_with_exception(self):
        """Test create_unified_app exception handling"""
        # Mock create_simple_rate_limit_middleware to raise exception
        with patch('motor.coordinator.api_server.coordinator_server.create_simple_rate_limit_middleware') as mock_create:
            mock_create.side_effect = Exception("Test exception")
            
            # Should handle exception gracefully
            unified_app = self.coordinator_server.create_unified_app()
            assert unified_app is not None, "Unified app should be created even with exceptions"
    
    def test_copy_routes_with_exception(self):
        """Test _copy_routes exception handling"""
        from fastapi import FastAPI
        
        src_app = FastAPI()
        
        # Create a route that might cause issues
        @src_app.get("/test")
        async def test():
            return {"status": "ok"}
        
        dst_app = FastAPI()
        
        # Replace the source router's routes with a bare MagicMock so the
        # copy loop encounters an object without the expected attributes.
        with patch.object(src_app.router, 'routes', new=[MagicMock()]):
            # Not raising is the assertion: _copy_routes must tolerate
            # malformed route entries.
            self.coordinator_server._copy_routes(src_app, dst_app)
    
    def test_handle_openai_request_json_decode_error(self):
        """Test _handle_openai_request with JSON decode error"""
        inference_client = TestClient(self.coordinator_server.inference_app)
        
        # Send invalid JSON
        response = inference_client.post(
            "/v1/chat/completions",
            data="invalid json",
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.valid_api_key}"
            }
        )
        
        # Should return error status code
        assert response.status_code in [400, 422, 500], f"Expected error for invalid JSON, got: {response.status_code}"
    
    def test_handle_openai_request_general_exception(self):
        """Test _handle_openai_request with general exception"""
        # Mock instance_manager.is_available to raise exception
        self.coordinator_server.instance_manager.is_available = MagicMock(side_effect=Exception("Test exception"))
        
        inference_client = TestClient(self.coordinator_server.inference_app)
        response = inference_client.post(
            "/v1/chat/completions",
            json={
                "model": "gpt-3.5-turbo",
                "messages": [{"role": "user", "content": "test"}]
            },
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.valid_api_key}"
            }
        )
        
        # Should return 500 error
        assert response.status_code == 500, f"Expected 500 for exception, got: {response.status_code}"


class TestFastAPIMiddlewareAdvanced:
    """Test FastAPI middleware advanced functionality"""
    
    def setup_method(self):
        """Setup test fixtures: import middleware helpers and build a fresh app."""
        from motor.coordinator.middleware.fastapi_middleware import (
            SimpleRateLimitMiddleware,
            SimpleRateLimitConfig,
            load_rate_limit_config,
            create_simple_rate_limit_middleware
        )
        from motor.coordinator.middleware.rate_limiter import SimpleRateLimiter
        from fastapi import FastAPI
        from fastapi.testclient import TestClient
        
        # Expose the imported symbols on the instance so each test can
        # reach them without repeating the imports.
        self.app = FastAPI()
        self.TestClient = TestClient
        self.SimpleRateLimitMiddleware = SimpleRateLimitMiddleware
        self.SimpleRateLimitConfig = SimpleRateLimitConfig
        self.SimpleRateLimiter = SimpleRateLimiter
        self.load_rate_limit_config = load_rate_limit_config
        self.create_simple_rate_limit_middleware = create_simple_rate_limit_middleware
    
    def test_rate_limit_middleware_extract_request_data(self):
        """Test _extract_request_data method"""
        @self.app.get("/test")
        async def test_endpoint():
            return {"status": "ok"}
        
        middleware = self.SimpleRateLimitMiddleware(
            app=self.app,
            rate_limiter=self.SimpleRateLimiter(max_requests=10, window_size=60),
            skip_paths=[]
        )
        
        # A successful request must flow through _extract_request_data
        # and be reflected in the middleware's counters.
        resp = self.TestClient(middleware).get("/test")
        assert resp.status_code == 200, "Request should succeed"
        assert middleware.stats["total_requests"] > 0, "Should extract request data"
    
    def test_rate_limit_middleware_create_rate_limit_headers(self):
        """Test _create_rate_limit_headers method"""
        @self.app.get("/test")
        async def test_endpoint():
            return {"status": "ok"}
        
        middleware = self.SimpleRateLimitMiddleware(
            app=self.app,
            rate_limiter=self.SimpleRateLimiter(max_requests=10, window_size=60),
            skip_paths=[]
        )
        
        # A normal request should come back annotated with rate-limit headers
        response = self.TestClient(middleware).get("/test")
        assert response.status_code == 200, "Request should succeed"
        has_rate_limit_headers = (
            "X-RateLimit-Remaining" in response.headers
            or "X-RateLimit-Limit" in response.headers
        )
        assert has_rate_limit_headers, "Should create rate limit headers"
    
    def test_rate_limit_middleware_dispatch_exception(self):
        """Test dispatch method exception handling"""
        @self.app.get("/test")
        async def test_endpoint():
            return {"status": "ok"}
        
        # A limiter that always raises: dispatch must fail open (allow)
        broken_limiter = MagicMock()
        broken_limiter.is_allowed = MagicMock(side_effect=Exception("Test error"))
        
        middleware = self.SimpleRateLimitMiddleware(
            app=self.app,
            rate_limiter=broken_limiter,
            skip_paths=[]
        )
        
        response = self.TestClient(middleware).get("/test")
        assert response.status_code == 200, "Should allow request when error occurs"
        assert middleware.stats["allowed_requests"] > 0, "Should increment allowed_requests on error"
    
    def test_rate_limit_middleware_should_skip_path(self):
        """Test _should_skip_path method"""
        @self.app.get("/test")
        async def test_endpoint():
            return {"status": "ok"}
        
        @self.app.get("/health")
        async def health_endpoint():
            return {"status": "healthy"}
        
        # Tight limit (1 request per window) so the limiter triggers fast;
        # /health is configured to bypass the limiter entirely.
        middleware = self.SimpleRateLimitMiddleware(
            app=self.app,
            rate_limiter=self.SimpleRateLimiter(max_requests=1, window_size=60),
            skip_paths=["/health"]
        )
        client = self.TestClient(middleware)
        
        # /health should be skipped no matter how often it is hit
        for message in (
            "Health endpoint should not be rate limited",
            "Health endpoint should still not be rate limited",
        ):
            assert client.get("/health").status_code == 200, message
        
        # /test should be rate limited
        assert client.get("/test").status_code == 200, "First request should succeed"
        
        # Second request may be rate limited
        assert client.get("/test").status_code in [200, 429], "Second request may be rate limited"
    
    def test_load_rate_limit_config_file_not_found(self):
        """Test load_rate_limit_config with non-existent file.

        The loader must fall back to a default configuration when the
        file is missing rather than raising.
        """
        config = self.load_rate_limit_config(config_file="/nonexistent/config.json")
        assert config is not None, "Should return default config when file not found"
        # ``is True`` instead of ``== True`` (PEP 8 / flake8 E712)
        assert config.enabled is True, "Should use default enabled value"
    
    def test_load_rate_limit_config_invalid_json(self):
        """Test load_rate_limit_config with invalid JSON file"""
        import os
        import tempfile
        
        # Write a temp file whose content cannot be parsed as JSON
        with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as tmp:
            tmp.write("invalid json content")
            path = tmp.name
        
        try:
            loaded = self.load_rate_limit_config(config_file=path)
            assert loaded is not None, "Should return default config when JSON is invalid"
        finally:
            # Always remove the temp file, even when the assertion fails
            os.unlink(path)
    
    def test_simple_rate_limit_config_post_init(self):
        """Test SimpleRateLimitConfig __post_init__"""
        config = self.SimpleRateLimitConfig()
        assert config.skip_paths is not None, "skip_paths should be initialized"
        # The default skip list must contain the standard probe endpoints
        for path, message in (
            ("/health", "/health should be in skip_paths"),
            ("/ready", "/ready should be in skip_paths"),
            ("/metrics", "/metrics should be in skip_paths"),
        ):
            assert path in config.skip_paths, message
    
    def test_create_simple_rate_limit_middleware_defaults(self):
        """Test create_simple_rate_limit_middleware with default parameters"""
        mw = self.create_simple_rate_limit_middleware(app=self.app)
        
        assert mw is not None, "Middleware should be created"
        # The factory's defaults should be reflected in the limiter it builds
        limiter = mw.rate_limiter
        assert limiter.max_requests == 100, "Should use default max_requests"
        assert limiter.window_size == 60, "Should use default window_size"