"""
Performance optimization middleware for FastAPI
"""
import time
import asyncio
from typing import Callable, Any, Optional
from fastapi import FastAPI, Request, Response
from fastapi.responses import JSONResponse
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.middleware.gzip import GZipMiddleware
import redis.asyncio as redis
import hashlib
import json
import logging
from functools import wraps

logger = logging.getLogger(__name__)

class PerformanceMiddleware(BaseHTTPMiddleware):
    """
    Middleware that times every request and flags slow ones.

    Adds ``X-Process-Time`` and ``X-Request-ID`` headers to each response
    and logs a warning for any request slower than ``slow_request_threshold``.
    """

    def __init__(self, app, slow_request_threshold: float = 1.0):
        super().__init__(app)
        # Requests taking longer than this many seconds are logged as slow.
        self.slow_request_threshold = slow_request_threshold

    async def dispatch(self, request: Request, call_next):
        started = time.perf_counter()

        # Propagate a caller-supplied request ID; fall back to a timestamp.
        trace_id = request.headers.get("X-Request-ID", str(time.time()))

        response = await call_next(request)

        elapsed = time.perf_counter() - started
        response.headers["X-Process-Time"] = str(elapsed)
        response.headers["X-Request-ID"] = trace_id

        if elapsed > self.slow_request_threshold:
            logger.warning(
                f"Slow request detected: {request.method} {request.url.path} "
                f"took {elapsed:.3f}s (threshold: {self.slow_request_threshold}s)"
            )

        return response


class CacheMiddleware(BaseHTTPMiddleware):
    """
    Redis-based caching middleware for GET requests.

    Successful (200) JSON responses are stored in Redis for ``cache_ttl``
    seconds; identical subsequent requests are answered from the cache with
    an ``X-Cache: HIT`` header.  Any cache failure is logged and the request
    falls through to the application, so a broken Redis never breaks the API.
    """

    def __init__(
        self,
        app,
        redis_client: redis.Redis,
        cache_ttl: int = 300,
        exclude_paths: Optional[list] = None
    ):
        super().__init__(app)
        self.redis_client = redis_client
        # Seconds a cached response remains valid.
        self.cache_ttl = cache_ttl
        # Path prefixes that must never be cached.
        self.exclude_paths = exclude_paths or ["/health", "/docs", "/openapi.json"]

    async def dispatch(self, request: Request, call_next):
        # Only idempotent GET requests are safe to cache.
        if request.method != "GET":
            return await call_next(request)

        # Skip excluded path prefixes (health checks, docs, ...).
        if any(request.url.path.startswith(path) for path in self.exclude_paths):
            return await call_next(request)

        cache_key = self._generate_cache_key(request)

        try:
            cached_response = await self.redis_client.get(cache_key)
            if cached_response:
                cached_data = json.loads(cached_response)
                return JSONResponse(
                    content=cached_data["content"],
                    status_code=cached_data["status_code"],
                    headers={
                        **cached_data.get("headers", {}),
                        "X-Cache": "HIT"
                    }
                )
        except Exception as e:
            # Best-effort cache: fall through to the application on any error.
            logger.error(f"Cache retrieval error: {e}")

        response = await call_next(request)

        if response.status_code == 200:
            # Drain the streaming body once.  It cannot be iterated again,
            # so every return path below must use these buffered bytes.
            body = b""
            async for chunk in response.body_iterator:
                body += chunk

            try:
                content = json.loads(body)
                # Content-Length is recomputed by JSONResponse; copying the
                # original value could yield a mismatched header.
                headers = {
                    k: v
                    for k, v in response.headers.items()
                    if k.lower() != "content-length"
                }
                cache_data = {
                    "content": content,
                    "status_code": response.status_code,
                    "headers": headers
                }

                await self.redis_client.setex(
                    cache_key,
                    self.cache_ttl,
                    json.dumps(cache_data)
                )

                return JSONResponse(
                    content=content,
                    status_code=response.status_code,
                    headers={**headers, "X-Cache": "MISS"}
                )
            except Exception as e:
                logger.error(f"Cache storage error: {e}")
                # BUG FIX: the original returned the exhausted streaming
                # response here, which reached the client with an empty body.
                # Rebuild a plain response from the buffered bytes instead.
                return Response(
                    content=body,
                    status_code=response.status_code,
                    headers=dict(response.headers),
                    media_type=response.media_type
                )

        return response

    def _generate_cache_key(self, request: Request) -> str:
        """Build a unique cache key from path, query params and caller identity."""
        key_parts = [
            request.url.path,
            str(sorted(request.query_params.items())),
            # Include the Authorization header so users never share entries.
            request.headers.get("authorization", "")
        ]

        key_string = ":".join(key_parts)
        # MD5 is fine here: the key only needs to be compact, not secure.
        return f"cache:{hashlib.md5(key_string.encode()).hexdigest()}"


class RateLimitMiddleware(BaseHTTPMiddleware):
    """
    Sliding-window rate limiting middleware backed by Redis.

    Each client (authenticated user, falling back to IP) may make at most
    ``requests_per_minute`` requests per 60-second window, with a short-term
    burst cap of ``burst_size`` requests per 10 seconds.  Over-limit requests
    receive HTTP 429.  If Redis is unreachable the middleware fails open.
    """

    # Atomic check-and-record: prune entries outside the window, count what
    # remains, reject if over the per-minute or burst limit, otherwise record
    # this request under a unique member (ARGV[5]).
    _RATE_LIMIT_SCRIPT = """
    local key = KEYS[1]
    local current_time = tonumber(ARGV[1])
    local window_size = tonumber(ARGV[2])
    local max_requests = tonumber(ARGV[3])
    local burst_size = tonumber(ARGV[4])
    local member = ARGV[5]

    -- Remove old entries
    redis.call('ZREMRANGEBYSCORE', key, 0, current_time - window_size)

    -- Count current requests
    local current_count = redis.call('ZCARD', key)

    -- Check burst
    local recent_count = redis.call('ZCOUNT', key, current_time - 10, current_time)

    if current_count >= max_requests or recent_count >= burst_size then
        return 0
    end

    -- Add current request
    redis.call('ZADD', key, current_time, member)
    redis.call('EXPIRE', key, window_size)

    return 1
    """

    def __init__(
        self,
        app,
        redis_client: redis.Redis,
        requests_per_minute: int = 60,
        burst_size: int = 10
    ):
        super().__init__(app)
        self.redis_client = redis_client
        self.requests_per_minute = requests_per_minute
        self.burst_size = burst_size
        self.window_size = 60  # seconds

    async def dispatch(self, request: Request, call_next):
        # Identify the caller (user hash or IP).
        client_id = self._get_client_id(request)

        is_allowed = await self._check_rate_limit(client_id)

        if not is_allowed:
            return JSONResponse(
                status_code=429,
                content={
                    "detail": "Rate limit exceeded. Please try again later."
                },
                headers={
                    "X-RateLimit-Limit": str(self.requests_per_minute),
                    "Retry-After": "60"
                }
            )

        response = await call_next(request)

        # Advertise the limit and how much budget remains.
        remaining = await self._get_remaining_requests(client_id)
        response.headers["X-RateLimit-Limit"] = str(self.requests_per_minute)
        response.headers["X-RateLimit-Remaining"] = str(remaining)

        return response

    def _get_client_id(self, request: Request) -> str:
        """Return a stable identifier: hashed auth header, else client IP."""
        auth = request.headers.get("authorization")
        if auth:
            # Hash so raw credentials never appear in Redis keys.
            return f"user:{hashlib.md5(auth.encode()).hexdigest()}"

        client_ip = request.client.host if request.client else "unknown"
        return f"ip:{client_ip}"

    async def _check_rate_limit(self, client_id: str) -> bool:
        """Atomically check and record one request; True if allowed."""
        key = f"rate_limit:{client_id}"
        current_time = int(time.time())
        # BUG FIX: the original used current_time itself as the sorted-set
        # member, so every request within the same second collapsed into a
        # single entry and the limit was effectively never reached.  Use a
        # unique member per request.
        member = f"{current_time}:{time.monotonic_ns()}"

        try:
            # BUG FIX: redis-py's eval signature is
            # eval(script, numkeys, *keys_and_args); the original keys=/args=
            # keywords raised TypeError, which the except swallowed — so rate
            # limiting was silently disabled.
            result = await self.redis_client.eval(
                self._RATE_LIMIT_SCRIPT,
                1,
                key,
                current_time,
                self.window_size,
                self.requests_per_minute,
                self.burst_size,
                member,
            )
            return bool(result)
        except Exception as e:
            logger.error(f"Rate limit check error: {e}")
            return True  # Fail open: allow the request if Redis is down.

    async def _get_remaining_requests(self, client_id: str) -> int:
        """Return how many requests the client has left in the current window."""
        key = f"rate_limit:{client_id}"
        current_time = int(time.time())

        try:
            # Prune expired entries, then count what remains in the window.
            await self.redis_client.zremrangebyscore(key, 0, current_time - self.window_size)
            current_count = await self.redis_client.zcard(key)
            return max(0, self.requests_per_minute - current_count)
        except Exception as e:
            logger.error(f"Remaining requests check error: {e}")
            return self.requests_per_minute


class ConnectionPoolMiddleware(BaseHTTPMiddleware):
    """
    Attach shared database/cache clients to each request's state.

    Handlers access the pools via ``request.state.postgres``,
    ``request.state.mongodb`` and ``request.state.redis`` instead of
    importing globals.
    """

    def __init__(self, app, postgres_pool, mongodb_client, redis_client):
        super().__init__(app)
        self.postgres_pool = postgres_pool
        self.mongodb_client = mongodb_client
        self.redis_client = redis_client

    async def dispatch(self, request: Request, call_next):
        # Expose the shared clients to downstream handlers.
        request.state.postgres = self.postgres_pool
        request.state.mongodb = self.mongodb_client
        request.state.redis = self.redis_client

        # BUG FIX: the original guarded this with
        # hasattr(request.state, "postgres"), which was always true since the
        # attribute was assigned two lines above.  Only the pool truthiness
        # check is meaningful.
        if self.postgres_pool:
            try:
                # NOTE(review): `_closed` is a private attribute of the pool
                # implementation — confirm it exists on the pool type in use.
                if getattr(self.postgres_pool, "_closed", False):
                    logger.warning("PostgreSQL pool is closed, attempting to recreate...")
                    # Trigger reconnection logic here if needed
            except Exception as e:
                logger.error(f"PostgreSQL pool health check failed: {e}")

        response = await call_next(request)
        return response


def async_cache(ttl: int = 300, key_prefix: str = "func"):
    """
    Decorator that caches an async function's JSON-serializable result in Redis.

    Caching only happens when the wrapped function is called with a
    ``redis_client`` keyword argument; without one the function runs normally
    on every call.  Cache errors are logged and never propagate.

    Args:
        ttl: Seconds the cached value stays valid.
        key_prefix: Namespace prefix for the cache key.
    """
    def decorator(func: Callable) -> Callable:
        @wraps(func)
        async def wrapper(*args, **kwargs):
            # BUG FIX: the original keyed on str(kwargs), so the same call
            # with a different keyword order produced a different cache key.
            # Sort by keyword name for a deterministic key.
            kwarg_repr = str(sorted(kwargs.items(), key=lambda kv: kv[0]))
            cache_key = f"{key_prefix}:{func.__name__}:{str(args)}:{kwarg_repr}"
            cache_key_hash = hashlib.md5(cache_key.encode()).hexdigest()

            # The caller opts into caching by supplying a redis client.
            redis_client = kwargs.get("redis_client")

            if redis_client:
                try:
                    cached = await redis_client.get(cache_key_hash)
                    if cached:
                        return json.loads(cached)
                except Exception as e:
                    logger.error(f"Cache retrieval error in {func.__name__}: {e}")

            # Cache miss (or no client): execute the real function.
            result = await func(*args, **kwargs)

            # Store only non-None results; None would round-trip ambiguously
            # with "not cached".
            if redis_client and result is not None:
                try:
                    await redis_client.setex(
                        cache_key_hash,
                        ttl,
                        json.dumps(result)
                    )
                except Exception as e:
                    logger.error(f"Cache storage error in {func.__name__}: {e}")

            return result

        return wrapper
    return decorator


class BatchRequestMiddleware:
    """
    Collects concurrent requests that share a batch key and executes them
    together.

    A request waits up to ``batch_timeout`` seconds for its batch to fill to
    ``batch_size``; whichever happens first triggers execution.  The caller
    that triggers execution receives the results for every request in the
    batch; callers whose batch was already executed receive an empty list.
    """

    def __init__(self, batch_size: int = 10, batch_timeout: float = 0.1):
        self.batch_size = batch_size
        self.batch_timeout = batch_timeout
        # batch_key -> {"requests": [...], "event": asyncio.Event()}
        self.pending_requests = {}
        # batch_key -> asyncio.Lock guarding that key's batch state
        self.locks = {}

    async def process_batch(self, batch_key: str, request_data: dict) -> Any:
        """Join the batch for ``batch_key`` and return its results."""
        lock = self.locks.setdefault(batch_key, asyncio.Lock())

        async with lock:
            batch = self.pending_requests.setdefault(
                batch_key, {"requests": [], "event": asyncio.Event()}
            )
            batch["requests"].append(request_data)
            event = batch["event"]

            # Full batch: wake all waiters and execute immediately.
            if len(batch["requests"]) >= self.batch_size:
                return await self._execute_batch(batch_key)

        # BUG FIX: the original awaited the event while still holding the
        # lock, so no other request could ever join the batch — every call
        # serialized, timed out, and ran a batch of one.  Wait unlocked.
        try:
            await asyncio.wait_for(event.wait(), timeout=self.batch_timeout)
        except asyncio.TimeoutError:
            pass

        async with lock:
            return await self._execute_batch(batch_key)

    async def _execute_batch(self, batch_key: str) -> list:
        """Execute and clear the pending batch; [] if already executed."""
        batch = self.pending_requests.pop(batch_key, None)
        if batch is None:
            return []

        # BUG FIX: the original never set the event, so waiters always sat
        # out the full timeout.  Wake them now; they will find the batch
        # gone and return [] promptly.
        batch["event"].set()

        # Process all requests in batch.
        # This would be implemented based on specific batch processing logic.
        results = []
        for req in batch["requests"]:
            results.append(await self._process_single_request(req))

        return results

    async def _process_single_request(self, request_data: dict) -> Any:
        """Process a single request from the batch (placeholder: echoes input)."""
        # Implement actual request processing logic
        return request_data


def setup_performance_middleware(app: FastAPI, redis_client: Optional[redis.Redis] = None):
    """
    Register all performance middleware on the application.

    Starlette runs middleware in reverse registration order (last added is
    outermost), so requests flow rate-limit -> cache -> monitoring -> gzip.

    Args:
        app: The FastAPI application to configure.
        redis_client: Optional async Redis client; when omitted the caching
            and rate-limiting middleware are skipped.

    Returns:
        The same ``app`` instance, for chaining.
    """
    # Compress responses larger than ~1 KB.
    app.add_middleware(GZipMiddleware, minimum_size=1000)

    # Time every request and log slow ones.
    app.add_middleware(PerformanceMiddleware, slow_request_threshold=1.0)

    if redis_client:
        # Cache GET responses; auth endpoints are excluded on top of defaults.
        app.add_middleware(
            CacheMiddleware,
            redis_client=redis_client,
            cache_ttl=300,
            exclude_paths=["/health", "/docs", "/openapi.json", "/auth"]
        )

        # Sliding-window rate limiting per client.
        app.add_middleware(
            RateLimitMiddleware,
            redis_client=redis_client,
            requests_per_minute=60,
            burst_size=10
        )

    return app