"""
Traffic monitoring and analytics for CodeMCP Gateway.

Provides comprehensive monitoring of request patterns, performance metrics,
error tracking, and analytics dashboard data.
"""

import time
import asyncio
from typing import Dict, Any, List, Optional, Tuple
from datetime import datetime, timedelta
from collections import defaultdict, deque
from dataclasses import dataclass, field
from enum import Enum

from fastapi import Request, Response

from ..core.config import Config
from ..core.error_handler import log_info, log_debug, log_warning, handle_error
from ..core.monitoring import MetricsCollector, MetricPoint


class RequestStatus(Enum):
    """Terminal classification assigned to a monitored request."""

    SUCCESS = "success"              # request completed normally
    ERROR = "error"                  # request failed
    TIMEOUT = "timeout"              # request ran out of time
    RATE_LIMITED = "rate_limited"    # request rejected by rate limiting


@dataclass
class RequestMetrics:
    """Snapshot of a single handled request as captured by the monitor."""

    # Facts recorded for every request.
    timestamp: datetime
    method: str
    path: str
    status_code: int
    processing_time: float
    request_size: int
    response_size: int
    client_ip: str
    user_agent: str
    # Optional context, populated when the request supplied it.
    session_id: Optional[str] = None
    user_id: Optional[str] = None
    language: Optional[str] = None
    analysis_type: Optional[str] = None
    error_message: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Return a JSON-friendly mapping; the timestamp is rendered ISO-8601."""
        serialized = dict(
            timestamp=self.timestamp.isoformat(),
            method=self.method,
            path=self.path,
            status_code=self.status_code,
            processing_time=self.processing_time,
            request_size=self.request_size,
            response_size=self.response_size,
            client_ip=self.client_ip,
            user_agent=self.user_agent,
            session_id=self.session_id,
            user_id=self.user_id,
            language=self.language,
            analysis_type=self.analysis_type,
            error_message=self.error_message,
        )
        return serialized


class TrafficAnalyzer:
    """Aggregates request metrics into hourly buckets and rolling real-time stats."""

    def __init__(self, max_history: int = 10000):
        self.max_history = max_history
        # Bounded request log; the deque drops the oldest entry once full.
        self.request_history: deque = deque(maxlen=max_history)

        # Per-hour aggregate buckets keyed by "YYYY-MM-DD-HH".
        def _new_hour_bucket() -> Dict[str, Any]:
            return {
                "total_requests": 0,
                "successful_requests": 0,
                "error_requests": 0,
                "avg_response_time": 0.0,
                "languages_used": set(),
                "analysis_types_used": set(),
                "unique_ips": set(),
                "unique_sessions": set()
            }

        self.hourly_stats: Dict[str, Dict[str, Any]] = defaultdict(_new_hour_bucket)

        # Rolling real-time statistics.
        # NOTE(review): "requests_per_minute" is never appended to anywhere in
        # this class, and "active_sessions" is never pruned — confirm intent.
        self.current_stats = {
            "requests_per_minute": deque(maxlen=60),  # Last 60 minutes
            "avg_response_time": deque(maxlen=100),   # Last 100 requests
            "error_rate": deque(maxlen=100),          # Last 100 requests
            "active_sessions": set(),
            "popular_endpoints": defaultdict(int),
            "language_popularity": defaultdict(int),
            "analysis_type_popularity": defaultdict(int)
        }

    def add_request(self, metrics: RequestMetrics):
        """Fold one completed request into history, hourly and real-time stats."""
        self.request_history.append(metrics)

        # Hourly bucket (created on demand by the defaultdict factory).
        bucket = self.hourly_stats[metrics.timestamp.strftime("%Y-%m-%d-%H")]

        bucket["total_requests"] += 1
        # 2xx and 3xx both count as successful here.
        outcome_key = "successful_requests" if 200 <= metrics.status_code < 400 else "error_requests"
        bucket[outcome_key] += 1

        # Running mean of response times for the hour.
        n = bucket["total_requests"]
        prev_avg = bucket["avg_response_time"]
        bucket["avg_response_time"] = (prev_avg * (n - 1) + metrics.processing_time) / n

        # Track distinct values seen this hour.
        if metrics.language:
            bucket["languages_used"].add(metrics.language)
        if metrics.analysis_type:
            bucket["analysis_types_used"].add(metrics.analysis_type)
        bucket["unique_ips"].add(metrics.client_ip)
        if metrics.session_id:
            bucket["unique_sessions"].add(metrics.session_id)

        self._update_realtime_stats(metrics)

    def _update_realtime_stats(self, metrics: RequestMetrics):
        """Push one request into the rolling windows and popularity counters."""
        stats = self.current_stats

        # Rolling response-time window.
        stats["avg_response_time"].append(metrics.processing_time)

        # Rolling error window: 1 for any 4xx/5xx, else 0.
        stats["error_rate"].append(1 if metrics.status_code >= 400 else 0)

        if metrics.session_id:
            stats["active_sessions"].add(metrics.session_id)

        # Endpoint popularity keyed by "METHOD /path".
        stats["popular_endpoints"][f"{metrics.method} {metrics.path}"] += 1

        if metrics.language:
            stats["language_popularity"][metrics.language] += 1
        if metrics.analysis_type:
            stats["analysis_type_popularity"][metrics.analysis_type] += 1

    def get_current_metrics(self) -> Dict[str, Any]:
        """Snapshot of rolling averages, error rate and popularity rankings."""
        stats = self.current_stats

        def _mean(window) -> float:
            values = list(window)
            return sum(values) / len(values) if values else 0.0

        def _ranked(counter, limit=None) -> Dict[str, int]:
            ordered = sorted(counter.items(), key=lambda kv: kv[1], reverse=True)
            return dict(ordered if limit is None else ordered[:limit])

        return {
            "avg_response_time": _mean(stats["avg_response_time"]),
            "error_rate": _mean(stats["error_rate"]) * 100,  # percentage
            "active_sessions": len(stats["active_sessions"]),
            "popular_endpoints": _ranked(stats["popular_endpoints"], limit=10),
            "language_popularity": _ranked(stats["language_popularity"]),
            "analysis_type_popularity": _ranked(stats["analysis_type_popularity"]),
            "total_requests_tracked": len(self.request_history)
        }

    def get_hourly_report(self, hours: int = 24) -> Dict[str, Any]:
        """Per-hour statistics for the last *hours* hours (sets reduced to counts)."""
        now = datetime.utcnow()
        report: Dict[str, Any] = {}

        for offset in range(hours):
            key = (now - timedelta(hours=offset)).strftime("%Y-%m-%d-%H")

            # Hour with no traffic: emit an all-zero entry.
            if key not in self.hourly_stats:
                report[key] = {
                    "total_requests": 0,
                    "successful_requests": 0,
                    "error_requests": 0,
                    "avg_response_time": 0.0,
                    "unique_languages": 0,
                    "unique_analysis_types": 0,
                    "unique_ips": 0,
                    "unique_sessions": 0
                }
                continue

            # Convert sets to counts so the entry is JSON serializable.
            raw = self.hourly_stats[key]
            report[key] = {
                "total_requests": raw["total_requests"],
                "successful_requests": raw["successful_requests"],
                "error_requests": raw["error_requests"],
                "avg_response_time": raw["avg_response_time"],
                "unique_ips": len(raw["unique_ips"]),
                "unique_sessions": len(raw["unique_sessions"]),
                "unique_languages": len(raw["languages_used"]),
                "unique_analysis_types": len(raw["analysis_types_used"]),
            }

        return report

    def get_performance_insights(self) -> Dict[str, Any]:
        """Derive human-readable insights/recommendations from recent traffic."""
        if not self.request_history:
            return {"insights": [], "recommendations": []}

        insights: List[str] = []
        recommendations: List[str] = []

        # Response-time analysis over the full retained history.
        durations = [entry.processing_time for entry in self.request_history]
        avg_time = sum(durations) / len(durations)
        worst_time = max(durations)

        if avg_time > 2.0:
            insights.append(f"Average response time is high: {avg_time:.2f}s")
            recommendations.append("Consider optimizing slow endpoints or adding caching")

        if worst_time > 10.0:
            insights.append(f"Maximum response time is very high: {worst_time:.2f}s")
            recommendations.append("Investigate timeout issues and optimize slow operations")

        # Error-rate analysis over the most recent 100 requests.
        recent = list(self.request_history)[-100:]
        failures = sum(1 for entry in recent if entry.status_code >= 400)
        failure_pct = failures / len(recent) * 100 if recent else 0

        if failure_pct > 10.0:
            insights.append(f"Error rate is high: {failure_pct:.1f}%")
            recommendations.append("Review error logs and improve error handling")

        # Traffic-concentration analysis: flag one path taking >50% of traffic.
        per_path = defaultdict(int)
        for entry in recent:
            per_path[entry.path] += 1

        if per_path:
            hot_path, hot_count = max(per_path.items(), key=lambda kv: kv[1])
            if hot_count > len(recent) * 0.5:
                insights.append(f"Single endpoint dominates traffic: {hot_path} ({hot_count} requests)")
                recommendations.append("Consider caching or optimizing the most popular endpoint")

        return {
            "insights": insights,
            "recommendations": recommendations,
            "metrics": {
                "avg_response_time": avg_time,
                "max_response_time": worst_time,
                "error_rate": failure_pct,
                "total_requests_analyzed": len(recent)
            }
        }


class TrafficMonitor:
    """Main traffic monitoring system.

    Records per-request metrics into a MetricsCollector, feeds completed
    requests into a TrafficAnalyzer for aggregation, and runs a background
    task that expires hourly statistics past the retention window.
    """

    def __init__(self, config: Config):
        self.config = config
        self.metrics_collector = MetricsCollector()
        self.traffic_analyzer = TrafficAnalyzer()

        # Configuration
        self.enabled = True
        self.detailed_logging = config.debug  # per-request debug logs only in debug mode
        self.retention_hours = 72  # Keep detailed data for 3 days

        # Background tasks
        self.cleanup_task: Optional[asyncio.Task] = None
        self.is_running = False

        # Statistics about the monitor itself (not about the traffic it sees).
        # Values are kept as datetime objects internally; serialization for
        # external consumers happens in get_dashboard_data/get_health_status.
        self.monitor_stats = {
            "total_requests_monitored": 0,
            "monitoring_start_time": datetime.utcnow(),
            "last_cleanup": None
        }

    async def initialize(self):
        """Initialize the traffic monitor and start the background cleanup task."""
        if not self.enabled:
            log_info("Traffic monitoring is disabled")
            return

        # Start background tasks
        self.is_running = True
        self.cleanup_task = asyncio.create_task(self._cleanup_loop())

        log_info("Traffic monitor initialized successfully")

    async def shutdown(self):
        """Cancel the cleanup task and shut the monitor down cleanly."""
        self.is_running = False

        if self.cleanup_task:
            self.cleanup_task.cancel()
            try:
                await self.cleanup_task
            except asyncio.CancelledError:
                pass  # expected when the task is cancelled mid-sleep

        log_info("Traffic monitor shutdown complete")

    async def record_request(self, request: Request) -> float:
        """
        Record the start of a request.

        Args:
            request: FastAPI request object

        Returns:
            Start time for calculating processing time (returned even when
            monitoring is disabled, so callers can always pass it back to
            record_response).
        """
        if not self.enabled:
            return time.time()

        start_time = time.time()

        # Record basic request metrics
        self.metrics_collector.record_metric("requests_total", 1)
        self.metrics_collector.record_metric("requests_per_minute", 1)

        if self.detailed_logging:
            log_debug(f"Started monitoring request: {request.method} {request.url.path}")

        return start_time

    async def record_response(self,
                            request: Request,
                            response: Response,
                            start_time: float):
        """
        Record the completion of a request.

        Args:
            request: FastAPI request object
            response: FastAPI response object
            start_time: Request start time (as returned by record_request)
        """
        if not self.enabled:
            return

        end_time = time.time()
        processing_time = end_time - start_time

        # Extract request information
        metrics = self._extract_request_metrics(request, response, processing_time)

        # Record metrics
        self.metrics_collector.record_metric("response_time", processing_time)
        self.metrics_collector.record_metric("response_size", metrics.response_size)

        # Record status code metrics. 3xx redirects count as successful so
        # these counters agree with TrafficAnalyzer.add_request, which treats
        # 200 <= status_code < 400 as success (previously 3xx responses were
        # not counted in any bucket here).
        if 200 <= response.status_code < 400:
            self.metrics_collector.record_metric("requests_successful", 1)
        elif 400 <= response.status_code < 500:
            self.metrics_collector.record_metric("requests_client_error", 1)
        elif response.status_code >= 500:
            self.metrics_collector.record_metric("requests_server_error", 1)

        # Add to traffic analyzer
        self.traffic_analyzer.add_request(metrics)

        # Update statistics
        self.monitor_stats["total_requests_monitored"] += 1

        if self.detailed_logging:
            log_debug(f"Recorded response: {request.method} {request.url.path} - "
                     f"Status: {response.status_code}, Time: {processing_time:.3f}s")

    async def record_error(self, request: Request, error_info: Dict[str, Any]):
        """
        Record an error that occurred during request processing.

        Args:
            request: FastAPI request object
            error_info: Error information dictionary (only "message" is read)
        """
        if not self.enabled:
            return

        # Record error metrics
        self.metrics_collector.record_metric("requests_error", 1)

        # Extract what we can from the failed request
        metrics = RequestMetrics(
            timestamp=datetime.utcnow(),
            method=request.method,
            path=request.url.path,
            status_code=500,  # Assume server error
            processing_time=0.0,  # Unknown
            request_size=0,
            response_size=0,
            client_ip=self._get_client_ip(request),
            user_agent=request.headers.get("User-Agent", "unknown"),
            session_id=request.headers.get("X-Session-ID"),
            error_message=error_info.get("message", "Unknown error")
        )

        self.traffic_analyzer.add_request(metrics)

        log_warning(f"Recorded error for request: {request.method} {request.url.path}")

    def _extract_request_metrics(self,
                                request: Request,
                                response: Response,
                                processing_time: float) -> RequestMetrics:
        """Extract comprehensive metrics from request/response.

        Sizes are approximations: the request size is estimated from the URL
        and header text (the body is not read here), and the response size is
        only available when the response object exposes a buffered body.
        """
        # Calculate request size (approximation)
        request_size = len(str(request.url)) + sum(len(f"{k}: {v}") for k, v in request.headers.items())

        # Calculate response size (approximation); streaming responses have
        # no .body attribute and report 0.
        response_size = 0
        if hasattr(response, 'body'):
            response_size = len(response.body) if response.body else 0

        # Extract additional context from headers or request state.
        # NOTE(review): assumes request.state.user, when present, is a dict
        # with a "user_id" key — confirm against the auth middleware.
        session_id = request.headers.get("X-Session-ID")
        user_id = getattr(request.state, "user", {}).get("user_id") if hasattr(request.state, "user") else None
        language = request.headers.get("X-Language")
        analysis_type = request.headers.get("X-Analysis-Type")

        return RequestMetrics(
            timestamp=datetime.utcnow(),
            method=request.method,
            path=request.url.path,
            status_code=response.status_code,
            processing_time=processing_time,
            request_size=request_size,
            response_size=response_size,
            client_ip=self._get_client_ip(request),
            user_agent=request.headers.get("User-Agent", "unknown"),
            session_id=session_id,
            user_id=user_id,
            language=language,
            analysis_type=analysis_type
        )

    def _get_client_ip(self, request: Request) -> str:
        """Extract client IP address from request.

        Prefers proxy headers (first X-Forwarded-For entry, then X-Real-IP)
        and falls back to the transport-level client address.
        """
        # Check for forwarded headers
        forwarded_for = request.headers.get("X-Forwarded-For")
        if forwarded_for:
            # First entry is the originating client when behind proxies.
            return forwarded_for.split(",")[0].strip()

        real_ip = request.headers.get("X-Real-IP")
        if real_ip:
            return real_ip

        # Fallback to client IP
        if request.client:
            return request.client.host

        return "unknown"

    async def _cleanup_loop(self):
        """Background loop to clean up old data (hourly; 30 min backoff on error)."""
        while self.is_running:
            try:
                await self._cleanup_old_data()
                await asyncio.sleep(3600)  # Run every hour

            except asyncio.CancelledError:
                break
            except Exception as e:
                log_warning(f"Error in traffic monitor cleanup loop: {e}")
                await asyncio.sleep(1800)  # Wait 30 minutes on error

    async def _cleanup_old_data(self):
        """Drop hourly statistics older than the retention window."""
        cutoff_time = datetime.utcnow() - timedelta(hours=self.retention_hours)

        # Clean up hourly stats
        old_keys = []
        for hour_key in self.traffic_analyzer.hourly_stats.keys():
            try:
                hour_time = datetime.strptime(hour_key, "%Y-%m-%d-%H")
                if hour_time < cutoff_time:
                    old_keys.append(hour_key)
            except ValueError:
                # Invalid format, remove it
                old_keys.append(hour_key)

        for key in old_keys:
            del self.traffic_analyzer.hourly_stats[key]

        # Clean up metrics collector (it has its own cleanup)
        # The request history in traffic analyzer has a maxlen, so it self-cleans

        self.monitor_stats["last_cleanup"] = datetime.utcnow()

        if old_keys:
            log_debug(f"Cleaned up {len(old_keys)} old hourly statistics")

    def get_dashboard_data(self) -> Dict[str, Any]:
        """Get data for monitoring dashboard.

        Returns a JSON-serializable payload: monitor_stats datetimes are
        rendered as ISO-8601 strings (a copy is serialized so the internal
        datetime objects in self.monitor_stats are untouched), matching
        "generated_at" and get_health_status.
        """
        current_metrics = self.traffic_analyzer.get_current_metrics()
        hourly_report = self.traffic_analyzer.get_hourly_report(24)
        performance_insights = self.traffic_analyzer.get_performance_insights()

        # Get recent metrics from collector
        metrics_summary = self.metrics_collector.get_all_metrics()

        # Serialize internal datetimes for JSON consumers.
        monitor_stats = dict(self.monitor_stats)
        monitor_stats["monitoring_start_time"] = monitor_stats["monitoring_start_time"].isoformat()
        last_cleanup = monitor_stats["last_cleanup"]
        monitor_stats["last_cleanup"] = last_cleanup.isoformat() if last_cleanup else None

        return {
            "current_metrics": current_metrics,
            "hourly_report": hourly_report,
            "performance_insights": performance_insights,
            "system_metrics": metrics_summary,
            "monitor_stats": monitor_stats,
            "generated_at": datetime.utcnow().isoformat()
        }

    def get_traffic_summary(self, hours: int = 1) -> Dict[str, Any]:
        """Get traffic summary for specified time period (duration in hours)."""
        return {
            "current_metrics": self.traffic_analyzer.get_current_metrics(),
            "hourly_data": self.traffic_analyzer.get_hourly_report(hours),
            "total_monitored": self.monitor_stats["total_requests_monitored"],
            "monitoring_duration": (datetime.utcnow() - self.monitor_stats["monitoring_start_time"]).total_seconds() / 3600
        }

    def get_health_status(self) -> Dict[str, Any]:
        """Get health status of the monitoring system (JSON-serializable)."""
        return {
            "monitoring_enabled": self.enabled,
            "monitoring_active": self.is_running,
            "total_requests_monitored": self.monitor_stats["total_requests_monitored"],
            "uptime_hours": (datetime.utcnow() - self.monitor_stats["monitoring_start_time"]).total_seconds() / 3600,
            "last_cleanup": self.monitor_stats["last_cleanup"].isoformat() if self.monitor_stats["last_cleanup"] else None
        }