import requests
import time
import psutil
import logging
from datetime import datetime, timedelta
from django.utils import timezone
from django.db import connection
from django.core.cache import cache
from django.conf import settings
from django.db import models
from typing import Dict, List, Any, Optional

from .models import (
    SystemMetric, MetricData, SystemAlert, HealthCheck,
    PerformanceLog, SystemConfiguration
)

logger = logging.getLogger(__name__)


class MonitoringService:
    """
    Monitoring service.

    Collects system-, database- and application-level metrics, persists
    them via SystemMetric/MetricData, raises threshold alerts, and
    executes the health checks configured as HealthCheck records.
    """

    def __init__(self):
        # Shared session so HTTP health checks reuse pooled connections.
        self.session = requests.Session()
        # Default timeout (seconds) for outbound requests.
        self.timeout = 30

    def collect_system_metrics(self) -> Dict[str, Any]:
        """
        Collect host-level metrics (CPU, memory, disk, network) via psutil.

        Returns:
            Mapping of metric name -> {'value', 'unit', 'timestamp'[, 'metadata']}.
            On failure the partial result gains an 'error' key instead of raising.
        """
        metrics = {}

        try:
            # CPU utilisation sampled over a 1-second window (blocks ~1s).
            cpu_percent = psutil.cpu_percent(interval=1)
            metrics['cpu_usage'] = {
                'value': cpu_percent,
                'unit': '%',
                'timestamp': timezone.now()
            }

            # Memory utilisation plus absolute figures in GiB.
            memory = psutil.virtual_memory()
            metrics['memory_usage'] = {
                'value': memory.percent,
                'unit': '%',
                'timestamp': timezone.now(),
                'metadata': {
                    'total_gb': round(memory.total / (1024**3), 2),
                    'available_gb': round(memory.available / (1024**3), 2),
                    'used_gb': round(memory.used / (1024**3), 2)
                }
            }

            # Disk utilisation of the root filesystem.
            disk = psutil.disk_usage('/')
            metrics['disk_usage'] = {
                'value': (disk.used / disk.total) * 100,
                'unit': '%',
                'timestamp': timezone.now(),
                'metadata': {
                    'total_gb': round(disk.total / (1024**3), 2),
                    'used_gb': round(disk.used / (1024**3), 2),
                    'free_gb': round(disk.free / (1024**3), 2)
                }
            }

            # Network I/O counters — cumulative since boot, not a rate.
            network = psutil.net_io_counters()
            metrics['network_io'] = {
                'value': network.bytes_sent + network.bytes_recv,
                'unit': 'bytes',
                'timestamp': timezone.now(),
                'metadata': {
                    'bytes_sent': network.bytes_sent,
                    'bytes_recv': network.bytes_recv,
                    'packets_sent': network.packets_sent,
                    'packets_recv': network.packets_recv
                }
            }

        except Exception as e:
            logger.error(f"收集系统指标时发生错误: {str(e)}")
            metrics['error'] = str(e)

        return metrics

    def collect_database_metrics(self) -> Dict[str, Any]:
        """
        Collect PostgreSQL metrics: active connections, database size,
        and slow-statement count.

        Requires a PostgreSQL backend; the slow-query count additionally
        requires the pg_stat_statements extension. On failure the partial
        result gains an 'error' key instead of raising.
        """
        metrics = {}

        try:
            with connection.cursor() as cursor:
                # Number of currently active backend connections.
                cursor.execute("""
                    SELECT count(*) as connection_count
                    FROM pg_stat_activity
                    WHERE state = 'active'
                """)
                result = cursor.fetchone()

                metrics['database_connections'] = {
                    'value': result[0] if result else 0,
                    'unit': 'count',
                    'timestamp': timezone.now()
                }

                # Database size in raw bytes so it can be stored as a numeric
                # metric value. (Previously pg_size_pretty() was queried and
                # the result silently discarded — the metric was never stored.)
                cursor.execute("""
                    SELECT pg_database_size(current_database())
                """)
                db_size = cursor.fetchone()

                metrics['database_size'] = {
                    'value': db_size[0] if db_size else 0,
                    'unit': 'bytes',
                    'timestamp': timezone.now()
                }

                # Statements whose mean execution time exceeds 1000 ms.
                # NOTE(review): pg_stat_statements renamed mean_time to
                # mean_exec_time in PostgreSQL 13 — confirm the server version.
                cursor.execute("""
                    SELECT count(*) as slow_query_count
                    FROM pg_stat_statements
                    WHERE mean_time > 1000
                """)
                slow_queries = cursor.fetchone()

                metrics['slow_queries'] = {
                    'value': slow_queries[0] if slow_queries else 0,
                    'unit': 'count',
                    'timestamp': timezone.now()
                }

        except Exception as e:
            logger.error(f"收集数据库指标时发生错误: {str(e)}")
            metrics['error'] = str(e)

        return metrics

    def collect_application_metrics(self) -> Dict[str, Any]:
        """
        Collect application metrics for the trailing hour: active users,
        mean API response time, and API error rate.

        On failure the partial result gains an 'error' key instead of raising.
        """
        metrics = {}

        try:
            # Users whose last_login falls within the past hour.
            from django.contrib.auth import get_user_model
            User = get_user_model()

            one_hour_ago = timezone.now() - timedelta(hours=1)
            active_users = User.objects.filter(
                last_login__gte=one_hour_ago
            ).count()

            metrics['active_users'] = {
                'value': active_users,
                'unit': 'count',
                'timestamp': timezone.now()
            }

            # Mean API response time taken from the performance log.
            avg_response_time = PerformanceLog.objects.filter(
                log_type='api_request',
                timestamp__gte=one_hour_ago
            ).aggregate(
                avg_duration=models.Avg('duration_ms')
            )['avg_duration']

            metrics['api_response_time'] = {
                # Avg(...) is None when there are no rows — report 0 instead.
                'value': avg_response_time or 0,
                'unit': 'ms',
                'timestamp': timezone.now()
            }

            # Error rate = failed requests / total requests, as a percentage.
            total_requests = PerformanceLog.objects.filter(
                log_type='api_request',
                timestamp__gte=one_hour_ago
            ).count()

            error_requests = PerformanceLog.objects.filter(
                log_type='api_request',
                timestamp__gte=one_hour_ago,
                success=False
            ).count()

            error_rate = (error_requests / total_requests * 100) if total_requests > 0 else 0

            metrics['error_rate'] = {
                'value': error_rate,
                'unit': '%',
                'timestamp': timezone.now()
            }

        except Exception as e:
            logger.error(f"收集应用指标时发生错误: {str(e)}")
            metrics['error'] = str(e)

        return metrics

    def store_metrics(self, metrics: Dict[str, Any]) -> None:
        """
        Persist a collected metrics mapping as MetricData rows.

        The synthetic 'error' entry added by collectors is skipped, and a
        failure to store one metric never blocks the others. After storing
        each sample, threshold checks may raise a SystemAlert.
        """
        for metric_name, metric_data in metrics.items():
            if metric_name == 'error':
                continue

            try:
                # Find or lazily register the metric definition.
                metric, created = SystemMetric.objects.get_or_create(
                    name=metric_name,
                    defaults={
                        'metric_type': self._get_metric_type(metric_name),
                        'unit': metric_data.get('unit', ''),
                        'description': f'自动收集的{metric_name}指标'
                    }
                )

                # Record the sample itself.
                MetricData.objects.create(
                    metric=metric,
                    value=metric_data['value'],
                    timestamp=metric_data['timestamp'],
                    metadata=metric_data.get('metadata', {})
                )

                # Raise an alert if the sample crosses a configured threshold.
                self._check_thresholds(metric, metric_data['value'])

            except Exception as e:
                logger.error(f"存储指标 {metric_name} 时发生错误: {str(e)}")

    def _get_metric_type(self, metric_name: str) -> str:
        """
        Map a metric name to its SystemMetric type; unknown names fall
        back to 'custom'.
        """
        type_mapping = {
            'cpu_usage': 'cpu_usage',
            'memory_usage': 'memory_usage',
            'disk_usage': 'disk_usage',
            'network_io': 'network_io',
            'database_connections': 'database_connections',
            'api_response_time': 'api_response_time',
            'error_rate': 'error_rate',
            'active_users': 'active_users'
        }
        return type_mapping.get(metric_name, 'custom')

    def _check_thresholds(self, metric: SystemMetric, value: float) -> None:
        """
        Compare a sample against the metric's thresholds and open an alert.

        Critical takes precedence over warning. A new SystemAlert is only
        created when no active alert of the same level already exists, so
        a sustained breach does not spam duplicate alerts.
        """
        alert_level = None
        threshold_value = None

        if metric.critical_threshold is not None and value >= metric.critical_threshold:
            alert_level = 'critical'
            threshold_value = metric.critical_threshold
        elif metric.warning_threshold is not None and value >= metric.warning_threshold:
            alert_level = 'warning'
            threshold_value = metric.warning_threshold

        if alert_level:
            # Deduplicate: skip if an identical active alert is already open.
            existing_alert = SystemAlert.objects.filter(
                metric=metric,
                status='active',
                alert_level=alert_level
            ).first()

            if not existing_alert:
                SystemAlert.objects.create(
                    metric=metric,
                    alert_level=alert_level,
                    title=f'{metric.name}超过{alert_level}阈值',
                    message=f'{metric.name}当前值{value}{metric.unit}，超过{alert_level}阈值{threshold_value}{metric.unit}',
                    trigger_value=value,
                    threshold_value=threshold_value
                )

    def execute_health_check(self, health_check: HealthCheck) -> Dict[str, Any]:
        """
        Run a single configured health check and time it.

        Returns a dict with 'success', 'error', 'duration_ms' and
        'timestamp'. Never raises: any exception is captured in 'error'.
        """
        start_time = time.time()
        result = {
            'success': False,
            'error': None,
            'duration_ms': 0,
            'timestamp': timezone.now()
        }

        try:
            # Merge the sub-check outcome into the prepared result instead of
            # replacing it, so the 'timestamp' key is preserved (the previous
            # wholesale assignment dropped it).
            if health_check.check_type == 'http':
                result.update(self._execute_http_check(health_check))
            elif health_check.check_type == 'database':
                result.update(self._execute_database_check(health_check))
            elif health_check.check_type == 'redis':
                result.update(self._execute_redis_check(health_check))
            elif health_check.check_type == 'service':
                result.update(self._execute_service_check(health_check))
            else:
                result['error'] = f'不支持的检查类型: {health_check.check_type}'

        except Exception as e:
            result['error'] = str(e)

        result['duration_ms'] = (time.time() - start_time) * 1000
        return result

    def _execute_http_check(self, health_check: HealthCheck) -> Dict[str, Any]:
        """
        HTTP health check: GET the configured endpoint.

        NOTE(review): only status 200 counts as healthy — other 2xx codes
        are treated as failures; confirm that is intentional.
        """
        try:
            response = self.session.get(
                health_check.endpoint,
                timeout=health_check.timeout
            )

            if response.status_code == 200:
                return {'success': True, 'error': None}
            else:
                return {
                    'success': False,
                    'error': f'HTTP状态码: {response.status_code}'
                }

        except requests.exceptions.Timeout:
            return {'success': False, 'error': '请求超时'}
        except requests.exceptions.ConnectionError:
            return {'success': False, 'error': '连接失败'}
        except Exception as e:
            return {'success': False, 'error': str(e)}

    def _execute_database_check(self, health_check: HealthCheck) -> Dict[str, Any]:
        """
        Database health check: run 'SELECT 1' through the default connection.
        """
        try:
            with connection.cursor() as cursor:
                cursor.execute('SELECT 1')
                result = cursor.fetchone()

                if result and result[0] == 1:
                    return {'success': True, 'error': None}
                else:
                    return {'success': False, 'error': '数据库查询失败'}

        except Exception as e:
            return {'success': False, 'error': str(e)}

    def _execute_redis_check(self, health_check: HealthCheck) -> Dict[str, Any]:
        """
        Cache-backend health check: round-trip a test key through the
        Django cache (assumed to be Redis by the check's name).
        """
        try:
            # Write, read back, then clean up a short-lived probe key.
            test_key = 'health_check_test'
            cache.set(test_key, 'test_value', timeout=60)
            value = cache.get(test_key)

            if value == 'test_value':
                cache.delete(test_key)
                return {'success': True, 'error': None}
            else:
                return {'success': False, 'error': 'Redis读写测试失败'}

        except Exception as e:
            return {'success': False, 'error': str(e)}

    def _execute_service_check(self, health_check: HealthCheck) -> Dict[str, Any]:
        """
        Service health check placeholder.

        Always reports success; a real implementation would verify the
        process is running, the port is listening, etc.
        """
        try:
            return {'success': True, 'error': None}

        except Exception as e:
            return {'success': False, 'error': str(e)}


class AlertService:
    """
    Alerting service: fans a SystemAlert out to every configured
    notification channel (email, webhook, SMS).
    """

    def __init__(self):
        # Channel list is read once from SystemConfiguration at construction.
        self.notification_channels = self._load_notification_channels()

    def _load_notification_channels(self) -> List[Dict[str, Any]]:
        """
        Read the active 'notification_channels' alerting configuration.

        Returns an empty list when no active configuration row exists or
        when loading fails (the failure is logged, never raised).
        """
        try:
            config_row = SystemConfiguration.objects.filter(
                config_type='alerting',
                name='notification_channels',
                is_active=True
            ).first()

            if config_row is not None:
                return config_row.config_value.get('channels', [])

        except Exception as e:
            logger.error(f"加载通知渠道配置时发生错误: {str(e)}")

        return []

    def send_alert(self, alert: SystemAlert) -> None:
        """
        Deliver the alert over every configured channel.

        Per-channel failures are logged and swallowed so one broken
        channel cannot block delivery through the others; channel types
        without a handler are silently skipped.
        """
        dispatch = {
            'email': self._send_email_alert,
            'webhook': self._send_webhook_alert,
            'sms': self._send_sms_alert,
        }

        for channel in self.notification_channels:
            try:
                handler = dispatch.get(channel['type'])
                if handler is not None:
                    handler(alert, channel)

            except Exception as e:
                logger.error(f"发送告警通知时发生错误: {str(e)}")

    def _send_email_alert(self, alert: SystemAlert, channel: Dict[str, Any]) -> None:
        """
        Send the alert by email to the channel's recipient list via
        Django's mail backend (raises on delivery failure).
        """
        from django.core.mail import send_mail

        mail_subject = f'[{alert.get_alert_level_display()}] {alert.title}'
        mail_body = f"""
        告警详情：
        
        指标：{alert.metric.name}
        级别：{alert.get_alert_level_display()}
        消息：{alert.message}
        触发值：{alert.trigger_value}{alert.metric.unit}
        阈值：{alert.threshold_value}{alert.metric.unit}
        触发时间：{alert.triggered_at}
        
        请及时处理。
        """

        send_mail(
            subject=mail_subject,
            message=mail_body,
            from_email=settings.DEFAULT_FROM_EMAIL,
            recipient_list=channel.get('recipients', []),
            fail_silently=False
        )

    def _send_webhook_alert(self, alert: SystemAlert, channel: Dict[str, Any]) -> None:
        """
        POST the alert as JSON to the channel's webhook URL; raises on
        timeout or a non-2xx response so send_alert can log it.
        """
        body = {
            'alert_id': alert.id,
            'metric_name': alert.metric.name,
            'alert_level': alert.alert_level,
            'title': alert.title,
            'message': alert.message,
            'trigger_value': alert.trigger_value,
            'threshold_value': alert.threshold_value,
            'triggered_at': alert.triggered_at.isoformat()
        }

        reply = requests.post(
            channel['url'],
            json=body,
            headers=channel.get('headers', {}),
            timeout=30
        )

        reply.raise_for_status()

    def _send_sms_alert(self, alert: SystemAlert, channel: Dict[str, Any]) -> None:
        """
        SMS delivery placeholder — provider-specific, not implemented.
        """
        # The message text is prepared so a future provider integration
        # (e.g. Aliyun SMS) only needs to send it.
        message = f'[{alert.get_alert_level_display()}] {alert.title}: {alert.message}'

        pass


class PerformanceAnalyzer:
    """
    Performance analyzer: aggregates PerformanceLog rows into API and
    database performance summaries and derives tuning recommendations.
    """

    def analyze_api_performance(self, hours: int = 24) -> Dict[str, Any]:
        """
        Analyze API request performance over the trailing *hours* window.

        Returns:
            Dict with 'overall_stats' (aggregates; Avg/Max/Min are None
            when no rows match), 'operation_stats', 'slow_requests'
            (top 20 over 1s), 'error_analysis' (top 10 error groups) and
            'time_range'.
        """
        end_time = timezone.now()
        start_time = end_time - timedelta(hours=hours)

        # API request logs within the analysis window.
        api_logs = PerformanceLog.objects.filter(
            log_type='api_request',
            timestamp__range=[start_time, end_time]
        )

        # Overall aggregates.
        from django.db.models import Avg, Max, Min, Count, Q
        overall_stats = api_logs.aggregate(
            avg_duration=Avg('duration_ms'),
            max_duration=Max('duration_ms'),
            min_duration=Min('duration_ms'),
            total_requests=Count('id'),
            success_count=Count('id', filter=Q(success=True)),
            error_count=Count('id', filter=Q(success=False))
        )

        # Per-operation stats, slowest operations first.
        operation_stats = api_logs.values('operation').annotate(
            avg_duration=Avg('duration_ms'),
            max_duration=Max('duration_ms'),
            request_count=Count('id'),
            error_count=Count('id', filter=Q(success=False))
        ).order_by('-avg_duration')

        # Slow requests: the 20 worst calls that exceeded 1 second.
        slow_requests = api_logs.filter(duration_ms__gt=1000).values(
            'operation', 'duration_ms', 'timestamp', 'user__username'
        ).order_by('-duration_ms')[:20]

        # Most frequent error groups (top 10).
        error_analysis = api_logs.filter(success=False).values(
            'operation', 'error_message'
        ).annotate(
            error_count=Count('id')
        ).order_by('-error_count')[:10]

        return {
            'overall_stats': overall_stats,
            'operation_stats': list(operation_stats),
            'slow_requests': list(slow_requests),
            'error_analysis': list(error_analysis),
            'time_range': {
                'start': start_time,
                'end': end_time,
                'hours': hours
            }
        }

    def analyze_database_performance(self, hours: int = 24) -> Dict[str, Any]:
        """
        Analyze database query performance over the trailing *hours* window.

        Returns:
            Dict with 'stats' (aggregates; Avg/Max/Min are None when no
            rows match), 'slow_queries' (top 20 over 100 ms) and
            'time_range'.
        """
        end_time = timezone.now()
        start_time = end_time - timedelta(hours=hours)

        # Database query logs within the analysis window.
        db_logs = PerformanceLog.objects.filter(
            log_type='database_query',
            timestamp__range=[start_time, end_time]
        )

        # Aggregate statistics.
        from django.db.models import Avg, Max, Min, Count
        stats = db_logs.aggregate(
            avg_duration=Avg('duration_ms'),
            max_duration=Max('duration_ms'),
            min_duration=Min('duration_ms'),
            total_queries=Count('id')
        )

        # Slow queries: the 20 worst queries that exceeded 100 ms.
        slow_queries = db_logs.filter(duration_ms__gt=100).values(
            'operation', 'duration_ms', 'timestamp'
        ).order_by('-duration_ms')[:20]

        return {
            'stats': stats,
            'slow_queries': list(slow_queries),
            'time_range': {
                'start': start_time,
                'end': end_time,
                'hours': hours
            }
        }

    def generate_performance_report(self, hours: int = 24) -> Dict[str, Any]:
        """
        Build a combined performance report (API + database analyses plus
        the recommendations derived from them).
        """
        api_analysis = self.analyze_api_performance(hours)
        db_analysis = self.analyze_database_performance(hours)

        recommendations = self._generate_recommendations(api_analysis, db_analysis)

        return {
            'api_performance': api_analysis,
            'database_performance': db_analysis,
            'recommendations': recommendations,
            'generated_at': timezone.now()
        }

    def _generate_recommendations(self, api_analysis: Dict, db_analysis: Dict) -> List[str]:
        """
        Derive human-readable tuning suggestions from the analysis dicts.

        Aggregate values coming from Django's Avg()/Count() may be None
        when the underlying queryset is empty (the key exists with value
        None, so ``.get(key, 0)`` still yields None). Every numeric read
        is therefore coerced with ``or 0`` before any comparison —
        previously ``None > 1000`` raised TypeError on an empty window.
        """
        recommendations = []

        # --- API recommendations ---
        api_stats = api_analysis.get('overall_stats', {})
        avg_duration = api_stats.get('avg_duration') or 0

        if avg_duration > 1000:  # mean response time above 1 second
            recommendations.append('API平均响应时间较长，建议优化慢接口')

        error_rate = 0
        total_requests = api_stats.get('total_requests') or 0
        if total_requests > 0:
            error_rate = ((api_stats.get('error_count') or 0) / total_requests) * 100

        if error_rate > 5:  # error rate above 5%
            recommendations.append(f'API错误率较高({error_rate:.1f}%)，建议检查错误原因')

        # --- Database recommendations ---
        db_stats = db_analysis.get('stats', {})
        db_avg_duration = db_stats.get('avg_duration') or 0

        if db_avg_duration > 100:  # mean query time above 100 ms
            recommendations.append('数据库查询较慢，建议优化SQL或添加索引')

        slow_queries = db_analysis.get('slow_queries', [])
        if len(slow_queries) > 10:
            recommendations.append('存在较多慢查询，建议进行SQL优化')

        # Fall back to an "all good" message when nothing triggered.
        if not recommendations:
            recommendations.append('系统性能良好，无明显问题')

        return recommendations


class MetricCleanupService:
    """
    Cleanup service that purges monitoring data which has outlived its
    retention window.
    """

    def cleanup_old_data(self) -> Dict[str, int]:
        """
        Delete expired metric samples, performance logs and resolved alerts.

        Metric samples use the per-metric ``retention_days``; performance
        logs are kept 30 days and resolved alerts 90 days after resolution.

        Returns:
            Summary dict with the number of rows removed per category.
        """
        now = timezone.now()

        # Metric samples: each active metric defines its own retention.
        samples_removed = 0
        for active_metric in SystemMetric.objects.filter(is_active=True):
            expiry = now - timedelta(days=active_metric.retention_days)
            count, _ = MetricData.objects.filter(
                metric=active_metric,
                timestamp__lt=expiry
            ).delete()
            samples_removed += count

        # Performance logs: fixed 30-day retention.
        logs_removed, _ = PerformanceLog.objects.filter(
            timestamp__lt=now - timedelta(days=30)
        ).delete()

        # Resolved alerts: kept 90 days after their resolution time.
        alerts_removed, _ = SystemAlert.objects.filter(
            status='resolved',
            resolved_at__lt=now - timedelta(days=90)
        ).delete()

        return {
            'metric_data_deleted': samples_removed,
            'performance_logs_deleted': logs_removed,
            'resolved_alerts_deleted': alerts_removed
        }