"""
性能监控和优化工具
监控数据同步过程的性能指标，提供优化建议
"""

import asyncio
import logging
import time
from collections import deque
from dataclasses import asdict, dataclass, field
from typing import Any, Dict, List, Optional

import psutil

logger = logging.getLogger(__name__)


@dataclass
class PerformanceMetrics:
    """A single snapshot of process/system performance counters.

    Disk and network *_mb values are cumulative deltas (in MiB) since the
    monitor's baseline was taken; CPU/memory values are instantaneous
    process readings.
    """
    timestamp: float            # epoch seconds when the sample was taken
    cpu_percent: float          # process CPU usage percentage
    memory_percent: float       # process memory usage as % of total RAM
    memory_used_mb: float       # process RSS in MiB
    disk_io_read_mb: float      # disk bytes read since baseline, MiB
    disk_io_write_mb: float     # disk bytes written since baseline, MiB
    network_sent_mb: float      # network bytes sent since baseline, MiB
    network_recv_mb: float      # network bytes received since baseline, MiB
    active_connections: int = 0         # open connections of the process
    records_per_second: float = 0.0     # throughput of the most recent batch
    batch_processing_time: float = 0.0  # wall time of the most recent batch

    def to_dict(self) -> Dict[str, Any]:
        """Return all fields as a plain, JSON-serializable dict."""
        # All fields are flat scalars, so dataclasses.asdict produces the
        # same mapping as a hand-written dict and cannot drift out of sync
        # with the field list.
        return asdict(self)


@dataclass
class SyncTaskPerformance:
    """Aggregated performance statistics for one sync-task execution."""
    task_id: int
    execution_id: int
    start_time: float
    end_time: Optional[float] = None
    total_records: int = 0
    processed_records: int = 0
    failed_records: int = 0
    total_batches: int = 0
    avg_batch_time: float = 0.0
    max_batch_time: float = 0.0
    min_batch_time: float = float('inf')
    avg_records_per_second: float = 0.0
    peak_memory_mb: float = 0.0
    peak_cpu_percent: float = 0.0
    metrics_history: List[PerformanceMetrics] = field(default_factory=list)

    @property
    def duration(self) -> float:
        """Elapsed seconds; falls back to "now" while the task is running."""
        return (self.end_time or time.time()) - self.start_time

    @property
    def success_rate(self) -> float:
        """Percentage of processed records that did not fail (0.0 when none)."""
        processed = self.processed_records
        if not processed:
            return 0.0
        return (processed - self.failed_records) / processed * 100

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the summary statistics.

        The raw ``metrics_history`` samples are intentionally omitted, and
        the ``inf`` sentinel for "no batch recorded yet" is reported as 0.
        """
        min_batch = self.min_batch_time
        if min_batch == float('inf'):
            min_batch = 0
        return {
            'task_id': self.task_id,
            'execution_id': self.execution_id,
            'duration': self.duration,
            'total_records': self.total_records,
            'processed_records': self.processed_records,
            'failed_records': self.failed_records,
            'success_rate': self.success_rate,
            'total_batches': self.total_batches,
            'avg_batch_time': self.avg_batch_time,
            'max_batch_time': self.max_batch_time,
            'min_batch_time': min_batch,
            'avg_records_per_second': self.avg_records_per_second,
            'peak_memory_mb': self.peak_memory_mb,
            'peak_cpu_percent': self.peak_cpu_percent
        }


class PerformanceMonitor:
    """Samples process/system resource usage on a fixed interval and keeps
    per-execution statistics for sync tasks, from which optimization
    suggestions can be derived.
    """

    def __init__(self, max_history_size: int = 1000):
        """
        Initialize the performance monitor.

        Args:
            max_history_size: maximum number of system metric samples kept
        """
        self.max_history_size = max_history_size
        # Ring buffer of PerformanceMetrics samples (oldest dropped first).
        self.system_metrics: deque = deque(maxlen=max_history_size)
        # Keyed by execution_id; entries with end_time=None are still running.
        self.task_performances: Dict[int, SyncTaskPerformance] = {}
        self.monitoring = False
        self.monitor_task: Optional[asyncio.Task] = None

        # Baselines: disk/network counters are cumulative totals, so
        # _collect_metrics reports the delta relative to these snapshots.
        self.process = psutil.Process()
        self.initial_disk_io = psutil.disk_io_counters()
        self.initial_network_io = psutil.net_io_counters()
        # Prime cpu_percent(): psutil's first call always returns a
        # meaningless 0.0, so take a throwaway reading up front.
        self.process.cpu_percent(None)

    async def start_monitoring(self, interval: float = 1.0):
        """
        Start the background sampling loop (no-op if already running).

        Args:
            interval: seconds between samples
        """
        if self.monitoring:
            return

        self.monitoring = True
        self.monitor_task = asyncio.create_task(self._monitor_loop(interval))
        logger.info("性能监控已启动")

    async def stop_monitoring(self):
        """Stop the background sampling loop and wait for it to exit."""
        self.monitoring = False

        if self.monitor_task:
            self.monitor_task.cancel()
            try:
                await self.monitor_task
            except asyncio.CancelledError:
                pass
            # Drop the stale reference so a later start_monitoring() is clean.
            self.monitor_task = None

        logger.info("性能监控已停止")

    async def _monitor_loop(self, interval: float):
        """Sampling loop: collect one sample per interval until stopped."""
        while self.monitoring:
            try:
                metrics = await self._collect_metrics()
                self.system_metrics.append(metrics)

                # Attach the sample to every task that is still running
                # and track its peak resource usage.
                for task_perf in self.task_performances.values():
                    if task_perf.end_time is None:
                        task_perf.metrics_history.append(metrics)
                        task_perf.peak_memory_mb = max(task_perf.peak_memory_mb, metrics.memory_used_mb)
                        task_perf.peak_cpu_percent = max(task_perf.peak_cpu_percent, metrics.cpu_percent)

                await asyncio.sleep(interval)

            except Exception as e:
                # Keep the loop alive: one failed sample must not end
                # monitoring. (CancelledError is a BaseException and still
                # propagates so stop_monitoring() works.)
                logger.error(f"性能监控异常: {e}")
                await asyncio.sleep(interval)

    async def _collect_metrics(self) -> PerformanceMetrics:
        """Collect one PerformanceMetrics sample for the current process."""
        # CPU and memory
        cpu_percent = self.process.cpu_percent()
        memory_info = self.process.memory_info()
        memory_percent = self.process.memory_percent()
        memory_used_mb = memory_info.rss / 1024 / 1024

        # Disk I/O: delta since the monitor's baseline. The counters can be
        # None on some platforms, hence the guard.
        current_disk_io = psutil.disk_io_counters()
        disk_read_mb = 0.0
        disk_write_mb = 0.0

        if current_disk_io and self.initial_disk_io:
            disk_read_mb = (current_disk_io.read_bytes - self.initial_disk_io.read_bytes) / 1024 / 1024
            disk_write_mb = (current_disk_io.write_bytes - self.initial_disk_io.write_bytes) / 1024 / 1024

        # Network I/O: delta since the monitor's baseline.
        current_network_io = psutil.net_io_counters()
        network_sent_mb = 0.0
        network_recv_mb = 0.0

        if current_network_io and self.initial_network_io:
            network_sent_mb = (current_network_io.bytes_sent - self.initial_network_io.bytes_sent) / 1024 / 1024
            network_recv_mb = (current_network_io.bytes_recv - self.initial_network_io.bytes_recv) / 1024 / 1024

        # Connection count (simplified). Enumerating connections can fail
        # without sufficient privileges (e.g. AccessDenied on macOS); report
        # 0 rather than losing the whole sample.
        # NOTE(review): psutil >= 6.0 renames this to net_connections().
        try:
            active_connections = len(self.process.connections())
        except (psutil.Error, OSError):
            active_connections = 0

        return PerformanceMetrics(
            timestamp=time.time(),
            cpu_percent=cpu_percent,
            memory_percent=memory_percent,
            memory_used_mb=memory_used_mb,
            disk_io_read_mb=disk_read_mb,
            disk_io_write_mb=disk_write_mb,
            network_sent_mb=network_sent_mb,
            network_recv_mb=network_recv_mb,
            active_connections=active_connections
        )

    def start_task_monitoring(self, task_id: int, execution_id: int) -> SyncTaskPerformance:
        """
        Begin tracking performance for one task execution.

        Args:
            task_id: task ID
            execution_id: execution ID (key for later lookups)

        Returns:
            The newly created per-task statistics object.
        """
        task_perf = SyncTaskPerformance(
            task_id=task_id,
            execution_id=execution_id,
            start_time=time.time()
        )

        self.task_performances[execution_id] = task_perf
        logger.debug(f"开始监控任务性能: task_id={task_id}, execution_id={execution_id}")

        return task_perf

    def end_task_monitoring(self, execution_id: int):
        """
        Finish tracking a task execution and compute its averages.

        Args:
            execution_id: execution ID (unknown IDs are ignored)
        """
        if execution_id in self.task_performances:
            task_perf = self.task_performances[execution_id]
            task_perf.end_time = time.time()

            # avg_batch_time is approximated from total wall time, not from
            # the sum of individual batch times.
            if task_perf.total_batches > 0:
                task_perf.avg_batch_time = task_perf.duration / task_perf.total_batches

            if task_perf.duration > 0:
                task_perf.avg_records_per_second = task_perf.processed_records / task_perf.duration

            logger.debug(f"结束监控任务性能: execution_id={execution_id}")

    def update_batch_metrics(
        self, 
        execution_id: int, 
        batch_time: float, 
        records_count: int,
        failed_count: int = 0
    ):
        """
        Record the outcome of one processed batch.

        Args:
            execution_id: execution ID (unknown IDs are ignored)
            batch_time: wall time the batch took, in seconds
            records_count: number of records in the batch
            failed_count: number of records that failed
        """
        if execution_id not in self.task_performances:
            return

        task_perf = self.task_performances[execution_id]
        task_perf.total_batches += 1
        task_perf.processed_records += records_count
        task_perf.failed_records += failed_count

        # Batch-time extremes
        task_perf.max_batch_time = max(task_perf.max_batch_time, batch_time)
        task_perf.min_batch_time = min(task_perf.min_batch_time, batch_time)

        # Throughput of this batch
        if batch_time > 0:
            records_per_second = records_count / batch_time

            # Stamp the throughput onto the latest system sample. Note the
            # sample object may also be referenced from task metrics_history,
            # so those views see the update too.
            if self.system_metrics:
                latest_metrics = self.system_metrics[-1]
                latest_metrics.records_per_second = records_per_second
                latest_metrics.batch_processing_time = batch_time

    def get_task_performance(self, execution_id: int) -> Optional[SyncTaskPerformance]:
        """
        Look up the statistics for a task execution.

        Args:
            execution_id: execution ID

        Returns:
            The statistics object, or None if the ID is unknown.
        """
        return self.task_performances.get(execution_id)

    def get_system_metrics_summary(self, last_n_minutes: int = 10) -> Dict[str, Any]:
        """
        Summarize recent system samples.

        Args:
            last_n_minutes: only samples from the last N minutes are used

        Returns:
            Aggregated CPU/memory/throughput statistics, or {} if there are
            no samples in the window.
        """
        if not self.system_metrics:
            return {}

        # Keep only samples inside the time window.
        cutoff_time = time.time() - (last_n_minutes * 60)
        recent_metrics = [m for m in self.system_metrics if m.timestamp >= cutoff_time]

        if not recent_metrics:
            return {}

        cpu_values = [m.cpu_percent for m in recent_metrics]
        memory_values = [m.memory_used_mb for m in recent_metrics]
        # Zero throughput readings mean "no batch finished"; skip them.
        records_per_second_values = [m.records_per_second for m in recent_metrics if m.records_per_second > 0]

        return {
            'time_range_minutes': last_n_minutes,
            'sample_count': len(recent_metrics),
            'cpu': {
                'avg': sum(cpu_values) / len(cpu_values),
                'max': max(cpu_values),
                'min': min(cpu_values)
            },
            'memory': {
                'avg_mb': sum(memory_values) / len(memory_values),
                'max_mb': max(memory_values),
                'min_mb': min(memory_values)
            },
            'performance': {
                'avg_records_per_second': sum(records_per_second_values) / len(records_per_second_values) if records_per_second_values else 0,
                'max_records_per_second': max(records_per_second_values) if records_per_second_values else 0
            }
        }

    def get_optimization_suggestions(self, execution_id: int) -> List[str]:
        """
        Derive tuning suggestions from a task's collected statistics.

        Args:
            execution_id: execution ID

        Returns:
            A list of suggestion strings (empty if the ID is unknown or
            everything looks healthy). Thresholds are heuristic.
        """
        suggestions = []

        task_perf = self.get_task_performance(execution_id)
        if not task_perf:
            return suggestions

        # CPU utilization
        if task_perf.peak_cpu_percent > 80:
            suggestions.append("CPU使用率过高，建议减少并发任务数量或优化查询语句")
        elif task_perf.peak_cpu_percent < 30:
            suggestions.append("CPU使用率较低，可以考虑增加并发任务数量以提高吞吐量")

        # Memory usage (> 1 GB)
        if task_perf.peak_memory_mb > 1000:
            suggestions.append("内存使用量较高，建议减少批处理大小或优化数据处理逻辑")

        # Batch latency (> 30 s)
        if task_perf.avg_batch_time > 30:
            suggestions.append("批处理时间较长，建议优化目标系统写入性能或减少批处理大小")

        # Throughput
        if task_perf.avg_records_per_second < 100:
            suggestions.append("数据处理速度较慢，建议检查网络连接、数据库性能或增加批处理大小")

        # Success rate
        if task_perf.success_rate < 95:
            suggestions.append("数据处理成功率较低，建议检查数据质量和错误处理逻辑")

        return suggestions

    def cleanup_old_data(self, days: int = 7):
        """
        Drop performance data older than the retention window.

        Args:
            days: retention period in days
        """
        cutoff_time = time.time() - (days * 24 * 3600)

        # Rebuild the sample buffer with only recent entries.
        self.system_metrics = deque(
            [m for m in self.system_metrics if m.timestamp >= cutoff_time],
            maxlen=self.max_history_size
        )

        # Collect stale execution IDs first, then delete (no mutation while
        # iterating the dict).
        to_remove = []
        for execution_id, task_perf in self.task_performances.items():
            if task_perf.start_time < cutoff_time:
                to_remove.append(execution_id)

        for execution_id in to_remove:
            del self.task_performances[execution_id]

        logger.info(f"清理了 {len(to_remove)} 个旧的任务性能记录")


# Global performance-monitor singleton shared across the application.
# NOTE(review): constructing it here queries psutil at import time.
performance_monitor = PerformanceMonitor()
