"""
性能指标收集器

收集性能指标、资源使用统计、瓶颈分析等功能。
"""

import time
import psutil
import threading
from typing import Dict, List, Optional, Any
from dataclasses import dataclass, field
from datetime import datetime, timezone

from ..utils.logger import get_module_logger

logger = get_module_logger(__name__)


@dataclass
class PerformanceMetrics:
    """A single performance-metrics sample.

    The disk/network ``*_mb`` fields hold transfer rates in MB/s averaged
    over the window since the previous sample (see
    ``MetricsCollector.get_current_metrics``); ``memory_used_mb`` is an
    absolute size.
    """
    timestamp: datetime            # UTC time the sample was taken
    cpu_percent: float             # system CPU utilisation, percent
    memory_percent: float          # virtual-memory utilisation, percent
    memory_used_mb: float          # used memory, MB (absolute)
    disk_io_read_mb: float         # disk read rate, MB/s
    disk_io_write_mb: float        # disk write rate, MB/s
    network_sent_mb: float         # network send rate, MB/s
    network_recv_mb: float         # network receive rate, MB/s
    custom_metrics: Dict[str, Any] = field(default_factory=dict)  # merged custom counters/gauges


class MetricsCollector:
    """Performance metrics collector.

    Samples system-level metrics (CPU, memory, disk/network I/O rates) and
    merges in user-supplied counters and gauges.  An optional background
    daemon thread records a sample every ``collection_interval`` seconds
    into a bounded in-memory history.

    Thread safety: the custom counters/gauges, the history list and the
    I/O-rate baselines are all guarded by a single internal lock, so the
    public methods may be called concurrently with the collection thread.
    """

    # Upper bound on the number of samples kept in memory.
    _MAX_HISTORY = 1000

    def __init__(self, collection_interval: float = 5.0):
        """Initialize the collector.

        Args:
            collection_interval: Sampling interval of the background
                collection loop, in seconds.
        """
        self.collection_interval = collection_interval
        self.metrics_history: List[PerformanceMetrics] = []
        self.custom_counters: Dict[str, float] = {}
        self.custom_gauges: Dict[str, float] = {}
        self._lock = threading.Lock()
        self._collection_thread: Optional[threading.Thread] = None
        self._stop_collection = False

        # Baselines for converting cumulative I/O byte counters into rates.
        # NOTE: psutil returns None for these on platforms that expose no
        # disk/network counters, so every consumer must tolerate None.
        self._baseline_disk_io = psutil.disk_io_counters()
        self._baseline_network = psutil.net_io_counters()
        self._last_collection_time = time.time()

        logger.info("性能指标收集器初始化完成")

    def start_collection(self):
        """Start the background collection thread.

        Idempotent: does nothing if a collection thread is already running.
        """
        if self._collection_thread and self._collection_thread.is_alive():
            return

        self._stop_collection = False
        self._collection_thread = threading.Thread(
            target=self._collection_loop,
            daemon=True
        )
        self._collection_thread.start()

        logger.info("开始性能指标收集")

    def stop_collection(self):
        """Stop the background collection thread (waits up to 5 seconds)."""
        self._stop_collection = True
        thread = self._collection_thread
        if thread:
            thread.join(timeout=5.0)
        # Drop the stale reference so start_collection() starts cleanly.
        self._collection_thread = None

        logger.info("停止性能指标收集")

    def increment_counter(self, name: str, value: float = 1.0):
        """Add *value* to the named counter (created at 0 if missing).

        Args:
            name: Counter name.
            value: Amount to add (default 1.0).
        """
        with self._lock:
            self.custom_counters[name] = self.custom_counters.get(name, 0) + value

    def set_gauge(self, name: str, value: float):
        """Set the named gauge, overwriting any previous value.

        Args:
            name: Gauge name.
            value: New gauge value.
        """
        with self._lock:
            self.custom_gauges[name] = value

    @staticmethod
    def _rate_mb_per_sec(delta_bytes: float, seconds: float) -> float:
        """Convert a byte delta over *seconds* into an MB/s rate."""
        return (delta_bytes / seconds) / (1024 * 1024)

    def get_current_metrics(self) -> PerformanceMetrics:
        """Sample the system and return a fresh metrics snapshot.

        I/O values are rates (MB/s) averaged over the window since the
        previous call; calling this rolls the rate baselines forward.
        Blocks for ~0.1 s while measuring CPU utilisation.

        Returns:
            The current PerformanceMetrics (all-zero values on failure).
        """
        try:
            # CPU utilisation, sampled over a short blocking interval.
            cpu_percent = psutil.cpu_percent(interval=0.1)

            # Memory usage.
            memory = psutil.virtual_memory()
            memory_percent = memory.percent
            memory_used_mb = memory.used / (1024 * 1024)

            # Cumulative I/O counters (either may be None on some platforms).
            current_disk_io = psutil.disk_io_counters()
            current_network = psutil.net_io_counters()
            current_time = time.time()

            # Compute rates and roll the baselines forward under the lock:
            # concurrent callers (the collection thread plus e.g.
            # detect_bottlenecks) would otherwise race on the shared
            # baselines and produce corrupted rates.
            with self._lock:
                time_delta = current_time - self._last_collection_time

                disk_io_read_mb = 0.0
                disk_io_write_mb = 0.0
                if (time_delta > 0 and current_disk_io is not None
                        and self._baseline_disk_io is not None):
                    disk_io_read_mb = self._rate_mb_per_sec(
                        current_disk_io.read_bytes - self._baseline_disk_io.read_bytes,
                        time_delta
                    )
                    disk_io_write_mb = self._rate_mb_per_sec(
                        current_disk_io.write_bytes - self._baseline_disk_io.write_bytes,
                        time_delta
                    )

                network_sent_mb = 0.0
                network_recv_mb = 0.0
                if (time_delta > 0 and current_network is not None
                        and self._baseline_network is not None):
                    network_sent_mb = self._rate_mb_per_sec(
                        current_network.bytes_sent - self._baseline_network.bytes_sent,
                        time_delta
                    )
                    network_recv_mb = self._rate_mb_per_sec(
                        current_network.bytes_recv - self._baseline_network.bytes_recv,
                        time_delta
                    )

                # The next call measures a fresh window.
                self._baseline_disk_io = current_disk_io
                self._baseline_network = current_network
                self._last_collection_time = current_time

                # Snapshot custom metrics; a gauge silently overrides a
                # counter with the same name (dict-merge order).
                custom_metrics = {**self.custom_counters, **self.custom_gauges}

            return PerformanceMetrics(
                timestamp=datetime.now(timezone.utc),
                cpu_percent=cpu_percent,
                memory_percent=memory_percent,
                memory_used_mb=memory_used_mb,
                disk_io_read_mb=disk_io_read_mb,
                disk_io_write_mb=disk_io_write_mb,
                network_sent_mb=network_sent_mb,
                network_recv_mb=network_recv_mb,
                custom_metrics=custom_metrics
            )

        except Exception as e:
            logger.error(f"收集性能指标失败: {e}")
            # Best-effort fallback: all-zero metrics instead of raising.
            return PerformanceMetrics(
                timestamp=datetime.now(timezone.utc),
                cpu_percent=0,
                memory_percent=0,
                memory_used_mb=0,
                disk_io_read_mb=0,
                disk_io_write_mb=0,
                network_sent_mb=0,
                network_recv_mb=0
            )

    def get_metrics_history(self, minutes: int = 60) -> List[PerformanceMetrics]:
        """Return the collected samples from the last *minutes* minutes.

        Args:
            minutes: Size of the look-back window, in minutes.

        Returns:
            List of samples whose timestamp falls inside the window.
        """
        with self._lock:
            cutoff_time = datetime.now(timezone.utc).timestamp() - (minutes * 60)
            return [
                metrics for metrics in self.metrics_history
                if metrics.timestamp.timestamp() >= cutoff_time
            ]

    def get_metrics_summary(self, minutes: int = 60) -> Dict[str, Any]:
        """Aggregate the recent history into avg/max/min statistics.

        Args:
            minutes: Size of the look-back window, in minutes.

        Returns:
            Summary dict; empty dict when there are no samples in the window.
        """
        history = self.get_metrics_history(minutes)

        if not history:
            return {}

        cpu_values = [m.cpu_percent for m in history]
        memory_values = [m.memory_percent for m in history]
        disk_read_values = [m.disk_io_read_mb for m in history]
        disk_write_values = [m.disk_io_write_mb for m in history]
        network_sent_values = [m.network_sent_mb for m in history]
        network_recv_values = [m.network_recv_mb for m in history]

        return {
            'time_range_minutes': minutes,
            'sample_count': len(history),
            'cpu': {
                'avg': sum(cpu_values) / len(cpu_values),
                'max': max(cpu_values),
                'min': min(cpu_values)
            },
            'memory': {
                'avg': sum(memory_values) / len(memory_values),
                'max': max(memory_values),
                'min': min(memory_values),
                'current_mb': history[-1].memory_used_mb
            },
            'disk_io': {
                'avg_read_mb_per_sec': sum(disk_read_values) / len(disk_read_values),
                'avg_write_mb_per_sec': sum(disk_write_values) / len(disk_write_values),
                'max_read_mb_per_sec': max(disk_read_values),
                'max_write_mb_per_sec': max(disk_write_values)
            },
            'network': {
                'avg_sent_mb_per_sec': sum(network_sent_values) / len(network_sent_values),
                'avg_recv_mb_per_sec': sum(network_recv_values) / len(network_recv_values),
                'max_sent_mb_per_sec': max(network_sent_values),
                'max_recv_mb_per_sec': max(network_recv_values)
            }
        }

    def detect_bottlenecks(self) -> List[Dict[str, Any]]:
        """Check a fresh sample against fixed thresholds.

        Thresholds: CPU/memory > 80% (severity 'high' above 90%), combined
        disk I/O > 100 MB/s, combined network I/O > 50 MB/s.

        Returns:
            List of bottleneck dicts; empty when nothing is saturated.
        """
        bottlenecks = []

        try:
            current_metrics = self.get_current_metrics()

            # CPU bottleneck.
            if current_metrics.cpu_percent > 80:
                bottlenecks.append({
                    'type': 'cpu',
                    'severity': 'high' if current_metrics.cpu_percent > 90 else 'medium',
                    'value': current_metrics.cpu_percent,
                    'message': f'CPU使用率过高: {current_metrics.cpu_percent:.1f}%'
                })

            # Memory bottleneck.
            if current_metrics.memory_percent > 80:
                bottlenecks.append({
                    'type': 'memory',
                    'severity': 'high' if current_metrics.memory_percent > 90 else 'medium',
                    'value': current_metrics.memory_percent,
                    'message': f'内存使用率过高: {current_metrics.memory_percent:.1f}%'
                })

            # Disk I/O bottleneck (combined read + write rate).
            total_disk_io = current_metrics.disk_io_read_mb + current_metrics.disk_io_write_mb
            if total_disk_io > 100:  # 100 MB/s
                bottlenecks.append({
                    'type': 'disk_io',
                    'severity': 'medium',
                    'value': total_disk_io,
                    'message': f'磁盘IO负载较高: {total_disk_io:.1f} MB/s'
                })

            # Network I/O bottleneck (combined send + receive rate).
            total_network_io = current_metrics.network_sent_mb + current_metrics.network_recv_mb
            if total_network_io > 50:  # 50 MB/s
                bottlenecks.append({
                    'type': 'network_io',
                    'severity': 'medium',
                    'value': total_network_io,
                    'message': f'网络IO负载较高: {total_network_io:.1f} MB/s'
                })

        except Exception as e:
            logger.error(f"检测性能瓶颈失败: {e}")

        return bottlenecks

    def _collection_loop(self):
        """Background loop: sample, append to history, sleep, repeat."""
        while not self._stop_collection:
            try:
                metrics = self.get_current_metrics()

                with self._lock:
                    self.metrics_history.append(metrics)
                    # Trim in place so the history stays bounded.
                    if len(self.metrics_history) > self._MAX_HISTORY:
                        del self.metrics_history[:-self._MAX_HISTORY]

                time.sleep(self.collection_interval)

            except Exception as e:
                logger.error(f"指标收集循环出错: {e}")
                time.sleep(self.collection_interval)

    def export_metrics(self, format: str = 'json') -> str:
        """Serialize the full metrics history.

        Args:
            format: Output format, 'json' or 'csv'.

        Returns:
            The serialized history as a string.

        Raises:
            ValueError: If *format* is neither 'json' nor 'csv'.
        """
        with self._lock:
            if format == 'json':
                import json
                data = []
                for metrics in self.metrics_history:
                    data.append({
                        'timestamp': metrics.timestamp.isoformat(),
                        'cpu_percent': metrics.cpu_percent,
                        'memory_percent': metrics.memory_percent,
                        'memory_used_mb': metrics.memory_used_mb,
                        'disk_io_read_mb': metrics.disk_io_read_mb,
                        'disk_io_write_mb': metrics.disk_io_write_mb,
                        'network_sent_mb': metrics.network_sent_mb,
                        'network_recv_mb': metrics.network_recv_mb,
                        **metrics.custom_metrics
                    })
                return json.dumps(data, indent=2)

            elif format == 'csv':
                import csv
                import io

                output = io.StringIO()
                if self.metrics_history:
                    fieldnames = [
                        'timestamp', 'cpu_percent', 'memory_percent', 'memory_used_mb',
                        'disk_io_read_mb', 'disk_io_write_mb', 'network_sent_mb', 'network_recv_mb'
                    ]

                    # Union of custom-metric keys across all samples so
                    # every row writes the same columns.
                    custom_fields = set()
                    for metrics in self.metrics_history:
                        custom_fields.update(metrics.custom_metrics.keys())
                    fieldnames.extend(sorted(custom_fields))

                    writer = csv.DictWriter(output, fieldnames=fieldnames)
                    writer.writeheader()

                    for metrics in self.metrics_history:
                        row = {
                            'timestamp': metrics.timestamp.isoformat(),
                            'cpu_percent': metrics.cpu_percent,
                            'memory_percent': metrics.memory_percent,
                            'memory_used_mb': metrics.memory_used_mb,
                            'disk_io_read_mb': metrics.disk_io_read_mb,
                            'disk_io_write_mb': metrics.disk_io_write_mb,
                            'network_sent_mb': metrics.network_sent_mb,
                            'network_recv_mb': metrics.network_recv_mb,
                            **metrics.custom_metrics
                        }
                        writer.writerow(row)

                return output.getvalue()

            else:
                raise ValueError(f"不支持的导出格式: {format}")

    def __enter__(self):
        """Context-manager entry: start collection and return self."""
        self.start_collection()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: stop collection."""
        self.stop_collection()
