# performance_monitor.py
import time
import psutil
import torch
from typing import Dict, List
from dataclasses import dataclass


@dataclass
class PerformanceMetrics:
    """Snapshot of performance metrics recorded for one processed batch."""
    throughput: float       # items per second (batch_size / batch_time)
    latency: float          # wall-clock duration of the batch, in seconds
    gpu_utilization: float  # GPU utilization as a fraction in [0, 1]; 0.0 when CUDA is unavailable
    memory_usage: float     # allocated / total GPU memory on device 0; 0.0 when CUDA is unavailable
    cache_hit_rate: float   # cache hit ratio in [0, 1] (currently a fixed placeholder from the recorder)


class PerformanceMonitor:
    """性能监控器"""

    def __init__(self):
        self.metrics_history: List[PerformanceMetrics] = []
        self.start_time = time.time()

    def record_metrics(self, batch_size: int, batch_time: float) -> PerformanceMetrics:
        """记录性能指标"""
        # 计算吞吐量
        throughput = batch_size / batch_time

        # 获取GPU利用率
        gpu_utilization = self._get_gpu_utilization()

        # 获取内存使用
        memory_usage = self._get_memory_usage()

        # 计算缓存命中率（简化版）
        cache_hit_rate = 0.8  # 假设值

        metrics = PerformanceMetrics(
            throughput=throughput,
            latency=batch_time,
            gpu_utilization=gpu_utilization,
            memory_usage=memory_usage,
            cache_hit_rate=cache_hit_rate
        )

        self.metrics_history.append(metrics)
        return metrics

    def _get_gpu_utilization(self) -> float:
        """获取GPU利用率"""
        try:
            return torch.cuda.utilization() / 100.0
        except:
            return 0.0

    def _get_memory_usage(self) -> float:
        """获取内存使用率"""
        try:
            return torch.cuda.memory_allocated() / torch.cuda.get_device_properties(0).total_memory
        except:
            return 0.0

    def get_summary(self) -> Dict:
        """获取性能摘要"""
        if not self.metrics_history:
            return {}

        avg_throughput = sum(m.throughput for m in self.metrics_history) / len(self.metrics_history)
        avg_latency = sum(m.latency for m in self.metrics_history) / len(self.metrics_history)
        avg_gpu_util = sum(m.gpu_utilization for m in self.metrics_history) / len(self.metrics_history)

        return {
            'avg_throughput': avg_throughput,
            'avg_latency': avg_latency,
            'avg_gpu_utilization': avg_gpu_util,
            'total_requests': len(self.metrics_history),
            'total_time': time.time() - self.start_time
        }