"""
性能监控模块，实现计时、内存使用等监控功能。
"""
import time
import torch
import psutil
import os
import threading
from typing import Dict, List, Optional, Union, Tuple, Any, Callable
from dataclasses import dataclass
from enum import Enum

class TimerEvent(Enum):
    """Categories of timed events recognized by the performance timer."""

    FORWARD = "forward"
    BACKWARD = "backward"
    OPTIMIZER_STEP = "optimizer_step"
    DATA_LOADING = "data_loading"
    COMMUNICATION = "communication"
    TOTAL_STEP = "total_step"
    # Catch-all for caller-defined events that fit no category above.
    CUSTOM = "custom"

@dataclass
class TimerRecord:
    """One timing measurement for a single event.

    ``start_time``/``end_time`` are wall-clock timestamps (``time.time()``);
    ``end_time`` and ``duration`` stay ``None`` while the timer is running
    and are filled in when the timer is stopped.
    """

    event: TimerEvent                          # category of the timed event
    tag: str                                   # free-form label to tell timers apart
    start_time: float                          # wall-clock timestamp at start
    end_time: Optional[float] = None           # wall-clock timestamp at stop
    duration: Optional[float] = None           # end_time - start_time, in seconds
    step: Optional[int] = None                 # optional step number supplied by the caller
    metadata: Optional[Dict[str, Any]] = None  # optional extra context from the caller

class PerformanceTimer:
    """Wall-clock timer for named events.

    ``start()`` registers an active timer and returns an opaque id;
    ``stop(id)`` finalizes it and appends the finished ``TimerRecord``
    to ``records``.  Not thread-safe.
    """

    def __init__(self):
        """Initialize with no finished records and no active timers."""
        self.records: List[TimerRecord] = []             # finished timings, in stop() order
        self.active_timers: Dict[str, TimerRecord] = {}  # running timers keyed by opaque id
        # Monotonic counter that makes every timer id unique.  The previous
        # time.time()-based id could collide when two timers with the same
        # event/tag started within one clock tick, silently dropping one.
        self._next_id = 0

    def start(self,
              event: TimerEvent,
              tag: str = "",
              step: Optional[int] = None,
              metadata: Optional[Dict[str, Any]] = None) -> str:
        """Start a timer.

        Args:
            event: Event category being timed.
            tag: Optional label to distinguish timers of the same event.
            step: Optional step number attached to the record.
            metadata: Optional extra context stored on the record.

        Returns:
            Opaque timer id to pass to stop().
        """
        self._next_id += 1
        timer_id = f"{event.value}_{tag}_{self._next_id}"
        self.active_timers[timer_id] = TimerRecord(
            event=event,
            tag=tag,
            start_time=time.time(),
            step=step,
            metadata=metadata,
        )
        return timer_id

    def stop(self, timer_id: str) -> Optional[float]:
        """Stop a timer and record its duration.

        Args:
            timer_id: Id returned by start().

        Returns:
            Elapsed time in seconds, or None for an unknown or
            already-stopped timer id.
        """
        # Single-lookup pop instead of a membership test plus indexing.
        record = self.active_timers.pop(timer_id, None)
        if record is None:
            return None

        record.end_time = time.time()
        record.duration = record.end_time - record.start_time
        self.records.append(record)
        return record.duration

    def get_records(self,
                    event: Optional[TimerEvent] = None,
                    tag: Optional[str] = None) -> List[TimerRecord]:
        """Return finished records, optionally filtered.

        Args:
            event: If given, keep only records with this event type.
            tag: If given, keep only records with this exact tag.

        Returns:
            List of matching TimerRecord objects.
        """
        filtered = self.records
        if event is not None:
            filtered = [r for r in filtered if r.event == event]
        if tag is not None:
            filtered = [r for r in filtered if r.tag == tag]
        return filtered

    def get_average_duration(self,
                             event: Optional[TimerEvent] = None,
                             tag: Optional[str] = None) -> Optional[float]:
        """Return the mean duration (seconds) of matching finished records.

        Args:
            event: Optional event-type filter.
            tag: Optional tag filter.

        Returns:
            Mean duration, or None when no record matches.
        """
        records = self.get_records(event, tag)
        if not records:
            return None
        return sum(r.duration for r in records) / len(records)

    def clear(self) -> None:
        """Discard all finished records and abandon any active timers."""
        self.records.clear()
        self.active_timers.clear()

    def __enter__(self):
        """Enter a context in which timings are collected."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """On exit, drop everything collected inside the context."""
        self.clear()


class MemoryMonitor:
    """Background sampler for system, process, and CUDA memory usage.

    Between start() and stop() a daemon thread appends one snapshot to
    ``memory_records`` every ``interval`` seconds.  Also usable as a
    context manager (enter starts, exit stops).
    """

    def __init__(self, interval: float = 1.0):
        """
        Args:
            interval: Sampling interval in seconds.
        """
        self.interval = interval
        self.memory_records: List[Dict[str, Any]] = []
        # An Event (instead of a plain bool flag polled after time.sleep)
        # lets stop() wake the sampler immediately rather than after up
        # to a full `interval`.
        self._stop_event = threading.Event()
        self._thread: Optional[threading.Thread] = None

    def start(self) -> None:
        """Start the sampling thread; no-op if it is already running."""
        if self._thread is not None and self._thread.is_alive():
            return

        self._stop_event.clear()
        self._thread = threading.Thread(target=self._monitor_loop, daemon=True)
        self._thread.start()

    def stop(self) -> None:
        """Signal the sampler to stop and wait briefly for it to exit."""
        self._stop_event.set()
        if self._thread is not None:
            self._thread.join(timeout=2 * self.interval)

    def _monitor_loop(self) -> None:
        """Collect one snapshot per interval until stop() is called."""
        while True:
            self.memory_records.append(self._collect_memory_stats())
            # wait() returns True as soon as stop() sets the event,
            # otherwise False after `interval` seconds have elapsed.
            if self._stop_event.wait(self.interval):
                break

    def _collect_memory_stats(self) -> Dict[str, Any]:
        """Collect one snapshot of system, process, and (if available) CUDA memory.

        Returns:
            Dict with "timestamp", "system", "process" and, when CUDA is
            available, a per-GPU "cuda" section (sizes in bytes).
        """
        stats: Dict[str, Any] = {
            "timestamp": time.time(),
            "system": {}
        }

        # Host-wide memory.
        mem = psutil.virtual_memory()
        stats["system"] = {
            "total": mem.total,
            "available": mem.available,
            "used": mem.used,
            "percent": mem.percent
        }

        # Current process memory.
        process = psutil.Process(os.getpid())
        process_mem = process.memory_info()
        stats["process"] = {
            "rss": process_mem.rss,  # resident set size
            "vms": process_mem.vms,  # virtual memory size
        }

        # Per-device CUDA memory (if available).
        if torch.cuda.is_available():
            stats["cuda"] = {}
            for i in range(torch.cuda.device_count()):
                stats["cuda"][f"gpu_{i}"] = {
                    "allocated": torch.cuda.memory_allocated(i),
                    "reserved": torch.cuda.memory_reserved(i),
                    "max_allocated": torch.cuda.max_memory_allocated(i),
                    "max_reserved": torch.cuda.max_memory_reserved(i)
                }

        return stats

    def get_records(self) -> List[Dict[str, Any]]:
        """Return the snapshots collected so far (live list, not a copy)."""
        return self.memory_records

    def get_peak_memory(self, memory_type: str = "cuda") -> Dict[str, Any]:
        """Return peak usage over all collected snapshots.

        Args:
            memory_type: One of "cuda", "system" or "process".

        Returns:
            Peak-usage dict for the requested type; empty dict when no
            snapshots exist or the type is unknown/unavailable.
        """
        if not self.memory_records:
            return {}

        if memory_type == "cuda" and torch.cuda.is_available():
            result: Dict[str, Any] = {}

            for i in range(torch.cuda.device_count()):
                key = f"gpu_{i}"
                snapshots = [
                    record["cuda"][key]
                    for record in self.memory_records
                    if "cuda" in record and key in record["cuda"]
                ]
                if not snapshots:
                    # No snapshot saw this device; skip it instead of
                    # letting max() raise ValueError on an empty sequence.
                    continue
                result[key] = {
                    "peak_allocated": max(s["allocated"] for s in snapshots),
                    "peak_reserved": max(s["reserved"] for s in snapshots)
                }

            return result

        elif memory_type == "system":
            return {
                "peak_used": max(record["system"]["used"] for record in self.memory_records),
                "peak_percent": max(record["system"]["percent"] for record in self.memory_records)
            }

        elif memory_type == "process":
            return {
                "peak_rss": max(record["process"]["rss"] for record in self.memory_records),
                "peak_vms": max(record["process"]["vms"] for record in self.memory_records)
            }

        return {}

    def clear(self) -> None:
        """Discard all collected snapshots."""
        self.memory_records.clear()

    def __enter__(self):
        """Start monitoring on context entry."""
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Stop monitoring on context exit."""
        self.stop()


class ThroughputMonitor:
    """Tracks samples/tokens/steps processed and derives per-second rates.

    All rates are measured relative to the wall-clock time captured at
    construction (or at the latest reset()).
    """

    def __init__(self):
        """Zero all counters and start the clock."""
        self.samples_processed = 0
        self.tokens_processed = 0
        self.steps_completed = 0
        self.start_time = time.time()
        self.step_times: List[float] = []  # wall-clock timestamp of each update()

    def reset(self) -> None:
        """Zero every counter and restart the clock."""
        self.samples_processed = 0
        self.tokens_processed = 0
        self.steps_completed = 0
        self.step_times.clear()
        self.start_time = time.time()

    def update(self, samples: int = 0, tokens: int = 0, steps: int = 1) -> None:
        """Accumulate progress counts and record the current timestamp.

        Args:
            samples: Samples processed since the previous update.
            tokens: Tokens processed since the previous update.
            steps: Steps completed since the previous update (default 1).
        """
        now = time.time()
        self.samples_processed += samples
        self.tokens_processed += tokens
        self.steps_completed += steps
        self.step_times.append(now)

    def _rate(self, count: int) -> float:
        """Per-second rate of `count` since the clock started.

        Returns 0.0 when nothing has been counted yet or no time has
        elapsed (avoids division by zero).
        """
        elapsed = time.time() - self.start_time
        return count / elapsed if count and elapsed > 0 else 0.0

    def get_samples_per_second(self) -> float:
        """Return samples processed per second since start/reset."""
        return self._rate(self.samples_processed)

    def get_tokens_per_second(self) -> float:
        """Return tokens processed per second since start/reset."""
        return self._rate(self.tokens_processed)

    def get_steps_per_second(self) -> float:
        """Return steps completed per second since start/reset."""
        return self._rate(self.steps_completed)

    def get_average_step_time(self) -> Optional[float]:
        """Return the mean gap (seconds) between consecutive update() calls.

        Returns:
            Mean inter-update interval, or None with fewer than two updates.
        """
        if len(self.step_times) < 2:
            return None

        gaps = [later - earlier
                for earlier, later in zip(self.step_times, self.step_times[1:])]
        return sum(gaps) / len(gaps)

    def get_statistics(self) -> Dict[str, float]:
        """Return a flat snapshot of every throughput metric.

        Returns:
            Dict with per-second rates, average step time (0.0 when
            unavailable), running totals, and elapsed wall-clock time.
        """
        avg_step = self.get_average_step_time()
        return {
            "samples_per_second": self.get_samples_per_second(),
            "tokens_per_second": self.get_tokens_per_second(),
            "steps_per_second": self.get_steps_per_second(),
            "average_step_time": avg_step if avg_step else 0.0,
            "total_samples": self.samples_processed,
            "total_tokens": self.tokens_processed,
            "total_steps": self.steps_completed,
            "elapsed_time": time.time() - self.start_time
        }