import torch
import time
from typing import Dict, Any
from collections import defaultdict

class PerformanceAnalyzer:
    """Advanced performance analysis tool for mixed precision training.

    Tracks per-batch wall-clock time, a one-time FLOP estimate for the
    model, GPU memory snapshots, and per-layer precision assignments,
    then aggregates everything via :meth:`get_metrics`.
    """

    def __init__(self):
        # Kept for backward compatibility: external code may append to it,
        # although no method in this class reads it.
        self.metrics = defaultdict(list)
        self.start_time = None    # wall-clock start of the batch being timed
        self.batch_times = []     # per-batch durations, seconds
        self.flops = 0            # one-time FLOP estimate (0 == not yet computed)
        self.memory_usage = []    # per-batch GPU memory snapshots, MB
        self.precision_stats = {}  # precision name -> list of layer names

    def start_timer(self):
        """Start (or restart) the timer for the next batch."""
        self.start_time = time.time()

    def record_batch(self, batch_size: int, model: torch.nn.Module):
        """Record timing/memory metrics for one batch.

        No-op if :meth:`start_timer` has never been called.

        Args:
            batch_size: Number of samples in the batch (scales the FLOP estimate).
            model: Model being trained; inspected once to estimate FLOPs.
        """
        if self.start_time is None:
            return

        self.batch_times.append(time.time() - self.start_time)

        # Estimate FLOPs only once -- the architecture does not change
        # between batches, so recomputing would be wasted work.
        if not self.flops:
            self.flops = self._estimate_flops(model, batch_size)

        self.memory_usage.append(self._get_memory_usage())

        # Restart the timer so consecutive record_batch() calls measure
        # back-to-back batches without an explicit start_timer().
        self.start_time = time.time()

    def record_precision(self, layer_name: str, precision: str):
        """Record that *layer_name* runs at *precision* (e.g. 'fp16')."""
        self.precision_stats.setdefault(precision, []).append(layer_name)

    def get_metrics(self) -> Dict[str, Any]:
        """Return aggregated performance metrics.

        Returns an empty dict until at least one batch has been recorded.
        """
        if not self.batch_times:
            return {}

        avg_batch_time = sum(self.batch_times) / len(self.batch_times)
        # Guard against a zero average (sub-resolution timings) to avoid
        # ZeroDivisionError; throughput is batches per second.
        throughput = 1 / avg_batch_time if avg_batch_time else 0
        flops_per_sec = self.flops * throughput if self.flops else 0

        return {
            'avg_batch_time': avg_batch_time,
            'throughput': throughput,
            'estimated_flops': self.flops,
            'flops_per_sec': flops_per_sec,
            'memory_usage': self._avg_memory_usage(),
            'precision_distribution': self._precision_distribution()
        }

    def _estimate_flops(self, model: torch.nn.Module, batch_size: int) -> float:
        """Estimate forward-pass FLOPs for *model* at the given batch size.

        Simplified estimation: only Conv2d and Linear layers are counted,
        conv output size is probed with a fixed 1x C x 32 x 32 dummy input,
        and grouped/dilated convolutions are not treated specially.
        """
        total_flops = 0
        for module in model.modules():
            if isinstance(module, torch.nn.Conv2d):
                # Probe spatial output size with a dummy forward pass.
                # BUG FIX: in_channels is an int; the original code unpacked
                # it with ``*module.in_channels`` which raised TypeError.
                # no_grad avoids building an autograd graph for the probe.
                with torch.no_grad():
                    dummy = torch.randn(1, module.in_channels, 32, 32)
                    output_size = module(dummy).shape[-1]
                # conv FLOPs ~ 2 * Cin * Cout * Kh * Kw * Hout * Wout per sample
                total_flops += 2 * module.in_channels * module.out_channels * \
                              module.kernel_size[0] * module.kernel_size[1] * \
                              output_size * output_size * batch_size
            elif isinstance(module, torch.nn.Linear):
                # linear FLOPs ~ 2 * in_features * out_features per sample
                total_flops += 2 * module.in_features * module.out_features * batch_size

        return total_flops

    def _get_memory_usage(self) -> float:
        """Return current allocated GPU memory in MB (0 when CUDA is absent)."""
        if torch.cuda.is_available():
            return torch.cuda.memory_allocated() / (1024 * 1024)
        return 0

    def _avg_memory_usage(self) -> float:
        """Return the mean of recorded memory snapshots (0 if none)."""
        if not self.memory_usage:
            return 0
        return sum(self.memory_usage) / len(self.memory_usage)

    def _precision_distribution(self) -> Dict[str, Any]:
        """Return per-precision layer counts and the raw layer lists."""
        return {
            'counts': {k: len(v) for k, v in self.precision_stats.items()},
            'layers': self.precision_stats
        }