import torch
import torch.nn as nn
import torch.distributed as dist
from typing import Dict, Optional, Union, List
from ..core import ModelCompressor
from ..utils import setup_logger, get_memory_usage
from ..utils.performance import PerformanceAnalyzer

@ModelCompressor.register_compressor('mixed_precision')
class MixedPrecisionCompressor(ModelCompressor):
    """Advanced mixed precision training compressor with performance analysis and optimized saving.

    Attributes:
        enabled: Whether mixed precision is enabled
        opt_level: Optimization level ('O0', 'O1', 'O2', 'O3')
        loss_scale: Loss scaling factor ('dynamic' or a fixed float)
        layer_precision: Dict mapping layer names to precision, or 'auto'
        grad_clip: Gradient clipping value (stored here; not applied in this class)
        loss_scale_window: Window size for dynamic loss scaling
        hysteresis: Hysteresis for dynamic loss scaling
        min_loss_scale: Minimum loss scale value
        max_loss_scale: Maximum loss scale value
        distributed: Distributed training configuration dict
        rank: Process rank in distributed training (0 when not distributed)
        analyzer: PerformanceAnalyzer instance, or None when tracking is disabled
    """
    
    def __init__(self, model: nn.Module, config):
        """Initialize mixed precision compressor with performance analysis.
        
        Args:
            model: Model to optimize.
            config: Configuration containing:
                - enabled: bool
                - opt_level: str ('O0'-'O3')
                - loss_scale: str or float ('dynamic' or float value)
                - layer_precision: dict or 'auto' (automatic precision selection)
                - grad_clip: float (gradient clipping value)
                - loss_scale_window: int (dynamic loss scale window)
                - hysteresis: int (dynamic loss scale hysteresis)
                - min_loss_scale: float (minimum loss scale)
                - max_loss_scale: float (maximum loss scale)
                - distributed: dict (distributed training config)
                    - enabled: bool
                    - backend: str ('nccl', 'gloo', etc.)
                    - find_unused_params: bool
                - performance: dict (performance analysis config)
                    - enabled: bool
                    - track_memory: bool
                    - track_flops: bool
        """
        super().__init__(model, config)
        self.enabled = getattr(config, 'enabled', True)
        self.opt_level = getattr(config, 'opt_level', 'O1')
        self.loss_scale = getattr(config, 'loss_scale', 'dynamic')
        self.layer_precision = getattr(config, 'layer_precision', {})
        self.grad_clip = getattr(config, 'grad_clip', None)
        self.loss_scale_window = getattr(config, 'loss_scale_window', 1000)
        self.hysteresis = getattr(config, 'hysteresis', 2)
        self.min_loss_scale = getattr(config, 'min_loss_scale', 1)
        self.max_loss_scale = getattr(config, 'max_loss_scale', 2**24)
        self.distributed = getattr(config, 'distributed', {})
        self.performance_cfg = getattr(config, 'performance', {})
        self.rank = 0
        
        # Initialize distributed training if enabled
        if self.distributed.get('enabled', False):
            self._init_distributed()
        
        # Performance analyzer is None when tracking is disabled; every user
        # below checks "is not None" before touching it (the previous code
        # conditionally created the attribute and probed it with hasattr()).
        self.analyzer = None
        if self.performance_cfg.get('enabled', True):
            self.analyzer = PerformanceAnalyzer()
        
        # Baseline metrics recorded before any precision change is applied.
        self.metrics.update({
            'mixed_precision': self.enabled,
            'opt_level': self.opt_level,
            'loss_scale': str(self.loss_scale),
            'memory_before': get_memory_usage(),
            'speedup': 1.0,
            'distributed': self.distributed.get('enabled', False)
        })
        
        # Initialize AMP backend (APEX or PyTorch native).
        self.amp = None
        self.scaler = None
        if self.enabled:
            self._init_amp()
    
    def _init_distributed(self):
        """Initialize the distributed process group and wrap the model in DDP."""
        if not dist.is_available():
            self.logger.warning("Distributed training not available")
            return
        
        # Guard against double initialization: init_process_group() raises if
        # the group was already created by the launcher or another component.
        if not dist.is_initialized():
            backend = self.distributed.get('backend', 'nccl')
            dist.init_process_group(backend)
        self.rank = dist.get_rank()
        
        # Wrap model for distributed training
        self.model = nn.parallel.DistributedDataParallel(
            self.model,
            find_unused_parameters=self.distributed.get('find_unused_params', False)
        )
        
        self.logger.info(f"Initialized distributed training (rank {self.rank})")
    
    def _make_scaler(self):
        """Build a native AMP GradScaler honoring the configured loss scale.
        
        A numeric ``loss_scale`` seeds the scaler directly; ``'dynamic'``
        starts from PyTorch's conventional 2**16 default, capped by
        ``max_loss_scale``. The previous code seeded the scaler at
        ``max_loss_scale`` (2**24 by default), which overflows on the very
        first steps and forces a long backoff before training stabilizes.
        
        Returns:
            A configured ``torch.cuda.amp.GradScaler``.
        """
        if isinstance(self.loss_scale, (int, float)):
            init_scale = float(self.loss_scale)
        else:
            init_scale = min(2.0 ** 16, float(self.max_loss_scale))
        return torch.cuda.amp.GradScaler(
            init_scale=init_scale,
            growth_factor=2.0,
            backoff_factor=0.5,
            growth_interval=self.loss_scale_window
        )
    
    def _init_amp(self):
        """Pick an AMP backend: APEX when installed and compatible, else native AMP."""
        self.amp = None
        self.scaler = None
        
        # Try APEX first if available
        try:
            if self._check_hardware_compatibility():
                from apex import amp
                self.amp = amp
                self.logger.info("Using APEX for mixed precision training")
                return
            else:
                self.logger.warning("Hardware not fully compatible with APEX")
        except ImportError:
            pass  # APEX not installed; fall through to native AMP
            
        # Fallback to PyTorch native AMP
        try:
            from torch.cuda import amp
            self.amp = amp
            self.scaler = self._make_scaler()
            self.logger.info("Using PyTorch native AMP for mixed precision training")
        except ImportError:
            self.logger.warning("Mixed precision requires either APEX or PyTorch>=1.6")
            self.enabled = False
    
    def _auto_tune_precision(self, train_loader, num_batches=10):
        """Automatically determine optimal precision for each layer.
        
        Args:
            train_loader: Training data loader used for sensitivity probing.
            num_batches: Number of batches to sample per layer.
        """
        if self.layer_precision != 'auto':
            return
            
        self.logger.info("Starting automatic precision tuning...")
        
        # Start performance analysis
        if self.analyzer is not None:
            self.analyzer.start_timer()
        
        # [Automatic precision-tuning algorithm goes here...]
        # It analyzes each layer's sensitivity to reduced precision and then
        # selects the best precision for every layer.
        # NOTE(review): the assignment below is a placeholder result, not a
        # computed one — replace it with the real analysis.
        self.layer_precision = {
            'conv1': 'fp16',
            'conv2': 'bf16',
            'fc1': 'fp32'
        }
        
        # Record precision distribution
        if self.analyzer is not None:
            for layer, prec in self.layer_precision.items():
                self.analyzer.record_precision(layer, prec)
        
        self.metrics['auto_tuned_precision'] = True
        self.logger.info(f"Auto-tuned precision settings: {self.layer_precision}")
    
    def compress(self, train_loader=None) -> nn.Module:
        """Enable mixed precision training with performance analysis.
        
        Args:
            train_loader: Optional training data loader for auto-tuning
            
        Returns:
            Model prepared for mixed precision training.
        """
        if not self.enabled:
            return self.model
            
        self.logger.info(f"Enabling mixed precision with opt_level {self.opt_level}")
        
        # Auto-tune precision if requested
        if self.layer_precision == 'auto' and train_loader:
            self._auto_tune_precision(train_loader)
        
        # Apply precision settings
        self._apply_layer_precision()
        
        # APEX exposes amp.initialize(); native torch.cuda.amp does not.
        if hasattr(self.amp, 'initialize'):
            # NOTE(review): relies on self.optimizer being set by the base
            # class before compress() is called — confirm against ModelCompressor.
            model, optimizer = self.amp.initialize(
                self.model,
                self.optimizer,
                opt_level=self.opt_level,
                loss_scale=self.loss_scale,
                max_loss_scale=self.max_loss_scale,
                min_loss_scale=self.min_loss_scale,
                loss_scale_window=self.loss_scale_window,
                hysteresis=self.hysteresis
            )
            self.model = model
            self.optimizer = optimizer
        elif self.amp is not None and self.scaler is None:
            # Native AMP: ensure a scaler exists (normally created in
            # _init_amp). The previous code re-created it unconditionally,
            # discarding any loss-scale state accumulated so far.
            self.scaler = self._make_scaler()
            
        return self.model
    
    def train_step(self, data, target):
        """Run one training step, recording timing/throughput when enabled.
        
        Args:
            data: Input batch (first dimension is batch size).
            target: Training targets, forwarded to the base implementation.
            
        Returns:
            The ``(loss, output)`` pair returned by the base class.
        """
        if self.analyzer is not None:
            self.analyzer.start_timer()
        
        # Original training logic lives in the base class.
        loss, output = super().train_step(data, target)
        
        if self.analyzer is not None:
            self.analyzer.record_batch(data.size(0), self.model)
        
        return loss, output
    
    def save_model(self, path: str, metadata: dict = None):
        """Save model with optimized format for mixed precision.
        
        Args:
            path: Path to save model
            metadata: Additional metadata to save with model
        """
        state = {
            'model_state': self.model.state_dict(),
            'precision_settings': self.layer_precision,
            # self.scaler is None under APEX or when AMP is disabled; the old
            # hasattr() guard always passed and then crashed on None.
            'amp_state': self.scaler.state_dict() if self.scaler is not None else None,
            'metrics': self.metrics
        }
        
        if metadata:
            state['metadata'] = metadata
            
        if self.analyzer is not None:
            state['performance'] = self.analyzer.get_metrics()
        
        torch.save(state, path)
        self.logger.info(f"Saved model to {path} with mixed precision settings")
    
    def load_model(self, path: str):
        """Load model with mixed precision settings.
        
        Args:
            path: Path to saved model
        """
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints from trusted sources.
        state = torch.load(path)
        self.model.load_state_dict(state['model_state'])
        
        if 'precision_settings' in state:
            self.layer_precision = state['precision_settings']
            self._apply_layer_precision()
            
        # Guard both sides: 'amp_state' is saved as None under APEX, and the
        # scaler is None whenever native AMP is not in use.
        if state.get('amp_state') is not None and self.scaler is not None:
            self.scaler.load_state_dict(state['amp_state'])
            
        if 'performance' in state and self.analyzer is not None:
            self.analyzer = PerformanceAnalyzer()
            # Can't fully restore analyzer state, but can log metrics
            self.metrics.update(state['performance'])
        
        self.logger.info(f"Loaded model from {path} with mixed precision settings")
    
    # [Other original methods retained...]