import time
from typing import Dict, Any, List, Tuple

import torch
import torch.nn as nn
import torch.nn.utils.prune as prune

from ..core import ModelCompressor
from ..utils import setup_logger

@ModelCompressor.register_compressor('pruning')
class PruningCompressor(ModelCompressor):
    """Pruning compressor for neural network models.

    Applies unstructured magnitude ('l1') or random pruning, either
    per-layer (local) or with a single threshold across all prunable
    layers (global), using ``torch.nn.utils.prune``.

    Attributes:
        sparsity: Target sparsity level (0-1).
        pruning_method: Pruning technique ('l1' or 'random').
        global_pruning: Whether to prune globally across layers.
        parameters_to_prune: List of (module, parameter_name) tuples to prune.
    """

    # Dispatch tables mapping a method name to its torch.nn.utils.prune
    # implementation. Keeps the _apply_* helpers free of if/elif chains
    # and makes the supported-method set explicit in one place.
    _LOCAL_METHODS = {
        'l1': prune.l1_unstructured,
        'random': prune.random_unstructured,
    }
    _GLOBAL_METHODS = {
        'l1': prune.L1Unstructured,
        'random': prune.RandomUnstructured,
    }

    def __init__(self, model: nn.Module, config):
        """Initialize pruning compressor.

        Args:
            model: Model to prune.
            config: Configuration containing:
                - sparsity: Target sparsity (0-1)
                - pruning_method: Pruning method ('l1', 'random')
                - global_pruning: Whether to prune globally (bool)
                - device: Device to use ('cuda' or 'cpu')

        Raises:
            ValueError: If sparsity is outside [0, 1] or the pruning
                method is not one of the supported methods.
        """
        super().__init__(model, config)
        self.sparsity = getattr(config, 'sparsity', 0.5)
        self.pruning_method = getattr(config, 'pruning_method', 'l1')
        self.global_pruning = getattr(config, 'global_pruning', False)
        self.device = getattr(config, 'device', 'cuda' if torch.cuda.is_available() else 'cpu')

        # Fail fast on bad configuration instead of erroring mid-compress.
        if not 0.0 <= self.sparsity <= 1.0:
            raise ValueError(f"sparsity must be in [0, 1], got {self.sparsity}")
        if self.pruning_method not in self._LOCAL_METHODS:
            raise ValueError(f"Unknown pruning method: {self.pruning_method}")

        # Move model to device before collecting references to its parameters.
        self.model.to(self.device)

        # Seed metrics with configuration so they are reportable even
        # before compress() has run.
        self.metrics.update({
            'sparsity': self.sparsity,
            'pruning_method': self.pruning_method,
            'global_pruning': self.global_pruning
        })

        # Identify parameters to prune
        self.parameters_to_prune = self._identify_prunable_parameters()
        self._log_prunable_parameters()

    def _identify_prunable_parameters(self) -> List[Tuple[nn.Module, str]]:
        """Identify prunable parameters in the model.

        Only Conv2d and Linear layers are considered; both the weight
        and (when present) the bias tensor of each layer are included.

        Returns:
            List of (module, parameter_name) tuples.
        """
        parameters: List[Tuple[nn.Module, str]] = []
        # modules() suffices here; the qualified names from named_modules()
        # were unused.
        for module in self.model.modules():
            if isinstance(module, (nn.Conv2d, nn.Linear)):
                parameters.append((module, 'weight'))
                if module.bias is not None:
                    parameters.append((module, 'bias'))
        return parameters

    def _log_prunable_parameters(self):
        """Log prunable-parameter counts and record the original size."""
        total_params = sum(
            getattr(module, param_name).numel()
            for module, param_name in self.parameters_to_prune
        )
        self.logger.info(f"Found {len(self.parameters_to_prune)} prunable parameters")
        self.logger.info(f"Total prunable parameters: {total_params}")
        # 'original_size' is an element count, consistent with
        # 'compressed_size' written by _update_pruning_metrics.
        self.metrics['original_size'] = total_params

    def compress(self) -> nn.Module:
        """Apply pruning to the model.

        Returns:
            Pruned model with pruning masks applied (reparameterized;
            call finalize() to make the pruning permanent).

        Raises:
            RuntimeError: If pruning fails for any reason; the original
                exception is chained as the cause.
        """
        self.logger.info(f"Starting {self.pruning_method} pruning with {self.sparsity*100}% sparsity")

        try:
            # Apply pruning
            if self.global_pruning:
                self._apply_global_pruning()
            else:
                self._apply_local_pruning()

            # Update metrics
            self._update_pruning_metrics()
            return self.model

        except Exception as e:
            self.logger.error(f"Pruning failed: {str(e)}")
            raise RuntimeError(f"Pruning failed: {str(e)}") from e

    def _apply_local_pruning(self):
        """Apply pruning to each parameter tensor independently.

        Each tensor is pruned to ``self.sparsity`` on its own, so every
        layer ends up with the same sparsity level.

        Raises:
            ValueError: If the configured pruning method is unsupported.
        """
        try:
            method = self._LOCAL_METHODS[self.pruning_method]
        except KeyError:
            raise ValueError(f"Unknown pruning method: {self.pruning_method}") from None
        for module, param_name in self.parameters_to_prune:
            method(module, param_name, amount=self.sparsity)

    def _apply_global_pruning(self):
        """Apply pruning with one threshold across all parameters.

        Global pruning reaches ``self.sparsity`` overall, so individual
        layers may be pruned more or less than the target.

        Raises:
            ValueError: If the configured pruning method is unsupported.
        """
        try:
            method = self._GLOBAL_METHODS[self.pruning_method]
        except KeyError:
            raise ValueError(f"Unknown pruning method: {self.pruning_method}") from None
        # parameters_to_prune is already the list of (module, name) pairs
        # that global_unstructured expects; no copy needed.
        prune.global_unstructured(
            self.parameters_to_prune,
            pruning_method=method,
            amount=self.sparsity,
        )

    def _update_pruning_metrics(self):
        """Calculate and update pruning metrics.

        Reads the ``<name>_mask`` buffers that torch.nn.utils.prune
        attaches. A parameter without a mask was never pruned, so all of
        its elements count as remaining (the previous behavior counted
        such parameters as fully pruned, overstating sparsity).
        """
        remaining_params = 0
        total_params = 0

        for module, param_name in self.parameters_to_prune:
            param = getattr(module, param_name)
            mask = getattr(module, f"{param_name}_mask", None)

            if mask is not None:
                remaining_params += int(mask.sum().item())
            else:
                # Unpruned tensor: every element remains.
                remaining_params += param.numel()
            total_params += param.numel()

        # Guard against a model with no prunable parameters at all.
        actual_sparsity = 1 - (remaining_params / total_params) if total_params else 0.0
        self.metrics.update({
            'compressed_size': int(remaining_params),
            'actual_sparsity': actual_sparsity,
            'sparsity_deviation': abs(actual_sparsity - self.sparsity)
        })

    def evaluate(self, test_loader) -> Dict[str, Any]:
        """Evaluate pruned model with comprehensive metrics.

        Args:
            test_loader: Data loader yielding (data, target) batches.

        Returns:
            Dictionary containing evaluation metrics (accuracy, loss,
            average per-batch inference time, plus compression stats).

        Raises:
            ValueError: If test_loader yields no samples.
        """
        self.model.eval()
        criterion = nn.CrossEntropyLoss()
        total_loss = 0.0
        correct = 0
        total = 0
        total_time = 0.0

        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(self.device), target.to(self.device)
                # perf_counter is monotonic and higher-resolution than
                # time.time() for measuring short intervals.
                start_time = time.perf_counter()
                output = self.model(data)
                total_time += time.perf_counter() - start_time

                loss = criterion(output, target)
                total_loss += loss.item()

                predicted = output.argmax(dim=1)
                total += target.size(0)
                correct += (predicted == target).sum().item()

        if total == 0:
            raise ValueError("test_loader yielded no samples")

        num_batches = len(test_loader)
        accuracy = 100 * correct / total
        avg_loss = total_loss / num_batches
        avg_time = total_time / num_batches

        # Use .get for compression stats so evaluate() also works when
        # called before compress() (previously this raised KeyError).
        self.metrics.update({
            'accuracy': accuracy,
            'loss': avg_loss,
            'inference_time': avg_time,
            'sparsity': self.metrics.get('actual_sparsity', self.sparsity),
            'params': self.metrics.get('compressed_size'),
            'pruning_method': self.pruning_method,
            'global_pruning': self.global_pruning
        })

        return self.metrics.copy()

    def finalize(self):
        """Permanently remove pruned parameters.

        Folds each mask into its tensor so the model no longer carries
        ``<name>_orig``/``<name>_mask`` reparameterization. Parameters
        that were never pruned are skipped (prune.remove would raise
        ValueError on them).
        """
        for module, param_name in self.parameters_to_prune:
            if hasattr(module, f"{param_name}_mask"):
                prune.remove(module, param_name)