import time

import torch
import torch.nn as nn
from typing import Any, Dict, Optional

from ..core import ModelCompressor
from ..utils import setup_logger

@ModelCompressor.register_compressor('quantization')
class QuantizationCompressor(ModelCompressor):
    """Quantization compressor for neural networks.

    Supports PyTorch dynamic quantization (applied immediately in
    :meth:`compress`) and static quantization (observers inserted in
    :meth:`compress`, conversion finalized by :meth:`calibrate`).

    Attributes:
        quant_type: Quantization type ('dynamic' or 'static').
        dtype: Target data type (torch.qint8 or torch.quint8).
        qconfig: Optional custom quantization configuration.
    """

    def __init__(self, model: nn.Module, config):
        """Initialize quantization compressor.

        Args:
            model: Model to quantize.
            config: Configuration containing:
                - quant_type: 'dynamic' or 'static' (default 'dynamic')
                - dtype: torch.qint8 or torch.quint8 (default torch.qint8)
                - qconfig: Optional custom quantization config
        """
        super().__init__(model, config)
        self.quant_type = getattr(config, 'quant_type', 'dynamic')
        self.dtype = getattr(config, 'dtype', torch.qint8)
        self.qconfig = getattr(config, 'qconfig', None)

        # Snapshot the float model's size now, so size_reduction is later
        # computed against the true pre-quantization baseline. (Previously
        # both sizes were measured on the same model after quantization,
        # which always reported a 0% reduction.)
        self._original_size = self._tensor_bytes(self.model)

        # Seed metrics with the configured quantization settings.
        self.metrics.update({
            'quant_type': self.quant_type,
            'dtype': str(self.dtype),
            'custom_qconfig': self.qconfig is not None
        })

    @staticmethod
    def _tensor_bytes(model: nn.Module) -> int:
        """Return total bytes held by *model*'s parameters and buffers."""
        return sum(t.numel() * t.element_size()
                   for t in list(model.parameters()) + list(model.buffers()))

    def compress(self) -> nn.Module:
        """Apply quantization to the model.

        Dynamic quantization is applied immediately; static quantization
        only *prepares* the model (inserts observers) — the actual
        conversion happens after calibration (see :meth:`calibrate`).

        Returns:
            Quantized (dynamic) or observer-prepared (static) model.

        Raises:
            ValueError: If quantization type is invalid.
            RuntimeError: If quantization fails.
        """
        # Validate outside the try block so the documented ValueError is
        # not swallowed and re-wrapped as RuntimeError below.
        if self.quant_type not in ('dynamic', 'static'):
            raise ValueError(f"Invalid quantization type: {self.quant_type}")

        self.logger.info(f"Applying {self.quant_type} quantization with dtype {self.dtype}")

        try:
            if self.quant_type == 'dynamic':
                quantized_model = torch.quantization.quantize_dynamic(
                    self.model,
                    {nn.Linear, nn.Conv2d},
                    dtype=self.dtype
                )
            else:  # 'static'
                if self.qconfig is None:
                    self.model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
                else:
                    self.model.qconfig = self.qconfig

                # Prepare model for static quantization; actual conversion
                # happens after calibration.
                self.model.eval()
                self.model = torch.quantization.prepare(self.model, inplace=False)
                quantized_model = self.model

            # Measure the model actually returned to the caller — dynamic
            # quantization returns a new model and leaves self.model as-is.
            self._update_quantization_metrics(quantized_model)
            return quantized_model

        except Exception as e:
            self.logger.error(f"Quantization failed: {str(e)}")
            raise RuntimeError(f"Quantization failed: {str(e)}") from e

    def calibrate(self, calibration_loader):
        """Calibrate model for static quantization with detailed logging.

        Feeds the calibration data through the observer-prepared model so
        activation ranges are recorded, then converts it to its final
        quantized form.

        Args:
            calibration_loader: Data loader yielding (data, target) batches.
        """
        if self.quant_type != 'static':
            self.logger.warning("Calibration only needed for static quantization")
            return

        self.logger.info("Starting calibration for static quantization")
        num_samples = len(calibration_loader.dataset)
        num_batches = len(calibration_loader)
        self.logger.info(f"Calibration dataset size: {num_samples}")
        self.logger.info(f"Batch size: {calibration_loader.batch_size}")

        self.model.eval()
        start_time = time.time()
        processed_samples = 0

        with torch.no_grad():
            for batch_idx, (data, _) in enumerate(calibration_loader):
                _ = self.model(data)
                processed_samples += len(data)

                # Log progress every 10 batches and on the final batch.
                if (batch_idx + 1) % 10 == 0 or (batch_idx + 1) == num_batches:
                    self.logger.info(
                        f"Processed {processed_samples}/{num_samples} "
                        f"samples ({100 * processed_samples / num_samples:.1f}%)"
                    )

        # Convert the observed model into its quantized counterpart.
        self.logger.info("Converting to quantized model...")
        convert_start = time.time()
        self.model = torch.quantization.convert(self.model, inplace=False)
        convert_time = time.time() - convert_start

        self._update_quantization_metrics()
        total_time = time.time() - start_time

        self.logger.info(f"Calibration completed in {total_time:.2f} seconds")
        self.logger.info(f"Model conversion took {convert_time:.2f} seconds")
        self.logger.info(f"Final quantized model size: {self.metrics['quantized_size'] / 1024:.2f} KB")

    def _update_quantization_metrics(self, quantized_model: Optional[nn.Module] = None) -> None:
        """Update size metrics after quantization.

        Args:
            quantized_model: Model to measure; defaults to ``self.model``.
                compress() passes the dynamically quantized model
                explicitly because dynamic quantization returns a new
                model without replacing ``self.model``.
        """
        model = self.model if quantized_model is None else quantized_model
        # Compare the current (quantized) size against the float-model
        # baseline captured in __init__.
        original_size = self._original_size
        quantized_size = self._tensor_bytes(model)

        self.metrics.update({
            'original_size': original_size,
            'quantized_size': quantized_size,
            # Guard against a parameterless model (zero baseline).
            'size_reduction': ((original_size - quantized_size) / original_size
                               if original_size else 0.0)
        })

    def evaluate(self, test_loader) -> Dict[str, Any]:
        """Evaluate quantized model with comprehensive metrics.

        Args:
            test_loader: Data loader yielding (data, target) batches.

        Returns:
            Copy of the metrics dict, including accuracy (percent), mean
            loss, and mean per-batch inference time in seconds.
        """
        self.model.eval()
        criterion = nn.CrossEntropyLoss()
        total_loss = 0.0
        correct = 0
        total = 0
        total_time = 0.0

        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(self.device), target.to(self.device)
                start_time = time.time()
                output = self.model(data)
                total_time += time.time() - start_time

                loss = criterion(output, target)
                total_loss += loss.item()

                # argmax over the class dimension; avoids the deprecated
                # `.data` attribute access of the original code.
                predicted = output.argmax(dim=1)
                total += target.size(0)
                correct += (predicted == target).sum().item()

        # Guard against an empty loader to avoid ZeroDivisionError.
        num_batches = len(test_loader)
        accuracy = 100 * correct / total if total else 0.0
        avg_loss = total_loss / num_batches if num_batches else 0.0
        avg_time = total_time / num_batches if num_batches else 0.0

        # Record only values computed here; the size metrics already live
        # in self.metrics (re-reading them raised KeyError when evaluate()
        # ran before compress()).
        self.metrics.update({
            'accuracy': accuracy,
            'loss': avg_loss,
            'inference_time': avg_time,
            'quant_type': self.quant_type,
            'dtype': str(self.dtype)
        })

        return self.metrics.copy()

    def save_model(self, path: str, metadata: Optional[Dict] = None) -> None:
        """Save quantized model state and configuration.

        Args:
            path: Path to save model.
            metadata: Additional metadata stored alongside the state.
        """
        state = {
            'model_state': self.model.state_dict(),
            'config': {
                'quant_type': self.quant_type,
                'dtype': self.dtype,
                'qconfig': self.qconfig
            },
            'metrics': self.metrics
        }

        if metadata:
            state['metadata'] = metadata

        torch.save(state, path)
        self.logger.info(f"Quantized model saved to {path}")

    def load_model(self, path: str) -> None:
        """Load quantized model state from file.

        Args:
            path: Path to saved model.
        """
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints from trusted sources.
        state = torch.load(path)
        self.model.load_state_dict(state['model_state'])

        if 'config' in state:
            self.quant_type = state['config'].get('quant_type', self.quant_type)
            self.dtype = state['config'].get('dtype', self.dtype)
            self.qconfig = state['config'].get('qconfig', self.qconfig)

        if 'metrics' in state:
            self.metrics.update(state['metrics'])

        self.logger.info(f"Quantized model loaded from {path}")