import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, Any, Optional, Union
from pathlib import Path
from ..core import ModelCompressor

@ModelCompressor.register_compressor('distillation')
class DistillationCompressor(ModelCompressor):
    """Knowledge distillation compressor.

    Trains a (smaller) student model to mimic a fixed teacher model by
    combining a temperature-scaled soft-target KL loss with the ordinary
    hard-target cross-entropy loss (Hinton et al., 2015). The training loop
    itself is driven externally through :meth:`train_step`; :meth:`compress`
    only returns the student.
    """

    def __init__(self, model: nn.Module, config):
        """
        Args:
            model: Student model to be trained.
            config: Configuration object. Must provide ``teacher_model``
                (an ``nn.Module``); may provide ``temperature`` (default 3.0),
                ``alpha`` (default 0.5, weight of the soft-target term) and
                ``device`` (default: ``'cuda'`` if available, else ``'cpu'``).

        Raises:
            ValueError: If ``config`` has no ``teacher_model`` attribute.
            TypeError: If ``config.teacher_model`` is not an ``nn.Module``.
        """
        super().__init__(model, config)
        if not hasattr(config, 'teacher_model'):
            raise ValueError("teacher_model must be provided in config")
        if not isinstance(config.teacher_model, nn.Module):
            raise TypeError(f"teacher_model must be nn.Module, got {type(config.teacher_model)}")

        self.teacher_model = config.teacher_model
        self.temperature = getattr(config, 'temperature', 3.0)
        self.alpha = getattr(config, 'alpha', 0.5)  # balance between hard and soft targets
        self.device = getattr(config, 'device', 'cuda' if torch.cuda.is_available() else 'cpu')
        self.student_criterion = nn.CrossEntropyLoss()

        # The teacher is a fixed reference model: keep it in eval mode and
        # disable gradient tracking so its forward passes never build a graph.
        self.teacher_model.eval()
        for param in self.teacher_model.parameters():
            param.requires_grad_(False)

        # Log initialization parameters (lazy %-args: formatting is skipped
        # when the log level filters the record out).
        self.logger.info("Initializing DistillationCompressor with:")
        self.logger.info("- Temperature: %s", self.temperature)
        self.logger.info("- Alpha: %s", self.alpha)
        self.logger.info("- Device: %s", self.device)

        # Move models to device
        self.logger.info("Moving models to device...")
        self.model.to(self.device)
        self.teacher_model.to(self.device)
        self.logger.info("Student model parameters: %d",
                         sum(p.numel() for p in self.model.parameters()))
        self.logger.info("Teacher model parameters: %d",
                         sum(p.numel() for p in self.teacher_model.parameters()))

    def compress(self) -> nn.Module:
        """Return the student model.

        The actual distillation training is performed externally via
        :meth:`train_step`; this method only hands back the student.
        """
        self.logger.info("Starting knowledge distillation with temperature %s",
                         self.temperature)
        return self.model

    def distill_loss(self, student_output, teacher_output, target):
        """Combined distillation loss (soft target + hard target).

        The soft term is the KL divergence between the temperature-softened
        student and teacher distributions, scaled by ``T**2`` so that its
        gradient magnitude stays comparable to the hard term as the
        temperature changes. The hard term is plain cross-entropy against
        the ground-truth labels. The two are blended with ``alpha``.

        Args:
            student_output: Raw student logits, shape ``(batch, classes)``.
            teacher_output: Raw teacher logits, same shape.
            target: Integer class labels, shape ``(batch,)``.

        Returns:
            Scalar loss tensor.
        """
        # F.kl_div expects log-probabilities as input and probabilities as
        # target; 'batchmean' matches the mathematical KL definition.
        soft_loss = F.kl_div(
            F.log_softmax(student_output / self.temperature, dim=1),
            F.softmax(teacher_output / self.temperature, dim=1),
            reduction='batchmean') * (self.temperature ** 2)

        hard_loss = self.student_criterion(student_output, target)

        return self.alpha * soft_loss + (1 - self.alpha) * hard_loss

    def train_step(self, train_loader, optimizer):
        """Run one full epoch of distillation training.

        Args:
            train_loader: Iterable of ``(data, target)`` batches.
            optimizer: Optimizer over the student model's parameters.

        Returns:
            Mean distillation loss over the epoch.
        """
        self.model.train()
        self.teacher_model.eval()

        total_loss = 0.0
        for data, target in train_loader:
            data, target = data.to(self.device), target.to(self.device)

            optimizer.zero_grad()

            # Teacher runs without autograd: it is frozen and only provides
            # soft targets.
            with torch.no_grad():
                teacher_output = self.teacher_model(data)
            student_output = self.model(data)

            loss = self.distill_loss(student_output, teacher_output, target)

            loss.backward()
            optimizer.step()

            total_loss += loss.item()

        return total_loss / len(train_loader)

    def evaluate(self, test_loader) -> Dict[str, Any]:
        """Evaluate the student model and collect metrics.

        Computes accuracy, mean cross-entropy loss, mean per-batch inference
        time, parameter count, teacher accuracy, and the distillation
        hyperparameters, merging them into ``self.metrics``.

        Args:
            test_loader: Iterable of ``(data, target)`` batches.

        Returns:
            A copy of the updated metrics dictionary.
        """
        self.model.eval()
        correct = 0
        total = 0
        total_loss = 0.0
        total_time = 0.0

        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(self.device), target.to(self.device)
                # Wall-clock timing of the forward pass only. NOTE(review):
                # on CUDA this is approximate unless kernels are synchronized.
                start_time = time.time()
                output = self.model(data)
                total_time += time.time() - start_time

                loss = self.student_criterion(output, target)
                total_loss += loss.item()

                # argmax replaces the deprecated `torch.max(output.data, 1)`.
                predicted = output.argmax(dim=1)
                total += target.size(0)
                correct += (predicted == target).sum().item()

        accuracy = 100 * correct / total
        teacher_acc = self._evaluate_teacher(test_loader)
        avg_loss = total_loss / len(test_loader)
        avg_time = total_time / len(test_loader)

        self.metrics.update({
            'accuracy': accuracy,
            'teacher_accuracy': teacher_acc,
            'loss': avg_loss,
            'inference_time': avg_time,
            'params': sum(p.numel() for p in self.model.parameters()),
            'temperature': self.temperature,
            'alpha': self.alpha
        })

        return self.metrics.copy()

    def _evaluate_teacher(self, test_loader):
        """Return the teacher model's accuracy (percent) on ``test_loader``."""
        self.teacher_model.eval()
        correct = 0
        total = 0

        with torch.no_grad():
            for data, target in test_loader:
                # Fix: batches must be moved to the model's device, exactly
                # as in evaluate()/train_step(); the original omitted this
                # and crashed with a device mismatch when running on CUDA.
                data, target = data.to(self.device), target.to(self.device)
                output = self.teacher_model(data)
                predicted = output.argmax(dim=1)
                total += target.size(0)
                correct += (predicted == target).sum().item()

        return 100 * correct / total

    def save_model(self, path: Union[str, Path], save_teacher: bool = False, metadata: Optional[dict] = None):
        """Save student model (and optionally teacher model) state.

        Args:
            path: Path to save model
            save_teacher: Whether to also save teacher model state
            metadata: Additional metadata to save
        """
        state = {
            'student_state': self.model.state_dict(),
            'config': {
                'temperature': self.temperature,
                'alpha': self.alpha,
                'device': self.device
            },
            'metrics': self.metrics
        }

        if save_teacher:
            state['teacher_state'] = self.teacher_model.state_dict()

        if metadata:
            state['metadata'] = metadata

        torch.save(state, str(path))
        self.logger.info("Model saved to %s", path)

    def load_model(self, path: Union[str, Path], load_teacher: bool = False):
        """Load model state from file.

        Restores the student state dict (and optionally the teacher's) plus
        the saved temperature/alpha hyperparameters and metrics.

        Args:
            path: Path to saved model
            load_teacher: Whether to also load teacher model state
        """
        # map_location remaps tensors onto the current device, so a
        # checkpoint written on a GPU machine loads cleanly on CPU (the
        # original call failed in that scenario).
        state = torch.load(str(path), map_location=self.device)
        self.model.load_state_dict(state['student_state'])

        if load_teacher and 'teacher_state' in state:
            self.teacher_model.load_state_dict(state['teacher_state'])

        if 'config' in state:
            self.temperature = state['config'].get('temperature', self.temperature)
            self.alpha = state['config'].get('alpha', self.alpha)
            # Deliberately do NOT restore 'device' from the checkpoint: it
            # describes the machine that saved it, and overwriting
            # self.device without moving the models would leave the two
            # inconsistent.

        if 'metrics' in state:
            self.metrics.update(state['metrics'])

        self.logger.info("Model loaded from %s", path)