"""
Secure model loading system for EndoSight-UC V4.0
Prevents model deserialization attacks and unauthorized model execution
"""

import os
import hashlib
import struct
import json
import time
from pathlib import Path
from typing import Optional, Tuple, Dict, Any
import torch
import structlog

logger = structlog.get_logger()


class ModelSecurityError(Exception):
    """Raised when a model file fails any security check (verification, content scan, or load)."""


class SecureModelLoader:
    """Secure model loader with signature verification and sandboxing.

    Defense layers applied before a checkpoint reaches a model:
      1. File metadata checks (existence, size bounds, extension allow-list).
      2. SHA-256 hashing with optional verification against trusted signatures.
      3. Heuristic header scan for pickle-exploit / bytecode payloads.
      4. Restricted ``torch.load`` (weights-only where supported) plus
         structural validation of the resulting state dict.
    """

    def __init__(self, max_model_size_mb: int = 2000):
        """
        Args:
            max_model_size_mb: Upper bound on accepted model file size, in MB.
                Oversized files are rejected to limit resource exhaustion.
        """
        self.max_model_size = max_model_size_mb * 1024 * 1024  # Convert to bytes
        # Only common PyTorch serialization extensions are accepted.
        self.allowed_model_types = {'.pth', '.pt', '.pkl'}

        # Trusted model signatures (pre-computed for legitimate models).
        # Format: "model_filename": "sha256_hash". Populated via
        # load_trusted_signatures() or create_model_signature().
        self.trusted_signatures: Dict[str, str] = {}

    def verify_model_file(self, model_path: Path) -> Tuple[bool, str]:
        """
        Verify model file integrity and safety.

        Checks existence, size bounds, extension, and — when a trusted
        signature is registered for this filename — the SHA-256 hash.

        Returns:
            Tuple[bool, str]: (is_valid, error_message)
        """
        try:
            # 1. Check file existence and basic properties
            if not model_path.exists():
                return False, f"Model file not found: {model_path}"

            # 2. Check file size
            file_size = model_path.stat().st_size
            if file_size > self.max_model_size:
                return False, f"Model file too large: {file_size / (1024*1024):.1f}MB > {self.max_model_size / (1024*1024)}MB"

            if file_size < 1024:  # Less than 1KB is suspicious
                return False, f"Model file too small: {file_size} bytes"

            # 3. Check file extension
            if model_path.suffix.lower() not in self.allowed_model_types:
                return False, f"Invalid model extension: {model_path.suffix}"

            # 4. Calculate file hash (truncated in logs to keep them readable)
            file_hash = self._calculate_file_hash(model_path)
            logger.info("Model hash calculated", file_hash=file_hash[:16] + "...")

            # 5. Verify against trusted signatures (if available)
            if self.trusted_signatures:
                model_name = model_path.name
                if model_name in self.trusted_signatures:
                    expected_hash = self.trusted_signatures[model_name]
                    if file_hash != expected_hash:
                        return False, f"Model signature mismatch: expected {expected_hash[:16]}..., got {file_hash[:16]}..."
                    logger.info("Model signature verified", model_name=model_name)
                else:
                    # Unknown files are allowed through (with a warning) so new
                    # models can be deployed before their hashes are registered.
                    logger.warning("Unknown model file, proceeding with caution", model_name=model_name)

            return True, "Model file verification passed"

        except Exception as e:
            logger.error("Model verification failed", error=str(e), model_path=str(model_path))
            return False, f"Model verification error: {str(e)}"

    def _calculate_file_hash(self, file_path: Path) -> str:
        """Calculate the SHA-256 hash of a model file, streaming in chunks."""
        sha256_hash = hashlib.sha256()
        with open(file_path, "rb") as f:
            # 64KB chunks: large enough to hash multi-GB checkpoints quickly,
            # small enough to keep memory bounded.
            for chunk in iter(lambda: f.read(65536), b""):
                sha256_hash.update(chunk)
        return sha256_hash.hexdigest()

    def scan_model_content(self, model_path: Path) -> Tuple[bool, str]:
        """
        Scan model content for potential malicious code.

        NOTE: this is a best-effort heuristic — it inspects only the first
        8KB of the file, so it cannot catch payloads deeper in the archive.
        The weights-only load in load_model_safely() is the real guard.

        Returns:
            Tuple[bool, str]: (is_safe, scan_result)
        """
        try:
            with open(model_path, 'rb') as f:
                # Read first few KB to scan for suspicious patterns
                header = f.read(8192)

            # Byte patterns characteristic of pickle-based code execution
            # gadgets and shell/eval payloads.
            suspicious_patterns = [
                b'__reduce__',
                b'__getstate__',
                b'__setstate__',
                b'eval(',
                b'exec(',
                b'import os',
                b'subprocess',
                b'commands.getoutput',
                b'pickle.loads',
                b'marshal.loads',
            ]

            for pattern in suspicious_patterns:
                if pattern in header:
                    return False, f"Suspicious pattern detected: {pattern.decode('utf-8', errors='ignore')}"

            # Compiled .pyc magic numbers — a model file should never start
            # with Python bytecode.
            magic_numbers = [b'\x33\x0d\x0d\x0a',  # Python 3.8
                           b'\x42\x0d\x0d\x0a',  # Python 3.9
                           b'\x3c\x0d\x0d\x0a']  # Python 3.10+

            for magic in magic_numbers:
                if header.startswith(magic):
                    return False, "Python bytecode detected in model file"

            return True, "Content scan passed"

        except Exception as e:
            logger.error("Model content scan failed", error=str(e))
            return False, f"Content scan error: {str(e)}"

    def load_model_safely(self, model_path: Path, device: torch.device) -> Dict[str, Any]:
        """
        Load a checkpoint's state dict with comprehensive security checks.

        Args:
            model_path: Path to model file
            device: Accepted for interface compatibility; loading always maps
                to CPU and final device placement is left to the caller.

        Returns:
            The validated state dict (parameter name -> tensor). Instantiating
            the actual nn.Module is deliberately left to the caller so this
            loader never constructs arbitrary classes.

        Raises:
            ModelSecurityError: If any security check fails
        """
        logger.info("Starting secure model loading", model_path=str(model_path))

        # 1. Verify model file
        is_valid, message = self.verify_model_file(model_path)
        if not is_valid:
            raise ModelSecurityError(f"Model verification failed: {message}")

        # 2. Scan model content
        is_safe, scan_result = self.scan_model_content(model_path)
        if not is_safe:
            raise ModelSecurityError(f"Model content scan failed: {scan_result}")

        # 3. Load model with security precautions
        try:
            logger.info("Loading model with PyTorch secure loading")

            # weights_only=True disables arbitrary-object unpickling — the
            # primary deserialization attack vector this module exists to
            # prevent. map_location='cpu' prevents automatic device placement.
            try:
                checkpoint = torch.load(model_path, map_location='cpu', weights_only=True)
            except TypeError:
                # Older torch (< 1.13) lacks the weights_only kwarg; fall back
                # to the legacy loader, relying on the checks above.
                checkpoint = torch.load(model_path, map_location='cpu')

            # Validate checkpoint structure
            if not isinstance(checkpoint, dict):
                raise ModelSecurityError("Invalid checkpoint structure: expected dict")

            # Check for suspicious keys in checkpoint (guard isinstance: keys
            # are not guaranteed to be strings, and .lower() would crash).
            suspicious_keys = ['__python__', '__module__', '__main__']
            for key in checkpoint.keys():
                if isinstance(key, str) and any(sus in key.lower() for sus in suspicious_keys):
                    logger.warning("Suspicious checkpoint key detected", key=key)

            # Extract model state dict safely
            if 'model_state_dict' in checkpoint:
                state_dict = checkpoint['model_state_dict']
            else:
                state_dict = checkpoint

            if not isinstance(state_dict, dict):
                raise ModelSecurityError("Invalid state dict structure")

            # Validate tensor shapes and values
            for key, tensor in state_dict.items():
                if not isinstance(tensor, torch.Tensor):
                    raise ModelSecurityError(f"Invalid tensor type for key: {key}")

                # Check for extremely large tensors (potential DoS)
                if tensor.numel() > 1e9:  # More than 1 billion elements
                    raise ModelSecurityError(f"Tensor too large for key: {key}")

                # NaN/Inf weights indicate corruption or tampering; warn only,
                # since some legitimate checkpoints contain sentinel values.
                if torch.isnan(tensor).any() or torch.isinf(tensor).any():
                    logger.warning("Model contains NaN or Inf values", key=key)

            # Model instantiation is intentionally done outside for security.
            logger.info("Model loading completed successfully")
            return state_dict  # Return state dict for safe loading outside

        except torch.serialization.pickle.UnpicklingError as e:
            raise ModelSecurityError(f"Model deserialization failed - possible malicious content: {e}")
        except Exception as e:
            logger.error("Model loading failed", error=str(e), exc_info=True)
            raise ModelSecurityError(f"Model loading failed: {str(e)}")

    def create_model_signature(self, model_path: Path) -> str:
        """Create, persist, and register a signature for a trusted model.

        Returns:
            The SHA-256 hex digest of the model file.
        """
        file_hash = self._calculate_file_hash(model_path)

        # Save signature to trusted signatures file
        sig_file = model_path.parent / f"{model_path.stem}.signature"
        with open(sig_file, 'w') as f:
            json.dump({
                'filename': model_path.name,
                'sha256': file_hash,
                'created_at': time.time(),
                'size': model_path.stat().st_size
            }, f, indent=2)

        # Register immediately so this instance trusts the file it just signed
        # (previously the freshly signed model still logged "unknown model").
        self.trusted_signatures[model_path.name] = file_hash

        logger.info("Model signature created", signature_file=str(sig_file))
        return file_hash

    def load_trusted_signatures(self, signatures_dir: Optional[Path] = None) -> None:
        """Load trusted model signatures from *.signature JSON files.

        Args:
            signatures_dir: Directory to scan; defaults to the package-level
                "trusted_signatures" directory. Missing directories and
                malformed files are skipped (logged, not raised).
        """
        if signatures_dir is None:
            signatures_dir = Path(__file__).parent.parent / "trusted_signatures"

        if signatures_dir.exists():
            for sig_file in signatures_dir.glob("*.signature"):
                try:
                    with open(sig_file) as f:
                        sig_data = json.load(f)
                    self.trusted_signatures[sig_data['filename']] = sig_data['sha256']
                    logger.info("Loaded trusted signature", filename=sig_data['filename'])
                except Exception as e:
                    logger.error("Failed to load signature", file=str(sig_file), error=str(e))


# Global secure loader instance shared across the application.
# NOTE(review): trusted_signatures starts empty — callers presumably invoke
# secure_loader.load_trusted_signatures() at startup; confirm against the
# application entry point.
secure_loader = SecureModelLoader()


def load_model_securely(model_path: str, device: torch.device, model_class=None, model_args=None) -> Any:
    """
    Convenience function to load a model (or its state dict) securely.

    Args:
        model_path: Path to model file
        device: Target device for the instantiated model
        model_class: Model class to instantiate (if needed)
        model_args: Keyword arguments for model class instantiation

    Returns:
        If ``model_class`` is given: the instantiated model with the verified
        state dict loaded (strict key matching), moved to ``device`` and set
        to eval mode. Otherwise: the verified state dict for manual loading.

    Raises:
        ModelSecurityError: If any security check fails.
    """
    path = Path(model_path)

    # Load state dict securely (verification, scan, restricted deserialization)
    state_dict = secure_loader.load_model_safely(path, device)

    # If a model class was provided, create an instance and load the state dict
    if model_class is not None:
        model = model_class(**(model_args or {}))
        # strict=True rejects checkpoints whose keys don't exactly match the
        # model, catching architecture/checkpoint mismatches early.
        model.load_state_dict(state_dict, strict=True)
        model.to(device)
        model.eval()
        return model

    # Otherwise return the state dict for manual loading by the caller
    return state_dict


# Export main functions
# Export main functions — the public API surface for `from <module> import *`.
__all__ = [
    'SecureModelLoader',
    'ModelSecurityError',
    'secure_loader',
    'load_model_securely'
]