"""
AI Security Module for EndoSight-UC V4.0
Protects against adversarial attacks, model extraction, and AI-specific threats
"""

import hashlib
import io
import time
from typing import Any, Dict, Optional, Tuple

import cv2
import numpy as np
import structlog
import torch
import torch.nn.functional as F
from PIL import Image
from scipy import ndimage
from skimage import measure

logger = structlog.get_logger()


class AISecurityError(Exception):
    """Raised when an AI-specific security check fails or is violated."""


class AdversarialDetector:
    """Detect adversarial samples and malicious inputs.

    Heuristic detector combining noise, local-entropy, frequency-domain and
    gradient statistics to flag inputs that look artificially perturbed.
    Thresholds are empirical and should be tuned against the deployed
    model's real traffic.
    """

    def __init__(self):
        # Detection thresholds (empirical; tune per deployment)
        self.NOISE_THRESHOLD = 0.1  # 10% noise level threshold
        self.ENTROPY_THRESHOLD = 7.0  # High entropy threshold (bits, max 8 for uint8)
        self.GRADIENT_THRESHOLD = 0.5  # Gradient magnitude threshold
        self.FREQUENCY_THRESHOLD = 0.8  # High frequency energy ratio threshold

    def analyze_image_noise(self, image_tensor: torch.Tensor) -> Dict[str, float]:
        """Analyze noise characteristics of the image.

        Args:
            image_tensor: CHW or 1xCHW float tensor of raw pixel
                intensities in [0, 1] (NOT mean/std-normalized).

        Returns:
            Dict with 'noise_level', 'entropy' and 'high_frequency_ratio';
            all zeros if the analysis fails.
        """
        try:
            # Convert to HWC numpy for OpenCV
            if image_tensor.dim() == 4:
                img_np = image_tensor.squeeze(0).permute(1, 2, 0).cpu().numpy()
            else:
                img_np = image_tensor.permute(1, 2, 0).cpu().numpy()

            # Clip before the uint8 cast: any value outside [0, 1] would
            # otherwise wrap around (e.g. -0.01 -> 253) and corrupt stats.
            img_np = (np.clip(img_np, 0.0, 1.0) * 255).astype(np.uint8)

            # Laplacian variance is a standard noise/sharpness proxy
            gray = cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
            laplacian_var = cv2.Laplacian(gray, cv2.CV_64F).var()
            noise_level = laplacian_var / 10000.0  # Rough normalization

            # Mean local entropy of small patches
            entropy = self._calculate_local_entropy(gray)

            # High-frequency energy ratio via FFT
            freq_content = self._analyze_frequency_content(gray)

            return {
                'noise_level': float(noise_level),
                'entropy': float(entropy),
                'high_frequency_ratio': float(freq_content)
            }

        except Exception as e:
            logger.error("Image noise analysis failed", error=str(e))
            return {'noise_level': 0.0, 'entropy': 0.0, 'high_frequency_ratio': 0.0}

    def _calculate_local_entropy(self, image: np.ndarray, window_size: int = 7) -> float:
        """Mean Shannon entropy (bits) over window_size x window_size patches.

        Uses probability mass (counts / patch area) rather than
        np.histogram's density=True output: density values are per-unit
        heights that do not sum to 1, so the previous formula did not
        compute a true entropy. For uint8 input the result is in [0, 8].
        """
        try:
            # Reflect-pad so edge pixels get full windows
            pad = window_size // 2
            padded = np.pad(image, pad, mode='reflect')

            patch_area = float(window_size * window_size)
            entropies = []
            for i in range(image.shape[0]):
                for j in range(image.shape[1]):
                    patch = padded[i:i + window_size, j:j + window_size]
                    counts, _ = np.histogram(patch, bins=256, range=(0, 256))
                    probs = counts[counts > 0] / patch_area
                    entropies.append(-np.sum(probs * np.log2(probs)))

            return float(np.mean(entropies))
        except Exception:
            return 0.0

    def _analyze_frequency_content(self, image: np.ndarray) -> float:
        """Return the fraction of spectral energy outside the low-frequency
        disk (radius = min dimension / 8) around the DC component."""
        try:
            # 2D FFT with DC shifted to the center
            fft = np.fft.fft2(image)
            fft_shift = np.fft.fftshift(fft)

            h, w = fft_shift.shape
            center_h, center_w = h // 2, w // 2
            radius = min(center_h, center_w) // 4

            # Boolean mask selecting everything OUTSIDE the low-freq disk
            y, x = np.ogrid[:h, :w]
            mask = ((x - center_w) ** 2 + (y - center_h) ** 2) > radius ** 2

            total_energy = np.sum(np.abs(fft_shift) ** 2)
            high_freq_energy = np.sum(np.abs(fft_shift[mask]) ** 2)

            # Epsilon guards an all-zero image
            return high_freq_energy / (total_energy + 1e-10)
        except Exception:
            return 0.0

    def detect_gradient_anomalies(self, image_tensor: torch.Tensor) -> Dict[str, float]:
        """Compute first-difference gradient statistics of the luminance.

        Args:
            image_tensor: CHW or NCHW tensor; channel order assumed RGB.

        Returns:
            Dict with 'max_gradient', 'mean_gradient', 'std_gradient'
            (mean/std are the sums of the x and y statistics); zeros on
            failure.
        """
        try:
            # ITU-R BT.601 luminance weights
            if image_tensor.dim() == 4:
                gray = 0.299 * image_tensor[0, 0] + 0.587 * image_tensor[0, 1] + 0.114 * image_tensor[0, 2]
            else:
                gray = 0.299 * image_tensor[0] + 0.587 * image_tensor[1] + 0.114 * image_tensor[2]

            # Absolute horizontal / vertical first differences
            grad_x = torch.abs(gray[:, 1:] - gray[:, :-1])
            grad_y = torch.abs(gray[1:, :] - gray[:-1, :])

            max_grad = max(torch.max(grad_x).item(), torch.max(grad_y).item())
            mean_grad = torch.mean(grad_x).item() + torch.mean(grad_y).item()
            std_grad = torch.std(grad_x).item() + torch.std(grad_y).item()

            return {
                'max_gradient': float(max_grad),
                'mean_gradient': float(mean_grad),
                'std_gradient': float(std_grad)
            }

        except Exception as e:
            logger.error("Gradient analysis failed", error=str(e))
            return {'max_gradient': 0.0, 'mean_gradient': 0.0, 'std_gradient': 0.0}

    def detect_adversarial_patterns(self, image_bytes: bytes) -> Tuple[bool, Dict[str, Any]]:
        """
        Comprehensive adversarial pattern detection.

        Args:
            image_bytes: raw encoded image payload.

        Returns:
            Tuple[bool, Dict]: (is_adversarial, analysis_details)
        """
        try:
            # Load image
            image = Image.open(io.BytesIO(image_bytes)).convert('RGB')

            # Build tensors manually: `torch.transforms` does not exist
            # (Compose/ToTensor/Normalize live in torchvision), so the
            # previous code raised AttributeError on every call and the
            # detector silently failed open.
            arr = np.asarray(image, dtype=np.float32) / 255.0   # HWC in [0, 1]
            raw = torch.from_numpy(arr).permute(2, 0, 1)        # CHW
            mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
            std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
            normalized = ((raw - mean) / std).unsqueeze(0)      # ImageNet norm

            # Noise stats need raw intensities; gradient stats use the
            # normalized tensor the model would actually consume.
            noise_analysis = self.analyze_image_noise(raw.unsqueeze(0))
            gradient_analysis = self.detect_gradient_anomalies(normalized)

            # Decision logic: any single exceeded threshold flags the input
            is_adversarial = False
            reasons = []

            if noise_analysis['noise_level'] > self.NOISE_THRESHOLD:
                is_adversarial = True
                reasons.append(f"High noise level: {noise_analysis['noise_level']:.3f}")

            if noise_analysis['entropy'] > self.ENTROPY_THRESHOLD:
                is_adversarial = True
                reasons.append(f"High entropy: {noise_analysis['entropy']:.2f}")

            if noise_analysis['high_frequency_ratio'] > self.FREQUENCY_THRESHOLD:
                is_adversarial = True
                reasons.append(f"High frequency content: {noise_analysis['high_frequency_ratio']:.3f}")

            if gradient_analysis['max_gradient'] > self.GRADIENT_THRESHOLD:
                is_adversarial = True
                reasons.append(f"High gradient magnitude: {gradient_analysis['max_gradient']:.3f}")

            analysis_details = {
                'noise_analysis': noise_analysis,
                'gradient_analysis': gradient_analysis,
                'is_adversarial': is_adversarial,
                'reasons': reasons,
                'risk_score': self._calculate_risk_score(noise_analysis, gradient_analysis)
            }

            if is_adversarial:
                logger.warning("Adversarial pattern detected",
                               risk_score=analysis_details['risk_score'],
                               reasons=reasons)

            return is_adversarial, analysis_details

        except Exception as e:
            logger.error("Adversarial detection failed", error=str(e))
            # NOTE(review): returning False here fails OPEN; the caller
            # (AISecurityValidator) is the layer that fails closed.
            return False, {'error': str(e)}

    def _calculate_risk_score(self, noise_analysis: Dict, gradient_analysis: Dict) -> float:
        """Weighted adversarial risk score in [0, 1].

        Each metric is normalized by its threshold (capped at 1.0), then
        combined as 0.3*noise + 0.2*entropy + 0.3*frequency + 0.2*gradient.
        """
        try:
            noise_score = min(noise_analysis['noise_level'] / self.NOISE_THRESHOLD, 1.0)
            entropy_score = min(noise_analysis['entropy'] / self.ENTROPY_THRESHOLD, 1.0)
            freq_score = min(noise_analysis['high_frequency_ratio'] / self.FREQUENCY_THRESHOLD, 1.0)
            grad_score = min(gradient_analysis['max_gradient'] / self.GRADIENT_THRESHOLD, 1.0)

            risk_score = (noise_score * 0.3 + entropy_score * 0.2 +
                          freq_score * 0.3 + grad_score * 0.2)

            return min(risk_score, 1.0)
        except Exception:
            return 0.0


class ModelExtractionMonitor:
    """Monitor query traffic for signs of model extraction attacks.

    Keeps a one-hour sliding window of per-client query records and flags
    clients that either query faster than ``max_queries_per_minute`` or
    repeatedly submit near-identical inputs (systematic probing).
    """

    def __init__(self):
        # Sliding window of query records; pruned to the last hour on each call
        self.request_history = []
        self.similar_queries_threshold = 0.95  # duplicate-pair ratio cutoff
        self.max_queries_per_minute = 100
        self.max_similar_queries = 10  # min history before similarity check

    def monitor_query(self, query_features: Dict[str, Any], client_ip: str) -> Dict[str, Any]:
        """Record one query and analyze the client's recent pattern.

        Args:
            query_features: lightweight features of the query; the 'hash'
                entry is used for duplicate detection.
            client_ip: originating client address.

        Returns:
            Dict with at least a 'suspicious' bool; extra keys explain why.
        """
        try:
            timestamp = time.time()

            # Record the query. (Removed: the old code built an unused
            # torch.cuda.Event per request here, pure overhead.)
            self.request_history.append({
                'timestamp': timestamp,
                'client_ip': client_ip,
                'features': query_features
            })

            # Drop records older than one hour to bound memory
            cutoff_time = timestamp - 3600
            self.request_history = [r for r in self.request_history
                                    if r['timestamp'] > cutoff_time]

            return self._analyze_query_patterns(client_ip, timestamp)

        except Exception as e:
            logger.error("Query monitoring failed", error=str(e))
            return {'suspicious': False, 'reason': 'monitoring_error'}

    def _analyze_query_patterns(self, client_ip: str, current_time: float) -> Dict[str, Any]:
        """Analyze one client's recorded queries for extraction attempts.

        Checks, in order: (1) raw query frequency over the last minute,
        (2) similarity of the client's queries once enough history exists.
        """
        try:
            ip_queries = [q for q in self.request_history if q['client_ip'] == client_ip]

            # (1) Frequency: more than the per-minute budget is suspicious
            recent_queries = [q for q in ip_queries if current_time - q['timestamp'] < 60]
            if len(recent_queries) > self.max_queries_per_minute:
                return {
                    'suspicious': True,
                    'reason': 'high_query_frequency',
                    'frequency': len(recent_queries)
                }

            # (2) Similarity: systematic exploration tends to reuse inputs
            if len(ip_queries) > self.max_similar_queries:
                similarity_score = self._calculate_query_similarity(ip_queries)
                if similarity_score > self.similar_queries_threshold:
                    return {
                        'suspicious': True,
                        'reason': 'similar_query_pattern',
                        'similarity': similarity_score
                    }

            return {
                'suspicious': False,
                'query_count': len(ip_queries),
                'recent_count': len(recent_queries)
            }

        except Exception as e:
            logger.error("Pattern analysis failed", error=str(e))
            return {'suspicious': False, 'reason': 'analysis_error'}

    def _calculate_query_similarity(self, queries: list) -> float:
        """Fraction of query pairs sharing an identical feature hash.

        Deterministic replacement for the previous np.random placeholder
        (which made the check fire at random). Returns a value in [0, 1];
        1.0 means every recorded query from the client was identical.
        """
        try:
            if len(queries) < 2:
                return 0.0

            hashes = [q.get('features', {}).get('hash') for q in queries]
            n = len(hashes)
            duplicate_pairs = sum(
                1
                for i in range(n - 1)
                for j in range(i + 1, n)
                if hashes[i] == hashes[j]
            )
            total_pairs = n * (n - 1) // 2
            return duplicate_pairs / total_pairs
        except Exception:
            return 0.0


class AISecurityValidator:
    """Main AI security validator combining all security checks."""

    def __init__(self):
        self.adversarial_detector = AdversarialDetector()
        self.extraction_monitor = ModelExtractionMonitor()
        # Global kill switch: when False every input is accepted unchecked
        self.validation_enabled = True

    def validate_input_security(self, image_bytes: bytes, client_ip: str = "unknown") -> Tuple[bool, Dict[str, Any]]:
        """
        Comprehensive AI security validation.

        Args:
            image_bytes: raw uploaded image payload.
            client_ip: originating client address for rate/pattern tracking.

        Returns:
            Tuple[bool, Dict]: (is_safe, security_report)
        """
        if not self.validation_enabled:
            return True, {'validation_enabled': False}

        try:
            security_report = {
                'adversarial_detection': {},
                'extraction_monitoring': {},
                'overall_safe': True,
                'risk_factors': []
            }

            # 1. Adversarial detection
            is_adversarial, adv_analysis = self.adversarial_detector.detect_adversarial_patterns(image_bytes)
            security_report['adversarial_detection'] = adv_analysis
            if is_adversarial:
                security_report['overall_safe'] = False
                security_report['risk_factors'].append('adversarial_input')

            # 2. Model extraction monitoring (using simplified features)
            query_features = self._extract_simple_features(image_bytes)
            extraction_analysis = self.extraction_monitor.monitor_query(query_features, client_ip)
            security_report['extraction_monitoring'] = extraction_analysis
            if extraction_analysis.get('suspicious', False):
                security_report['overall_safe'] = False
                security_report['risk_factors'].append('potential_extraction')

            # Log security events
            if not security_report['overall_safe']:
                logger.warning("AI security threat detected",
                               client_ip=client_ip,
                               risk_factors=security_report['risk_factors'],
                               adversarial_score=adv_analysis.get('risk_score', 0))

            return security_report['overall_safe'], security_report

        except Exception as e:
            logger.error("AI security validation failed", error=str(e))
            # Fail CLOSED: if the validation pipeline itself breaks,
            # reject the input rather than letting it through unchecked.
            return False, {'error': str(e), 'overall_safe': False}

    def _extract_simple_features(self, image_bytes: bytes) -> Dict[str, Any]:
        """Extract lightweight, non-intrusive features for monitoring.

        The digest uses SHA-256 instead of the builtin hash(): hashing of
        bytes is salted per process (PYTHONHASHSEED), so hash() values
        cannot deduplicate queries across restarts or worker processes.
        """
        # Computed before parsing so even undecodable payloads get a digest
        digest = hashlib.sha256(image_bytes).hexdigest()
        try:
            image = Image.open(io.BytesIO(image_bytes))
            return {
                'size': image.size,
                'mode': image.mode,
                'format': image.format,
                'hash': digest
            }
        except Exception:
            # Keep the 'hash' key here too so duplicate garbage payloads
            # still correlate in the extraction monitor.
            return {'size': (0, 0), 'mode': 'unknown', 'format': 'unknown',
                    'hash': digest}


# Global security validator instance.
# NOTE: module-level singleton shared by every importer; constructing it only
# builds the detector/monitor objects (no I/O or model loading here).
ai_security_validator = AISecurityValidator()


# Public API of this module (classes plus the shared singleton)
__all__ = [
    'AISecurityError',
    'AdversarialDetector',
    'ModelExtractionMonitor',
    'AISecurityValidator',
    'ai_security_validator'
]