"""
对抗攻击检测算法 - V4.0零信任架构
检测和防御针对AI模型的对抗样本攻击

检测维度:
- 像素统计异常检测
- 频域分析
- 梯度相似性分析
- 空域模式异常
- 多模型集成验证

安全特性:
- 多层检测机制
- 自适应阈值
- 误报控制
- 实时性能优化
"""

import numpy as np
import torch
import torch.nn.functional as F
import cv2
import structlog
from typing import Dict, Any, Tuple, Optional, List
from scipy import ndimage
from scipy.fft import fft2, fftshift
import warnings
warnings.filterwarnings('ignore')

logger = structlog.get_logger()


class AdversarialDetectionError(Exception):
    """Raised when the adversarial-sample detection pipeline fails."""


class AdversarialDetector:
    """Multi-dimensional adversarial-sample detector.

    Runs up to four independent analyses on an input image — pixel
    statistics, frequency domain, gradients, and spatial patterns — and
    combines their verdicts into a single adversarial/benign decision
    with an aggregate confidence and threat level.
    """

    def __init__(self,
                 enable_pixel_analysis: bool = True,
                 enable_frequency_analysis: bool = True,
                 enable_gradient_analysis: bool = True,
                 enable_spatial_analysis: bool = True,
                 sensitivity_level: str = 'medium'):
        """
        Initialize the adversarial detector.

        Args:
            enable_pixel_analysis: enable pixel-statistics analysis
            enable_frequency_analysis: enable frequency-domain analysis
            enable_gradient_analysis: enable gradient analysis
            enable_spatial_analysis: enable spatial-pattern analysis
            sensitivity_level: detection sensitivity ('low', 'medium',
                'high'); unknown values fall back to 'medium'
        """
        self.enable_pixel_analysis = enable_pixel_analysis
        self.enable_frequency_analysis = enable_frequency_analysis
        self.enable_gradient_analysis = enable_gradient_analysis
        self.enable_spatial_analysis = enable_spatial_analysis

        # Resolve per-metric thresholds from the requested sensitivity.
        self.thresholds = self._get_thresholds(sensitivity_level)
        self.sensitivity_level = sensitivity_level

        # Running counters; 'detection_methods' maps method name -> hit count.
        self.detection_stats = {
            'total_checks': 0,
            'adversarial_detected': 0,
            'false_positives': 0,
            'detection_methods': {}
        }

    def _get_thresholds(self, sensitivity: str) -> Dict[str, float]:
        """Return the detection-threshold table for a sensitivity level.

        Unknown levels fall back to the 'medium' table.
        """
        thresholds = {
            'low': {
                'noise_variance': 0.15,
                'entropy': 7.5,
                'high_frequency_ratio': 0.85,
                'gradient_std': 0.6,
                'spatial_entropy': 8.0,
                'pixel_std_deviation': 0.08
            },
            'medium': {
                'noise_variance': 0.10,
                'entropy': 7.0,
                'high_frequency_ratio': 0.80,
                'gradient_std': 0.5,
                'spatial_entropy': 7.5,
                'pixel_std_deviation': 0.06
            },
            'high': {
                'noise_variance': 0.05,
                'entropy': 6.5,
                'high_frequency_ratio': 0.75,
                'gradient_std': 0.4,
                'spatial_entropy': 7.0,
                'pixel_std_deviation': 0.04
            }
        }
        return thresholds.get(sensitivity, thresholds['medium'])

    def detect_adversarial(self, image_tensor: torch.Tensor) -> Dict[str, Any]:
        """
        Detect whether an image tensor looks like an adversarial sample.

        Args:
            image_tensor: input image tensor, (C, H, W) or (1, C, H, W);
                float values are assumed to lie in [0, 1] (values outside
                that range are clipped before the uint8 conversion)

        Returns:
            result dict with keys 'is_adversarial', 'confidence',
            'detection_methods', 'risk_factors', 'analysis_details',
            'sensitivity_level', 'methods_detected', 'total_methods',
            'threat_level'

        Raises:
            AdversarialDetectionError: if any stage of the pipeline fails
        """
        try:
            # Accept a singleton batch dimension.
            if image_tensor.dim() == 4:
                image_tensor = image_tensor.squeeze(0)
            # Re-check AFTER the squeeze: squeeze(0) is a no-op for a
            # batch of size > 1, which must be rejected explicitly.
            if image_tensor.dim() != 3:
                raise ValueError(f"不支持的张量维度: {image_tensor.dim()}")

            # detach() handles tensors that require grad (plain .numpy()
            # raises for those); cpu() handles CUDA tensors.
            image_np = image_tensor.detach().cpu().numpy()

            # Convert CHW -> HWC for the OpenCV/NumPy analyses below.
            if image_np.shape[0] == 3:  # CHW layout
                image_np = np.transpose(image_np, (1, 2, 0))
            elif image_np.shape[2] != 3:  # neither CHW nor HWC
                raise ValueError(f"不支持的张量形状: {image_np.shape}")

            # Map [0, 1] floats to uint8. Clip first so out-of-range
            # values (common in perturbed inputs) cannot wrap around
            # during the integer cast.
            image_np = (np.clip(image_np, 0.0, 1.0) * 255).astype(np.uint8)

            logger.debug("开始对抗样本检测",
                       shape=image_np.shape,
                       sensitivity=self.sensitivity_level)

            detection_results = {
                'is_adversarial': False,
                'confidence': 0.0,
                'detection_methods': {},
                'risk_factors': [],
                'analysis_details': {},
                'sensitivity_level': self.sensitivity_level
            }

            method_results = []

            # Each enabled analysis contributes one per-method verdict.
            if self.enable_pixel_analysis:
                method_results.append(self._analyze_pixel_statistics(image_np))

            if self.enable_frequency_analysis:
                method_results.append(self._analyze_frequency_domain(image_np))

            if self.enable_gradient_analysis:
                method_results.append(self._analyze_gradients(image_np))

            if self.enable_spatial_analysis:
                method_results.append(self._analyze_spatial_patterns(image_np))

            # Fold the per-method verdicts into the final decision.
            self._combine_detection_results(method_results, detection_results)

            # Update running counters.
            self.detection_stats['total_checks'] += 1
            if detection_results['is_adversarial']:
                self.detection_stats['adversarial_detected'] += 1

            logger.info("对抗样本检测完成",
                       is_adversarial=detection_results['is_adversarial'],
                       confidence=detection_results['confidence'],
                       methods_detected=len(detection_results['detection_methods']))

            return detection_results

        except Exception as e:
            logger.error("对抗检测过程失败", error=str(e))
            raise AdversarialDetectionError(f"对抗检测失败: {e}")

    def _analyze_pixel_statistics(self, image_np: np.ndarray) -> Dict[str, Any]:
        """Analyze first-order pixel statistics (noise, entropy, moments).

        Args:
            image_np: HWC uint8 RGB image

        Returns:
            per-method result dict ('method', 'is_anomalous', 'confidence',
            'risk_factors', 'details'); on internal failure a non-anomalous
            result with the error in 'risk_factors' is returned instead of
            raising, so one failed analysis does not abort the pipeline
        """
        try:
            # All statistics are computed on the grayscale projection.
            gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)

            pixel_std = np.std(gray)
            pixel_mean = np.mean(gray)
            pixel_variance = np.var(gray)

            # Laplacian variance as a cheap local-noise proxy; /1e4 maps
            # it to roughly unit scale for the threshold comparison.
            laplacian_var = cv2.Laplacian(gray, cv2.CV_64F).var()
            noise_level = laplacian_var / 10000.0

            # Shannon entropy of the 256-bin intensity histogram.
            hist, _ = np.histogram(gray, bins=256, density=True)
            hist = hist[hist > 0]  # drop empty bins before the log
            entropy = -np.sum(hist * np.log2(hist + 1e-10))

            # Higher-order moments of the intensity distribution.
            from scipy.stats import skew, kurtosis
            pixel_skew = skew(gray.flatten())
            pixel_kurtosis = kurtosis(gray.flatten())

            is_anomalous = False
            risk_factors = []

            if noise_level > self.thresholds['noise_variance']:
                is_anomalous = True
                risk_factors.append(f"噪声水平过高: {noise_level:.3f}")

            if entropy > self.thresholds['entropy']:
                is_anomalous = True
                risk_factors.append(f"熵值过高: {entropy:.2f}")

            # NOTE(review): gray is uint8 so pixel_std is on a 0-255
            # scale, while this threshold (0.04-0.08) looks tuned for
            # [0, 1] data — as written it only fires on near-constant
            # images. Confirm the intended scale.
            if pixel_std < self.thresholds['pixel_std_deviation']:
                is_anomalous = True
                risk_factors.append(f"像素标准差过低: {pixel_std:.3f}")

            if abs(pixel_skew) > 2.0:
                is_anomalous = True
                risk_factors.append(f"像素分布偏斜: {pixel_skew:.2f}")

            return {
                'method': 'pixel_statistics',
                'is_anomalous': is_anomalous,
                'confidence': min(1.0, (noise_level + entropy/10 + abs(pixel_skew)/5) / 3),
                'risk_factors': risk_factors,
                'details': {
                    'noise_level': noise_level,
                    'entropy': entropy,
                    'pixel_mean': pixel_mean,
                    'pixel_std': pixel_std,
                    'pixel_variance': pixel_variance,
                    'skewness': pixel_skew,
                    'kurtosis': pixel_kurtosis
                }
            }

        except Exception as e:
            logger.error("像素统计分析失败", error=str(e))
            return {
                'method': 'pixel_statistics',
                'is_anomalous': False,
                'confidence': 0.0,
                'risk_factors': [f"分析失败: {e}"],
                'details': {}
            }

    def _analyze_frequency_domain(self, image_np: np.ndarray) -> Dict[str, Any]:
        """Analyze the 2-D FFT spectrum for excess high-frequency energy.

        Args:
            image_np: HWC uint8 RGB image

        Returns:
            per-method result dict; failures are reported in-band rather
            than raised (see _analyze_pixel_statistics)
        """
        try:
            gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)

            # Centered magnitude spectrum.
            fft = fft2(gray)
            fft_shift = fftshift(fft)
            magnitude_spectrum = np.abs(fft_shift)

            # Everything outside a disc of radius min(H, W)//8 around the
            # spectrum centre counts as "high frequency".
            center_h, center_w = magnitude_spectrum.shape[0] // 2, magnitude_spectrum.shape[1] // 2
            radius = min(center_h, center_w) // 4

            y, x = np.ogrid[:magnitude_spectrum.shape[0], :magnitude_spectrum.shape[1]]
            mask = ((x - center_w) ** 2 + (y - center_h) ** 2) > radius ** 2

            # Fraction of spectral energy carried by high frequencies.
            total_energy = np.sum(magnitude_spectrum ** 2)
            high_freq_energy = np.sum(magnitude_spectrum[mask] ** 2)
            high_freq_ratio = high_freq_energy / (total_energy + 1e-10)

            # Entropy of the (normalised) magnitude distribution.
            freq_magnitude = magnitude_spectrum.flatten()
            freq_magnitude = freq_magnitude[freq_magnitude > 0]
            freq_entropy = -np.sum((freq_magnitude / np.sum(freq_magnitude)) *
                                np.log2(freq_magnitude / np.sum(freq_magnitude) + 1e-10))

            is_anomalous = False
            risk_factors = []

            if high_freq_ratio > self.thresholds['high_frequency_ratio']:
                is_anomalous = True
                risk_factors.append(f"高频成分过多: {high_freq_ratio:.3f}")

            if freq_entropy > 8.0:  # fixed cap, independent of sensitivity
                is_anomalous = True
                risk_factors.append(f"频域熵过高: {freq_entropy:.2f}")

            return {
                'method': 'frequency_domain',
                'is_anomalous': is_anomalous,
                'confidence': min(1.0, high_freq_ratio * 1.2),
                'risk_factors': risk_factors,
                'details': {
                    'high_frequency_ratio': high_freq_ratio,
                    'frequency_entropy': freq_entropy,
                    'total_energy': total_energy,
                    'dominant_frequency': np.unravel_index(np.argmax(magnitude_spectrum), magnitude_spectrum.shape)
                }
            }

        except Exception as e:
            logger.error("频域分析失败", error=str(e))
            return {
                'method': 'frequency_domain',
                'is_anomalous': False,
                'confidence': 0.0,
                'risk_factors': [f"分析失败: {e}"],
                'details': {}
            }

    def _analyze_gradients(self, image_np: np.ndarray) -> Dict[str, Any]:
        """Analyze Sobel-gradient magnitude/direction statistics.

        Args:
            image_np: HWC uint8 RGB image

        Returns:
            per-method result dict; failures are reported in-band rather
            than raised (see _analyze_pixel_statistics)
        """
        try:
            gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)

            # First-order derivatives via 3x3 Sobel kernels.
            grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
            grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)

            gradient_magnitude = np.sqrt(grad_x**2 + grad_y**2)
            gradient_std = np.std(gradient_magnitude)
            gradient_max = np.max(gradient_magnitude)

            gradient_direction = np.arctan2(grad_y, grad_x)

            # Entropy of the gradient-magnitude histogram.
            grad_hist, _ = np.histogram(gradient_magnitude.flatten(), bins=50, density=True)
            grad_hist = grad_hist[grad_hist > 0]
            grad_entropy = -np.sum(grad_hist * np.log2(grad_hist + 1e-10))

            is_anomalous = False
            risk_factors = []

            # NOTE(review): Sobel on uint8 data yields magnitudes far
            # above 1, so both thresholds below (0.4-0.6 and 1.0) look
            # tuned for [0, 1] inputs and will fire on virtually every
            # image at this scale — confirm the intended normalisation.
            if gradient_std > self.thresholds['gradient_std']:
                is_anomalous = True
                risk_factors.append(f"梯度标准差过高: {gradient_std:.3f}")

            if gradient_max > 1.0:
                is_anomalous = True
                risk_factors.append(f"最大梯度值异常: {gradient_max:.3f}")

            # Unnaturally uniform gradient directions are suspicious.
            direction_std = np.std(gradient_direction)
            if direction_std < 1.0:
                is_anomalous = True
                risk_factors.append(f"梯度方向过于均匀: {direction_std:.3f}")

            return {
                'method': 'gradient_analysis',
                'is_anomalous': is_anomalous,
                'confidence': min(1.0, gradient_std * 2),
                'risk_factors': risk_factors,
                'details': {
                    'gradient_std': gradient_std,
                    'gradient_max': gradient_max,
                    'gradient_entropy': grad_entropy,
                    'direction_std': direction_std
                }
            }

        except Exception as e:
            logger.error("梯度分析失败", error=str(e))
            return {
                'method': 'gradient_analysis',
                'is_anomalous': False,
                'confidence': 0.0,
                'risk_factors': [f"分析失败: {e}"],
                'details': {}
            }

    @staticmethod
    def _compute_lbp(gray: np.ndarray) -> np.ndarray:
        """Compute an 8-neighbour local binary pattern (LBP) code map.

        For each interior pixel, one bit is set per neighbour whose value
        is >= the centre pixel, producing codes in [0, 255]. Border
        pixels (no full neighbourhood) are excluded from the output.
        """
        g = gray.astype(np.int16)
        center = g[1:-1, 1:-1]
        codes = np.zeros_like(center, dtype=np.uint8)
        h, w = g.shape
        # Clockwise neighbour offsets starting at the top-left pixel.
        offsets = ((-1, -1), (-1, 0), (-1, 1), (0, 1),
                   (1, 1), (1, 0), (1, -1), (0, -1))
        for bit, (dy, dx) in enumerate(offsets):
            neighbour = g[1 + dy:h - 1 + dy, 1 + dx:w - 1 + dx]
            codes |= (neighbour >= center).astype(np.uint8) << bit
        return codes

    def _analyze_spatial_patterns(self, image_np: np.ndarray) -> Dict[str, Any]:
        """Analyze spatial texture (LBP entropy, edge density, local variance).

        Args:
            image_np: HWC uint8 RGB image

        Returns:
            per-method result dict; failures are reported in-band rather
            than raised (see _analyze_pixel_statistics)
        """
        try:
            gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)

            # Local binary patterns. The previous implementation called
            # cv2.localBinaryPattern, which does not exist in OpenCV and
            # made this method fail (AttributeError) on every invocation;
            # a self-contained NumPy LBP is used instead.
            lbp = self._compute_lbp(gray)
            lbp_hist, _ = np.histogram(lbp.ravel(), bins=256, range=(0, 256))
            lbp_hist = lbp_hist.astype(float)
            lbp_hist /= (lbp_hist.sum() + 1e-7)

            # Entropy of the LBP code distribution, in [0, 8] bits.
            spatial_entropy = -np.sum(lbp_hist * np.log2(lbp_hist + 1e-10))

            # Texture complexity via Canny edge density.
            edges = cv2.Canny(gray, 50, 150)
            edge_density = np.sum(edges > 0) / edges.size

            # True local variance over a 5x5 window: E[x^2] - E[x]^2.
            # (The previous code stored the local *mean* under this name.)
            kernel = np.ones((5, 5), np.float32) / 25
            gray_f = gray.astype(np.float32)
            local_mean = cv2.filter2D(gray_f, -1, kernel)
            local_sq_mean = cv2.filter2D(gray_f ** 2, -1, kernel)
            local_variance = local_sq_mean - local_mean ** 2
            variance_std = np.std(local_variance)

            is_anomalous = False
            risk_factors = []

            if spatial_entropy > self.thresholds['spatial_entropy']:
                is_anomalous = True
                risk_factors.append(f"空间熵过高: {spatial_entropy:.2f}")

            if edge_density > 0.3:  # unusually dense edge structure
                is_anomalous = True
                risk_factors.append(f"边缘密度过高: {edge_density:.3f}")

            if variance_std > 50.0:  # abnormal spread of local variance
                is_anomalous = True
                risk_factors.append(f"局部方差异常: {variance_std:.1f}")

            return {
                'method': 'spatial_analysis',
                'is_anomalous': is_anomalous,
                'confidence': min(1.0, (spatial_entropy / 10) + edge_density),
                'risk_factors': risk_factors,
                'details': {
                    'spatial_entropy': spatial_entropy,
                    'edge_density': edge_density,
                    'local_variance_std': variance_std
                }
            }

        except Exception as e:
            logger.error("空间模式分析失败", error=str(e))
            return {
                'method': 'spatial_analysis',
                'is_anomalous': False,
                'confidence': 0.0,
                'risk_factors': [f"分析失败: {e}"],
                'details': {}
            }

    def _combine_detection_results(self, method_results: List[Dict], final_result: Dict):
        """Fold per-method verdicts into the final decision (in place).

        Args:
            method_results: list of per-method result dicts
            final_result: aggregate result dict, mutated in place
        """
        anomalous_methods = []
        all_risk_factors = []
        total_confidence = 0.0
        method_count = 0

        for result in method_results:
            if result['is_anomalous']:
                anomalous_methods.append(result['method'])
                all_risk_factors.extend(result['risk_factors'])
                total_confidence += result['confidence']
                method_count += 1
                # Track how often each method fires (was never updated).
                per_method = self.detection_stats['detection_methods']
                per_method[result['method']] = per_method.get(result['method'], 0) + 1

            final_result['detection_methods'][result['method']] = {
                'detected': result['is_anomalous'],
                'confidence': result['confidence'],
                'risk_factors': result['risk_factors']
            }

        # NOTE(review): a single anomalous method is enough to flag the
        # sample — aggressive given the module's stated false-positive
        # control; consider requiring agreement of >= 2 methods.
        final_result['is_adversarial'] = len(anomalous_methods) >= 1

        # Mean confidence over the methods that actually flagged.
        if method_count > 0:
            final_result['confidence'] = total_confidence / method_count
        else:
            final_result['confidence'] = 0.0

        final_result['risk_factors'] = all_risk_factors

        final_result['methods_detected'] = len(anomalous_methods)
        final_result['total_methods'] = len(method_results)

        # Map aggregate confidence onto a coarse threat level.
        if final_result['confidence'] > 0.8:
            final_result['threat_level'] = 'HIGH'
        elif final_result['confidence'] > 0.5:
            final_result['threat_level'] = 'MEDIUM'
        elif final_result['confidence'] > 0.2:
            final_result['threat_level'] = 'LOW'
        else:
            final_result['threat_level'] = 'MINIMAL'

    def get_detection_statistics(self) -> Dict[str, Any]:
        """Return a snapshot of the running detection counters.

        'detection_rate' is the percentage of checks flagged adversarial
        (0 when no checks have run yet).
        """
        return {
            'total_checks': self.detection_stats['total_checks'],
            'adversarial_detected': self.detection_stats['adversarial_detected'],
            'false_positives': self.detection_stats['false_positives'],
            'detection_rate': (self.detection_stats['adversarial_detected'] /
                             max(1, self.detection_stats['total_checks'])) * 100,
            'sensitivity_level': self.sensitivity_level
        }


# Module-level shared detector instance (default 'medium' sensitivity).
_adversarial_detector = AdversarialDetector()


def check_adversarial_attack(image_tensor: torch.Tensor,
                            sensitivity_level: str = 'medium') -> Dict[str, Any]:
    """
    Convenience wrapper around :class:`AdversarialDetector`.

    Reuses the module-level detector when the requested sensitivity
    matches its configuration, so detection statistics accumulate across
    calls; otherwise a one-off detector is built at the requested level.

    Args:
        image_tensor: input image tensor, (C, H, W) or (1, C, H, W)
        sensitivity_level: detection sensitivity ('low', 'medium', 'high')

    Returns:
        detection-result dict (see AdversarialDetector.detect_adversarial)

    Raises:
        AdversarialDetectionError: if detection fails
    """
    # Previously a fresh detector was constructed on every call and the
    # module-level _adversarial_detector was never used at all.
    if sensitivity_level == _adversarial_detector.sensitivity_level:
        detector = _adversarial_detector
    else:
        detector = AdversarialDetector(sensitivity_level=sensitivity_level)
    return detector.detect_adversarial(image_tensor)


# Public API of this module.
__all__ = [
    'AdversarialDetectionError',
    'AdversarialDetector',
    'check_adversarial_attack'
]