import torch
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from typing import Dict, Optional, Tuple
import cv2
from scipy import ndimage


class SaliencyAnalyzer:
    """Gradient-based saliency analysis for a dual-input model.

    Wraps a ``torch.nn.Module`` whose ``forward`` takes two tensors
    (input 1: "Sensor", input 2: "DAS", per the plot labels) and offers
    several attribution methods — vanilla gradients, SmoothGrad, guided
    backpropagation and Grad-CAM — plus a matplotlib visualization helper.
    """

    def __init__(self, model: torch.nn.Module, save_dir: Path):
        self.model = model
        # All generated figures are written under this directory.
        self.save_dir = Path(save_dir)
        self.save_dir.mkdir(parents=True, exist_ok=True)

    def _input_gradients(self, input1: torch.Tensor,
                         input2: torch.Tensor) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Return d(sum(model(input1, input2))) / d(input1, input2).

        The inputs are detached before enabling gradients so the caller's
        tensors are never mutated (the previous implementation toggled
        ``requires_grad`` on the caller's tensors, which also raises for
        non-leaf inputs).  ``allow_unused=True`` yields ``None`` for an
        input the model ignores instead of raising, which is what the
        ``is not None`` checks downstream expect.
        """
        input1 = input1.detach().requires_grad_(True)
        input2 = input2.detach().requires_grad_(True)
        score = self.model(input1, input2).sum()
        return torch.autograd.grad(score, [input1, input2],
                                   create_graph=False, allow_unused=True)

    def vanilla_saliency(self, input1: torch.Tensor,
                         input2: torch.Tensor) -> Dict[str, Optional[torch.Tensor]]:
        """Plain gradient saliency: |d score / d input| per input.

        Returns a dict with keys ``'input1'``/``'input2'``; a value is
        ``None`` when the model does not use that input.
        """
        grads = self._input_gradients(input1, input2)
        return {
            'input1': grads[0].abs().detach().cpu() if grads[0] is not None else None,
            'input2': grads[1].abs().detach().cpu() if grads[1] is not None else None,
        }

    def smooth_grad(self, input1: torch.Tensor, input2: torch.Tensor,
                    noise_level: float = 0.15,
                    num_samples: int = 25) -> Dict[str, torch.Tensor]:
        """SmoothGrad: average |gradient| over noisy copies of the inputs.

        Args:
            noise_level: noise std as a fraction of each input's std.
            num_samples: number of noisy samples to average (must be >= 1).

        Raises:
            ValueError: if ``num_samples`` is less than 1.
        """
        if num_samples < 1:
            raise ValueError("num_samples must be >= 1")

        saliency_sum1 = torch.zeros_like(input1)
        saliency_sum2 = torch.zeros_like(input2)
        # Noise scale is loop-invariant; compute it once.
        sigma1 = noise_level * input1.std()
        sigma2 = noise_level * input2.std()

        for _ in range(num_samples):
            g1, g2 = self._input_gradients(
                input1 + torch.randn_like(input1) * sigma1,
                input2 + torch.randn_like(input2) * sigma2,
            )
            if g1 is not None:
                saliency_sum1 += g1.abs().detach()
            if g2 is not None:
                saliency_sum2 += g2.abs().detach()

        return {
            'input1': (saliency_sum1 / num_samples).cpu(),
            'input2': (saliency_sum2 / num_samples).cpu(),
        }

    def guided_backprop(self, input1: torch.Tensor,
                        input2: torch.Tensor) -> Dict[str, Optional[torch.Tensor]]:
        """Guided backpropagation: ReLU input-gradients clamped to >= 0.

        Uses ``register_full_backward_hook`` (the non-"full" variant is
        deprecated and can fire with incorrect gradients).  Hooks are
        removed in a ``finally`` block so a failing forward/backward pass
        cannot leave the model permanently patched.  Gradients are
        returned signed (not absolute), as in the original contract.
        """
        def _clamp_relu_grad(module, grad_in, grad_out):
            # Guided BP: propagate only positive gradient components.
            # grad_in of a ReLU is already zero where its input was < 0.
            return (torch.clamp(grad_in[0], min=0.0),)

        hooks = [m.register_full_backward_hook(_clamp_relu_grad)
                 for m in self.model.modules()
                 if isinstance(m, torch.nn.ReLU)]
        try:
            grads = self._input_gradients(input1, input2)
        finally:
            for hook in hooks:
                hook.remove()

        return {
            'input1': grads[0].detach().cpu() if grads[0] is not None else None,
            'input2': grads[1].detach().cpu() if grads[1] is not None else None,
        }

    def grad_cam(self, input1: torch.Tensor, input2: torch.Tensor,
                 target_layer: str) -> Dict[str, Optional[torch.Tensor]]:
        """Grad-CAM over the module named ``target_layer``.

        Returns a dict with keys ``'cam'``, ``'activations'`` and
        ``'gradients'`` (all ``None`` when the layer is not found — the
        previous version returned unrelated ``'input1'/'input2'`` keys on
        that path, breaking callers that check the documented keys).
        Assumes the target layer produces NCHW activations, since the
        pooling runs over dims (2, 3) — TODO confirm for this model.
        """
        captured: Dict[str, torch.Tensor] = {}

        def _save_grad(module, grad_input, grad_output):
            captured['grad'] = grad_output[0]

        def _save_act(module, inputs, output):
            captured['act'] = output

        target_module = dict(self.model.named_modules()).get(target_layer)
        if target_module is None:
            print(f"未找到目标层: {target_layer}")
            return {'cam': None, 'activations': None, 'gradients': None}

        backward_handle = target_module.register_full_backward_hook(_save_grad)
        forward_handle = target_module.register_forward_hook(_save_act)
        try:
            # Clear stale parameter grads so repeated calls do not accumulate.
            self.model.zero_grad(set_to_none=True)
            score = self.model(input1, input2).sum()
            score.backward()
        finally:
            backward_handle.remove()
            forward_handle.remove()

        grads = captured['grad']
        acts = captured['act']

        # Channel weights = global average pooling of the gradients.
        weights = torch.mean(grads, dim=(2, 3), keepdim=True)
        # Weighted channel sum followed by ReLU, as in the Grad-CAM paper.
        cam = torch.clamp(torch.sum(weights * acts, dim=1, keepdim=True), min=0)

        return {
            'cam': cam.detach().cpu(),
            'activations': acts.detach().cpu(),
            'gradients': grads.detach().cpu(),
        }

    @staticmethod
    def _to_display_map(sal: torch.Tensor) -> torch.Tensor:
        """Collapse a channel dim (mean) and min-max normalize to [0, 1].

        A flat map (max - min <= 1e-8) is rendered as all zeros to avoid
        dividing by ~0.
        """
        sal = sal.clone().detach().cpu()
        if sal.dim() == 3:
            sal = sal.mean(dim=0)
        lo, hi = sal.min(), sal.max()
        if hi - lo > 1e-8:
            return (sal - lo) / (hi - lo)
        return torch.zeros_like(sal)

    def visualize_saliency_maps(self, saliency_results: Dict[str, Dict[str, torch.Tensor]],
                                inputs: Dict[str, torch.Tensor], method_name: str):
        """Plot the raw inputs next to each method's saliency maps.

        Row 0 shows input 1 ("Sensor"), row 1 input 2 ("DAS"); column 0
        holds the inputs and each following column one attribution
        method.  The figure is saved to
        ``save_dir / 'saliency_analysis_<method_name>.png'``.
        """
        num_methods = len(saliency_results)
        fig, axes = plt.subplots(2, num_methods + 1,
                                 figsize=(5 * (num_methods + 1), 10))
        fig.suptitle(f'Saliency Analysis - {method_name}', fontsize=16)
        # Normalize to a 2-D [row, col] array regardless of column count.
        axes = np.asarray(axes).reshape(2, -1)

        # Column 0: the raw inputs (first batch element, first channel if 3-D).
        for row, (key, title) in enumerate([('input1', 'Input 1 (Sensor)'),
                                            ('input2', 'Input 2 (DAS)')]):
            display = inputs[key][0]
            if display.dim() == 3:
                display = display[0]
            axes[row, 0].imshow(display.cpu(), cmap='gray', aspect='auto')
            axes[row, 0].set_title(title)
            axes[row, 0].axis('off')

        # Remaining columns: one per attribution method.
        for idx, (method, saliency) in enumerate(saliency_results.items()):
            col = idx + 1
            for row, key in enumerate(['input1', 'input2']):
                if saliency.get(key) is None:
                    continue
                sal = self._to_display_map(saliency[key][0])
                im = axes[row, col].imshow(sal, cmap='hot', aspect='auto')
                axes[row, col].set_title(f'{method} - Input {row + 1}')
                axes[row, col].axis('off')
                plt.colorbar(im, ax=axes[row, col])

        plt.tight_layout(rect=[0, 0, 1, 0.96])  # leave room for the suptitle
        save_path = self.save_dir / f'saliency_analysis_{method_name.lower()}.png'
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"显著性分析图已保存: {save_path}")

    def comprehensive_saliency_analysis(self, input1: torch.Tensor,
                                        input2: torch.Tensor) -> Dict[str, Dict[str, Optional[torch.Tensor]]]:
        """Run every input-attribution method and save one combined figure.

        A method that raises is logged and recorded as all-``None``
        instead of aborting the remaining methods.  Inputs are cloned
        per method so no method can perturb another's view of the data.
        """
        methods = {
            'Vanilla': self.vanilla_saliency,
            'SmoothGrad': self.smooth_grad,
            'GuidedBackprop': self.guided_backprop,
        }

        results = {}
        inputs = {'input1': input1, 'input2': input2}

        for method_name, method_func in methods.items():
            print(f"执行 {method_name} 分析...")
            try:
                results[method_name] = method_func(input1.clone(), input2.clone())
            except Exception as e:
                print(f"{method_name} 分析失败: {e}")
                results[method_name] = {'input1': None, 'input2': None}

        # Visualize everything on a single comparison figure.
        self.visualize_saliency_maps(results, inputs, 'Comprehensive')

        return results
