import torch
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from typing import Dict, Tuple, Optional
import torch.nn.functional as F


class GradientAnalyzer:
    """Gradient-based attribution analysis for a two-input model.

    Provides input gradients of an MSE loss, absolute-gradient saliency
    maps, integrated gradients, and a matplotlib visualization that is
    saved under ``save_dir``.

    All attribution methods operate on detached clones of the caller's
    tensors and use ``torch.autograd.grad`` targeted at the inputs only,
    so they never mutate the caller's ``requires_grad`` flags, never
    accumulate into ``.grad`` across calls, and never pollute the model
    parameters' gradient buffers.
    """

    def __init__(self, model: torch.nn.Module, save_dir: Path):
        self.model = model
        self.save_dir = Path(save_dir)
        # Create the output directory eagerly so later savefig calls succeed.
        self.save_dir.mkdir(parents=True, exist_ok=True)

    def compute_gradients(self, input1: torch.Tensor, input2: torch.Tensor,
                         target: torch.Tensor) -> Dict[str, torch.Tensor]:
        """Compute gradients of the MSE loss w.r.t. both inputs.

        Args:
            input1: First model input (not modified).
            input2: Second model input (not modified).
            target: Target tensor for the MSE loss.

        Returns:
            Dict with 'input1'/'input2' gradient tensors on CPU (or None
            if an input is unused by the model) and the scalar 'loss'.
        """
        # Detached clones: the caller's tensors keep their requires_grad
        # state and their .grad attribute untouched.
        x1 = input1.detach().clone().requires_grad_(True)
        x2 = input2.detach().clone().requires_grad_(True)

        output = self.model(x1, x2)
        loss = F.mse_loss(output, target)

        # autograd.grad (rather than loss.backward()) computes exactly the
        # two input gradients: no accumulation into .grad, and the model
        # parameters' gradient buffers are left alone.
        grad1, grad2 = torch.autograd.grad(loss, [x1, x2], allow_unused=True)

        return {
            'input1': grad1.detach().cpu() if grad1 is not None else None,
            'input2': grad2.detach().cpu() if grad2 is not None else None,
            'loss': loss.item(),
        }

    def compute_saliency_maps(self, input1: torch.Tensor, input2: torch.Tensor,
                             target_class: Optional[int] = None) -> Dict[str, torch.Tensor]:
        """Compute absolute-gradient saliency maps for both inputs.

        Args:
            input1: First model input (not modified).
            input2: Second model input (not modified).
            target_class: Index into ``output[0, :]`` to attribute. When
                None, the maximum output value is used; when the output is
                1-D, the sum of the output is used instead.

        Returns:
            Dict with |gradient| tensors on CPU for 'input1'/'input2'
            (None for an input the score does not depend on).
        """
        x1 = input1.detach().clone().requires_grad_(True)
        x2 = input2.detach().clone().requires_grad_(True)

        output = self.model(x1, x2)

        # Without a target class, attribute the strongest activation.
        if target_class is None:
            score = output.max()
        else:
            score = output[0, target_class] if len(output.shape) > 1 else output.sum()

        grad1, grad2 = torch.autograd.grad(score, [x1, x2], allow_unused=True)

        return {
            'input1': grad1.abs().detach().cpu() if grad1 is not None else None,
            'input2': grad2.abs().detach().cpu() if grad2 is not None else None,
        }

    def integrated_gradients(self, input1: torch.Tensor, input2: torch.Tensor,
                           baseline1: Optional[torch.Tensor] = None,
                           baseline2: Optional[torch.Tensor] = None,
                           steps: int = 50) -> Dict[str, torch.Tensor]:
        """Compute integrated gradients along a straight-line path.

        Gradients of ``output.sum()`` are averaged over ``steps`` evenly
        spaced interpolation points between baseline and input, then
        scaled by (input - baseline).

        Args:
            input1, input2: Model inputs (not modified).
            baseline1, baseline2: Path start points; zeros if None.
            steps: Number of interpolation points (Riemann approximation).

        Returns:
            Dict with attribution tensors on CPU for 'input1'/'input2'.
        """
        if baseline1 is None:
            baseline1 = torch.zeros_like(input1)
        if baseline2 is None:
            baseline2 = torch.zeros_like(input2)

        # Detach everything up front so the interpolated tensors below are
        # leaves: requires_grad_ would raise on a non-leaf if the caller's
        # tensors already required grad.
        x1, x2 = input1.detach(), input2.detach()
        base1, base2 = baseline1.detach(), baseline2.detach()

        alphas = torch.linspace(0, 1, steps)

        total_grads1 = torch.zeros_like(x1)
        total_grads2 = torch.zeros_like(x2)

        for alpha in alphas:
            # Point on the straight-line path between baseline and input.
            interp1 = (base1 + alpha * (x1 - base1)).requires_grad_(True)
            interp2 = (base2 + alpha * (x2 - base2)).requires_grad_(True)

            score = self.model(interp1, interp2).sum()

            grads = torch.autograd.grad(score, [interp1, interp2],
                                        create_graph=False, retain_graph=False,
                                        allow_unused=True)

            if grads[0] is not None:
                total_grads1 += grads[0].detach()
            if grads[1] is not None:
                total_grads2 += grads[1].detach()

        # Average the path gradients and scale by the input displacement.
        ig1 = total_grads1 * (x1 - base1) / steps
        ig2 = total_grads2 * (x2 - base2) / steps

        return {
            'input1': ig1.cpu(),
            'input2': ig2.cpu(),
        }

    def _plot_gradient_row(self, ax_row, inp: torch.Tensor, grad: torch.Tensor,
                           input_title: str, idx: int):
        """Render one input's image, gradient-magnitude map and histogram
        into a row of three axes."""
        # Original input: show the first channel for (C, H, W), else as-is.
        if len(inp.shape) == 3:
            ax_row[0].imshow(inp[0].cpu(), cmap='gray')
        else:
            ax_row[0].imshow(inp.cpu(), cmap='gray')
        ax_row[0].set_title(input_title)
        ax_row[0].axis('off')

        # Gradient magnitude: L2 norm across channels for (C, H, W),
        # absolute value for (H, W).
        if len(grad.shape) == 3:
            grad_mag = torch.norm(grad, dim=0)
        else:
            grad_mag = torch.abs(grad)

        im = ax_row[1].imshow(grad_mag, cmap='hot')
        ax_row[1].set_title(f'Gradient Magnitude {idx}')
        ax_row[1].axis('off')
        plt.colorbar(im, ax=ax_row[1])

        # Histogram of the raw (signed) gradient values.
        ax_row[2].hist(grad.flatten().numpy(), bins=50, alpha=0.7)
        ax_row[2].set_title(f'Gradient Distribution {idx}')
        ax_row[2].set_xlabel('Gradient Value')
        ax_row[2].set_ylabel('Frequency')

    def visualize_gradients(self, gradients: Dict[str, torch.Tensor],
                          inputs: Dict[str, torch.Tensor]):
        """Visualize gradients for both inputs and save the figure.

        Args:
            gradients: Dict as returned by :meth:`compute_gradients`
                (CPU tensors; entries may be None or missing).
            inputs: Dict with the original 'input1'/'input2' batches.

        Side effects: writes ``gradient_analysis.png`` to ``save_dir``.
        """
        fig, axes = plt.subplots(2, 3, figsize=(18, 12))
        fig.suptitle('Gradient Analysis', fontsize=16)

        row_specs = [('input1', 'Input 1 (Sensor)'), ('input2', 'Input 2 (DAS Image)')]
        for row, (key, title) in enumerate(row_specs):
            # .get() tolerates a partial dict; None entries are skipped.
            if gradients.get(key) is None:
                continue
            # First element of the batch only.
            self._plot_gradient_row(axes[row], inputs[key][0],
                                    gradients[key][0], title, row + 1)

        plt.tight_layout()
        save_path = self.save_dir / 'gradient_analysis.png'
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"梯度分析图已保存: {save_path}")
