# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name:       a_y_net.py
   Project Name:    beamform_AYnet
   Author :         Chunshan YANG
   Date:            2025/01/31
   Device:          GTX2070
-------------------------------------------------
   Change Activity:
                   2025/01/31
-------------------------------------------------
"""
from typing import Literal
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from torch.nn import init
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy.ndimage import zoom


# Creates and returns a 3x3 2D convolution layer. It accepts input/output channel
# counts and related options and forwards them to the nn.Conv2d constructor.
def conv3x3(in_channels, out_channels, stride=1,
            padding=1, bias=True, groups=1):
    """Build a 3x3 2D convolution with the given channel/stride settings."""
    return nn.Conv2d(in_channels, out_channels, kernel_size=3,
                     stride=stride, padding=padding, bias=bias, groups=groups)

# Implements two upsampling methods: transposed convolution, or bilinear
# interpolation followed by a 1x1 convolution, selected via the `mode` argument.
# If mode is 'transpose', a transposed convolution is used; otherwise bilinear
# upsampling followed by a 1x1 convolution.
def upconv2x2(in_channels, out_channels, mode='transpose'):
    """Return a module that doubles spatial resolution.

    mode == 'transpose' yields a stride-2 transposed convolution; any other
    value yields bilinear upsampling followed by a 1x1 convolution.
    """
    if mode == 'transpose':
        return nn.ConvTranspose2d(in_channels, out_channels,
                                  kernel_size=2, stride=2)
    # Bilinear path: the 1x1 conv adjusts the channel count after upsampling
    # (callers typically keep out_channels == in_channels here).
    return nn.Sequential(
        nn.Upsample(mode='bilinear', scale_factor=2),
        conv1x1(in_channels, out_channels),
    )

def conv1x1(in_channels, out_channels, groups=1):
    """Build a pointwise (1x1) convolution, optionally grouped."""
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=1, stride=1, groups=groups)

# Applies two 3x3 convolutions, each followed by batch normalization and
# Leaky ReLU; if pooling is enabled, a max-pool is applied afterwards.
# The forward method returns both the pooled and the pre-pool feature maps.
class DownConv(nn.Module):
    """Encoder stage: two (3x3 conv -> BN -> LeakyReLU) steps, then an
    optional 2x2 max-pool.

    forward returns (output, before_pool) so the pre-pool feature map can be
    used as a skip connection.
    """

    def __init__(self, in_channels, out_channels, pooling=True):
        super(DownConv, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.pooling = pooling

        self.conv1 = conv3x3(self.in_channels, self.out_channels)
        self.bn1 = nn.BatchNorm2d(self.out_channels)
        self.conv2 = conv3x3(self.out_channels, self.out_channels)
        self.bn2 = nn.BatchNorm2d(self.out_channels)

        if self.pooling:
            self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        out = F.leaky_relu(self.bn1(self.conv1(x)), 0.01)
        out = F.leaky_relu(self.bn2(self.conv2(out)), 0.01)
        before_pool = out
        return (self.pool(out) if self.pooling else out), before_pool

# Defines a module with three convolution layers and two batch-normalization
# layers. Two of the convolutions come from conv3x3; the middle one is a
# height-compressing conv. Forward order: conv1 -> BN -> LeakyReLU,
# then conv2 (no activation), then conv3 -> BN -> LeakyReLU.
class Bottom(nn.Module):
    """Bottleneck block: 3x3 conv, a height-compressing (10, 3) conv with
    vertical stride 10, then another 3x3 conv.

    The middle conv divides the input height by 10 while keeping the width
    (padding 1 on the width axis only); no activation sits between the middle
    conv and the final conv.
    """

    def __init__(self, in_channels, out_channels):
        super(Bottom, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        self.conv1 = conv3x3(self.in_channels, self.out_channels)
        self.bn1 = nn.BatchNorm2d(self.out_channels)
        # Kernel (10, 3), stride (10, 1): height /= 10, width preserved.
        self.conv2 = nn.Conv2d(self.out_channels, self.out_channels,
                               kernel_size=(10, 3), stride=(10, 1), padding=(0, 1))
        self.conv3 = conv3x3(self.out_channels, self.out_channels)
        self.bn2 = nn.BatchNorm2d(self.out_channels)

    def forward(self, x):
        out = F.leaky_relu(self.bn1(self.conv1(x)), 0.01)
        out = self.conv2(out)
        return F.leaky_relu(self.bn2(self.conv3(out)), 0.01)

# Contains two convolution layers and two batch-normalization layers.
# Initialization sets the input/output channel counts and creates the conv
# and batch-norm layers. The forward pass applies conv -> BN -> LeakyReLU twice.
class ImBottom(nn.Module):
    """Image-branch bottleneck: two (3x3 conv -> BN -> LeakyReLU) stages with
    no spatial resizing."""

    def __init__(self, in_channels, out_channels):
        super(ImBottom, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        self.conv1 = conv3x3(self.in_channels, self.out_channels)
        self.bn1 = nn.BatchNorm2d(self.out_channels)
        self.conv2 = conv3x3(self.out_channels, self.out_channels)
        self.bn2 = nn.BatchNorm2d(self.out_channels)

    def forward(self, x):
        out = F.leaky_relu(self.bn1(self.conv1(x)), 0.01)
        return F.leaky_relu(self.bn2(self.conv2(out)), 0.01)

class Combine_Conv(nn.Module):
    """Merge two same-shaped feature maps and refine them with two
    conv -> BN -> LeakyReLU stages.

    merge_mode:
        'concat'  - channel concatenation (conv1 sees in_channels)
        'add'     - elementwise sum (conv1 sees in_channels // 2)
        any other - adaptive weighted sum with learned per-sample weights
    NOTE(review): weight_net is instantiated for every merge_mode even though
    only the adaptive path uses it; kept as-is to preserve the state_dict.
    """

    def __init__(self, in_channels, out_channels, merge_mode='concat', dropout=0):
        super(Combine_Conv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.merge_mode = merge_mode

        # Learns two softmax-normalized scalar weights from the concatenated
        # inputs (used only by the adaptive merge path).
        self.weight_net = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, in_channels//4, 1),
            nn.ReLU(),
            nn.Conv2d(in_channels//4, 2, 1),
            nn.Softmax(dim=1)
        )

        if self.merge_mode == 'concat':
            conv1_in_channels = in_channels
        else:
            # 'add' and adaptive merging keep the single-branch channel count.
            conv1_in_channels = in_channels // 2

        self.conv1 = conv3x3(conv1_in_channels, self.out_channels)
        self.bn1 = nn.BatchNorm2d(self.out_channels)
        self.conv2 = conv3x3(self.out_channels, self.out_channels)
        self.bn2 = nn.BatchNorm2d(self.out_channels)

        # Dropout between the two conv stages; active only when dropout > 0.
        self.dropout_layer = nn.Dropout2d(dropout)
        self.dropout_rate = dropout

    def forward(self, rd, x):
        if self.merge_mode == 'concat':
            merged = torch.cat((rd, x), dim=1)
        elif self.merge_mode == 'add':
            merged = rd + x
        else:
            # Adaptive: weight each branch by a learned per-sample scalar.
            weights = self.weight_net(torch.cat((rd, x), dim=1))
            merged = weights[:, 0:1] * rd + weights[:, 1:2] * x

        merged = F.leaky_relu(self.bn1(self.conv1(merged)), 0.01)
        if self.dropout_rate > 0:
            merged = self.dropout_layer(merged)
        return F.leaky_relu(self.bn2(self.conv2(merged)), 0.01)

# Defines a PyTorch module named UpConv for upsampling followed by convolution.
# Initialization picks the upsampling method (transposed convolution or bilinear
# interpolation) based on the given mode and defines two 3x3 convolutions with
# matching batch-norm layers. The forward pass upsamples, then applies the two
# conv + BN stages with Leaky ReLU activation.
class UpConv(nn.Module):
    """Decoder stage: 2x upsampling followed by two (3x3 conv -> BN ->
    LeakyReLU) steps.

    The upsampling operator keeps the channel count; the first 3x3 conv then
    maps in_channels down to out_channels.
    """

    def __init__(self, in_channels, out_channels, up_mode='transpose'):
        super(UpConv, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.up_mode = up_mode

        self.upconv = upconv2x2(self.in_channels, self.in_channels,
            mode=self.up_mode)

        self.conv1 = conv3x3(self.in_channels, self.out_channels)
        self.bn1 = nn.BatchNorm2d(self.out_channels)
        self.conv2 = conv3x3(self.out_channels, self.out_channels)
        self.bn2 = nn.BatchNorm2d(self.out_channels)

    def forward(self, x):
        """Upsample, then refine with the two conv-BN-LeakyReLU stages."""
        up = self.upconv(x)
        up = F.leaky_relu(self.bn1(self.conv1(up)), 0.01)
        return F.leaky_relu(self.bn2(self.conv2(up)), 0.01)


# Defines an attention module, Attention_block, used to emphasize specific
# regions of a feature map. Main steps:
# 1. Initialization defines three 1x1 convolution stages and a ReLU activation.
# 2. In the forward pass, both input feature maps are convolved and batch-normalized.
# 3. The projected maps are summed and passed through ReLU.
# 4. A sigmoid produces the attention weight map.
# 5. The attention map is multiplied with the original skip features to give
#    the weighted output.
class Attention_block(nn.Module):
    """Additive attention gate for skip connections (Attention U-Net style).

    The gating signal g and the skip features x are each projected to F_int
    channels, summed, passed through ReLU, and squeezed to a one-channel
    sigmoid map that rescales x.
    """

    def __init__(self, F_g, F_l, F_int):
        super(Attention_block, self).__init__()
        # Projection of the gating signal.
        self.W_g = nn.Sequential(
            nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int)
        )
        # Projection of the skip features.
        self.W_x = nn.Sequential(
            nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int)
        )
        # Collapse to a single-channel attention map in (0, 1).
        self.psi = nn.Sequential(
            nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(1),
            nn.Sigmoid()
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, g, x):
        gate = self.W_g(g)
        skip = self.W_x(x)
        attn = self.psi(self.relu(gate + skip))
        # The one-channel map broadcasts over the channels of x.
        return x * attn


class PatchProjectionEncoder(nn.Module):
    """
    Extracts strided patches from the input, linearly projects each patch to
    `embed_dim` features, and reshapes the result into an image whose channel
    count is `embed_dim` and whose spatial grid is the patch grid.
    """
    def __init__(self, input_channels,
                 patch_height, patch_width,
                 stride_h, stride_w,
                 embed_dim,
                 use_bn=True,
                 activation_fn=nn.LeakyReLU(0.01, inplace=True)):
        super(PatchProjectionEncoder, self).__init__()

        self.unfold = nn.Unfold(kernel_size=(patch_height, patch_width),
                                stride=(stride_h, stride_w))

        # Each flattened patch carries C_in * K_h * K_w values.
        self.features_per_patch = input_channels * patch_height * patch_width

        self.projection = nn.Linear(
            in_features=self.features_per_patch,
            out_features=embed_dim,
            bias=False,
        )
        self.embed_dim = embed_dim

        self.use_bn = use_bn
        if self.use_bn:
            # BatchNorm1d normalizes over C of an (N, C, L) tensor.
            self.bn = nn.BatchNorm1d(embed_dim)

        self.activation = activation_fn

        # Kernel/stride are kept for computing the output grid in forward().
        self.kh, self.kw = (patch_height, patch_width)
        self.sh, self.sw = (stride_h, stride_w)

    def forward(self, x):
        # x: N x C_in x H_in x W_in
        batch, _, height, width = x.shape

        # N x (C_in * K_h * K_w) x L, where L is the number of patches.
        cols = self.unfold(x)

        # Linear expects features last: N x L x features_per_patch.
        projected = self.projection(cols.transpose(1, 2))
        # Back to N x embed_dim x L for BatchNorm1d.
        projected = projected.transpose(1, 2)

        if self.use_bn:
            projected = self.bn(projected)

        if self.activation is not None:
            projected = self.activation(projected)

        # Patch-grid dimensions (valid-convolution arithmetic; nn.Unfold uses
        # no padding here, matching this formula).
        grid_h = (height - self.kh) // self.sh + 1
        grid_w = (width - self.kw) // self.sw + 1

        # N x embed_dim x grid_h x grid_w
        return projected.view(batch, self.embed_dim, grid_h, grid_w)


class ReshapeSpaceToChannel(nn.Module):
    """Rearrange spatial blocks into channels (space-to-depth).

    An input of shape N x C x H x W becomes
    N x (C * block_h * block_w) x (H / block_h) x (W / block_w).
    """

    def __init__(self, block_size_h=10, block_size_w=1):
        super(ReshapeSpaceToChannel, self).__init__()
        self.block_size_h = block_size_h
        self.block_size_w = block_size_w

    def forward(self, x):
        n, c, h, w = x.shape
        bh, bw = self.block_size_h, self.block_size_w

        assert h % bh == 0, "Input height must be divisible by block_size_h"
        assert w % bw == 0, "Input width must be divisible by block_size_w"

        out_h, out_w = h // bh, w // bw

        # Split each spatial axis into (grid, block), move the block axes
        # ahead of the grid axes, then fold the block axes into channels.
        x = x.view(n, c, out_h, bh, out_w, bw)
        x = x.permute(0, 1, 3, 5, 2, 4).contiguous()
        return x.view(n, c * bh * bw, out_h, out_w)

# If you need a learnable layer to adjust to exactly 10 channels AFTER this,
# or if C_in * block_size_h * block_size_w is not 10:
class ReshapeSpaceToChannelEncoder(nn.Module):
    """Space-to-channel rearrangement followed by a learnable 1x1 projection.

    The rearrangement yields C_in * block_h * block_w channels; the 1x1 conv
    maps those to `out_channels` (a learnable adjustment even when the counts
    already match), followed by BN and LeakyReLU.
    """

    def __init__(self, in_channels_after_s2d, out_channels=10, block_size_h=10, block_size_w=1):
        super().__init__()
        self.s2d = ReshapeSpaceToChannel(block_size_h, block_size_w)
        self.conv1x1 = nn.Conv2d(in_channels_after_s2d, out_channels, kernel_size=1, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        self.activation = nn.LeakyReLU(0.01)

    def forward(self, x):
        # N x C x H x W -> N x (C*block_h*block_w) x H/block_h x W/block_w
        rearranged = self.s2d(x)
        return self.activation(self.bn(self.conv1x1(rearranged)))


class SegmentationHead(nn.Module):
    """
    Auxiliary segmentation head producing a single-channel binary mask.

    Emits raw logits (no final activation) intended for BCEWithLogitsLoss.
    """
    def __init__(self, in_channels, use_dropout=False, dropout_rate=0.1):
        super(SegmentationHead, self).__init__()

        # Two conv stages progressively halve the channel count.
        self.seg_conv1 = conv3x3(in_channels, in_channels // 2)
        self.seg_bn1 = nn.BatchNorm2d(in_channels // 2)

        self.seg_conv2 = conv3x3(in_channels // 2, in_channels // 4)
        self.seg_bn2 = nn.BatchNorm2d(in_channels // 4)

        # Optional Dropout2d for regularization.
        self.use_dropout = use_dropout
        if use_dropout:
            self.dropout = nn.Dropout2d(dropout_rate)

        # Final 1x1 conv maps to one channel of binary-classification logits.
        self.seg_final = nn.Conv2d(in_channels // 4, 1, kernel_size=1)

    def forward(self, x):
        feat = F.leaky_relu(self.seg_bn1(self.seg_conv1(x)), 0.01)
        feat = F.leaky_relu(self.seg_bn2(self.seg_conv2(feat)), 0.01)

        if self.use_dropout:
            feat = self.dropout(feat)

        # Logits only; the loss applies the sigmoid.
        return self.seg_final(feat)


class MultiScaleSegmentationHead(nn.Module):
    """
    Binary segmentation head that fuses features from several decoder levels.

    Each feature map is projected to a common channel count, resized to the
    target resolution, concatenated, fused by a 3x3 conv block, and fed to a
    SegmentationHead that emits single-channel logits.
    """
    def __init__(self, feature_channels_list, use_dropout=False, dropout_rate=0.1):
        super(MultiScaleSegmentationHead, self).__init__()

        # One 1x1-conv adapter per input scale, all mapping to the same width.
        self.feature_adapters = nn.ModuleList()
        unified_channels = 64  # common channel count after adaptation

        for channels in feature_channels_list:
            self.feature_adapters.append(
                nn.Sequential(
                    nn.Conv2d(channels, unified_channels, 1),
                    nn.BatchNorm2d(unified_channels),
                    nn.ReLU(inplace=True)
                )
            )

        # Fuses the concatenated adapted features back to unified_channels.
        self.fusion_conv = nn.Sequential(
            conv3x3(unified_channels * len(feature_channels_list), unified_channels),
            nn.BatchNorm2d(unified_channels),
            nn.ReLU(inplace=True)
        )

        # Final binary segmentation head.
        self.seg_head = SegmentationHead(unified_channels, use_dropout, dropout_rate)

    def forward(self, features_list, target_size):
        # Project every scale to the unified channel count and target size.
        adapted = []
        for idx, feats in enumerate(features_list):
            out = self.feature_adapters[idx](feats)
            if out.shape[-2:] != target_size:
                out = F.interpolate(out, size=target_size, mode='bilinear', align_corners=False)
            adapted.append(out)

        # Concatenate, fuse, and segment.
        fused = self.fusion_conv(torch.cat(adapted, dim=1))
        return self.seg_head(fused)


# Model 1 modified Unet for beamforming
class AYNet(nn.Module):
    def __init__(self, in_channels=1, encoder_mode: Literal['patch', 'reshape'] = 'patch', up_mode='transpose', merge_mode='concat', enable_cam=False, 
                 enable_segmentation=False, seg_use_multiscale=False, seg_use_dropout=True, seg_dropout_rate=0.1):
        """
        Arguments:
            in_channels: int, number of channels in the input tensor.
                Default is 1 for single-channel ultrasound images.
            encoder_mode: string, type of encoder to use. Choices: 'patch' or 'reshape'
            up_mode: string, type of upconvolution. Choices: 'transpose'
                for transpose convolution or 'upsample' for nearest neighbour
                upsampling.
            merge_mode: string, type of merging. Choices: 'concat' or 'add'
            enable_cam: bool, whether to enable CAM/interpretability features
            enable_segmentation: bool, whether to enable binary segmentation auxiliary head
            seg_use_multiscale: bool, whether to use multi-scale segmentation head
            seg_use_dropout: bool, whether to use dropout in segmentation head
            seg_dropout_rate: float, dropout rate for segmentation head
        """
        super(AYNet, self).__init__()
        if up_mode in ('transpose', 'upsample'):
            self.up_mode = up_mode
        else:
            raise ValueError("\"{}\" is not a valid mode for "
                             "upsampling. Only \"transpose\" and "
                             "\"upsample\" are allowed.".format(up_mode))

        if merge_mode in ('concat', 'add', 'adaptive'):
            self.merge_mode = merge_mode
        else:
            raise ValueError("\"{}\" is not a valid mode for"
                             "merging up and down paths. "
                             "Only \"concat\" and "
                             "\"add\" are allowed.".format(merge_mode))

        # NOTE: up_mode 'upsample' is incompatible with merge_mode 'add'
        if self.up_mode == 'upsample' and self.merge_mode == 'add':
            raise ValueError("up_mode \"upsample\" is incompatible "
                             "with merge_mode \"add\" at the moment "
                             "because it doesn't make sense to use "
                             "nearest neighbour to reduce "
                             "depth channels (by half).")

        self.in_channels = in_channels
        self.encoder_mode = encoder_mode
        self.enable_segmentation = enable_segmentation
        self.seg_use_multiscale = seg_use_multiscale

        # Interpretability support
        self.enable_cam = enable_cam
        self.activations = {}  # stores intermediate activations
        self.gradients = {}    # stores gradients for Grad-CAM

        # Encoders transform both inputs to n x embed_dim x 128 x 64
        embed_dim = 16
        if encoder_mode == 'patch':
            # For x: (1, 1, 2560, 64) -> (1, 16, 128, 64)
            self.x_encoder = PatchProjectionEncoder(
                input_channels=self.in_channels,
                patch_height=20, patch_width=1,
                stride_h=20, stride_w=1,
                embed_dim=embed_dim
            )
            # For img: (1, 1, 256, 128) -> (1, 16, 128, 64)
            self.img_encoder = PatchProjectionEncoder(
                input_channels=self.in_channels,
                patch_height=2, patch_width=2,
                stride_h=2, stride_w=2,
                embed_dim=embed_dim
            )
        else:  # reshape mode
            # For x: (1, 1, 2560, 64) -> (1, 16, 128, 64)
            self.x_encoder = ReshapeSpaceToChannelEncoder(
                in_channels_after_s2d=self.in_channels*20, out_channels=embed_dim,
                block_size_h=20, block_size_w=1
            )
            # For img: (1, 1, 256, 128) -> (1, 16, 128, 64)
            self.img_encoder = ReshapeSpaceToChannelEncoder(
                in_channels_after_s2d=self.in_channels*4, out_channels=embed_dim,
                block_size_h=2, block_size_w=2
            )

        # U-Net structure adapted for 16-channel inputs at 256x64 resolution
        self.down1 = DownConv(embed_dim, 32)       # 16 -> 32
        self.down2 = DownConv(32, 64)              # 32 -> 64
        self.down3 = DownConv(64, 128)             # 64 -> 128
        self.down4 = DownConv(128, 256, pooling=False)  # 128 -> 256, No pooling at bottom

        self.tdown1 = DownConv(embed_dim, 32)      # 16 -> 32
        self.tdown2 = DownConv(32, 64)             # 32 -> 64
        self.tdown3 = DownConv(64, 128)            # 64 -> 128
        self.tdown4 = DownConv(128, 256, pooling=False)  # 128 -> 256, No pooling at bottom

        self.combine4 = Combine_Conv(512, 256, merge_mode)  # 256+256 -> 256

        self.up3 = UpConv(256, 128, up_mode)      # 256 -> 128
        self.combine3 = Combine_Conv(256, 128, merge_mode)  # 128+128 -> 128
        self.att3 = Attention_block(F_g=128, F_l=128, F_int=64)
        self.combine_att3 = Combine_Conv(256, 128, merge_mode)  # 128+128 -> 128

        self.up2 = UpConv(128, 64, up_mode)       # 128 -> 64
        self.combine2 = Combine_Conv(128, 64, merge_mode)   # 64+64 -> 64
        self.att2 = Attention_block(F_g=64, F_l=64, F_int=32)
        self.combine_att2 = Combine_Conv(128, 64, merge_mode)  # 64+64 -> 64

        self.up1 = UpConv(64, 32, up_mode)        # 64 -> 32
        self.combine1 = Combine_Conv(64, 32, merge_mode)    # 32+32 -> 32
        self.att1 = Attention_block(F_g=32, F_l=32, F_int=16)
        self.combine_att1 = Combine_Conv(64, 32, merge_mode)  # 32+32 -> 32

        self.up0 = UpConv(32, 16, up_mode)        # 32 -> 16, (128,64) -> (256,128)
        self.final_conv = nn.Conv2d(16, 1, kernel_size=1)  # 16 -> 1

        self.final_act = nn.Tanh()

        # Segmentation components - fixed to binary classification
        if self.enable_segmentation:
            if seg_use_multiscale:
                # Multi-scale segmentation head fed by several decoder levels
                feature_channels = [16, 32, 64, 128]  # channel counts of the decoder levels
                self.segmentation_head = MultiScaleSegmentationHead(
                    feature_channels_list=feature_channels,
                    use_dropout=seg_use_dropout,
                    dropout_rate=seg_dropout_rate
                )
            else:
                # Single-scale segmentation head on the final decoder features
                self.segmentation_head = SegmentationHead(
                    in_channels=16,  # output channels of d0
                    use_dropout=seg_use_dropout,
                    dropout_rate=seg_dropout_rate
                )

        self.reset_params()

        # Register hooks if CAM is enabled
        if self.enable_cam:
            self._register_hooks()

    def _register_hooks(self):
        """Register forward/backward hooks to capture intermediate features
        for visualization (activations) and Grad-CAM (gradients)."""
        def save_activation(name):
            def hook(module, input, output):
                if self.enable_cam:
                    if isinstance(output, tuple):
                        # DownConv returns (output, before_pool)
                        self.activations[name] = output[0].detach()
                    else:
                        self.activations[name] = output.detach()
            return hook

        def save_gradient(name):
            def hook(module, grad_input, grad_output):
                if self.enable_cam:
                    if isinstance(grad_output[0], torch.Tensor):
                        self.gradients[name] = grad_output[0].detach()
            return hook

        # Hooks on the key layers.
        # NOTE: register_backward_hook is deprecated and can report incorrect
        # gradients for modules with multiple inputs/outputs; use
        # register_full_backward_hook instead.
        self.down4.register_forward_hook(save_activation('encoder_bottom'))
        self.down4.register_full_backward_hook(save_gradient('encoder_bottom'))

        self.up3.register_forward_hook(save_activation('decoder_3'))
        self.up3.register_full_backward_hook(save_gradient('decoder_3'))

        self.up2.register_forward_hook(save_activation('decoder_2'))
        self.up2.register_full_backward_hook(save_gradient('decoder_2'))

        self.up1.register_forward_hook(save_activation('decoder_1'))
        self.up1.register_full_backward_hook(save_gradient('decoder_1'))

        self.final_conv.register_forward_hook(save_activation('final_layer'))
        self.final_conv.register_full_backward_hook(save_gradient('final_layer'))

    @staticmethod
    def weight_init(m):
        """Xavier-normal init for Conv2d/Linear weights; constants for BN."""
        if isinstance(m, nn.Conv2d):
            init.xavier_normal_(m.weight)
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):  # Add initialization for Linear layers
            init.xavier_normal_(m.weight)
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):  # Add initialization for BatchNorm layers
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)

    def reset_params(self):
        """Apply weight_init to every submodule."""
        for i, m in enumerate(self.modules()):
            self.weight_init(m)

    def enable_visualization(self, enable=True):
        """Enable/disable visualization mode; clears stored tensors when disabled."""
        self.enable_cam = enable
        if not enable:
            self.activations.clear()
            self.gradients.clear()

    def get_activations(self):
        """Return the stored intermediate activations for visualization."""
        return self.activations

    def get_gradients(self):
        """Return the stored gradients for Grad-CAM."""
        return self.gradients

    def forward(self, x, bfimg, return_segmentation=None):
        """
        Forward pass.
        Args:
            x: raw channel data (batch_size, 1, 2560, 64)
            bfimg: beamformed image (batch_size, 1, 256, 128)
            return_segmentation: bool, whether to return the segmentation
                output (if None, falls back to enable_segmentation)
        Returns:
            Without segmentation: out (reconstructed image)
            With segmentation: (out, seg_out) - (reconstruction, segmentation logits)
        """
        # Decide whether segmentation output should be returned
        if return_segmentation is None:
            return_segmentation = self.enable_segmentation

        # Clear any previously stored visualization tensors
        if self.enable_cam:
            self.activations.clear()
            self.gradients.clear()

        # Encode both inputs to the common format: n x 16 x 128 x 64
        x_encoded = self.x_encoder(x)    # (1, 1, 2560, 64) -> (1, 16, 128, 64) raw-data encoding
        img_encoded = self.img_encoder(bfimg)  # (1, 1, 256, 128) -> (1, 16, 128, 64) image encoding

        # Encoder 1: encoded raw-data path
        x1, before_pool1 = self.down1(x_encoded)  # (1, 16, 128, 64) -> x1:(1, 32, 64, 32), before_pool1:(1, 32, 128, 64)
        x2, before_pool2 = self.down2(x1)         # x2:(1, 64, 32, 16), before_pool2:(1, 64, 64, 32)
        x3, before_pool3 = self.down3(x2)         # x3:(1, 128, 16, 8), before_pool3:(1, 128, 32, 16)
        x4, before_pool4 = self.down4(x3)         # x4:(1, 256, 16, 8), before_pool4:(1, 256, 16, 8) - no pooling

        # Encoder 2: encoded beamformed-image path
        bx1, bxbefore_pool1 = self.tdown1(img_encoded)  # (1, 16, 128, 64) -> bx1:(1, 32, 64, 32), bxbefore_pool1:(1, 32, 128, 64)
        bx2, bxbefore_pool2 = self.tdown2(bx1)          # bx2:(1, 64, 32, 16), bxbefore_pool2:(1, 64, 64, 32)
        bx3, bxbefore_pool3 = self.tdown3(bx2)          # bx3:(1, 128, 16, 8), bxbefore_pool3:(1, 128, 32, 16)
        bx4, bxbefore_pool4 = self.tdown4(bx3)          # bx4:(1, 256, 16, 8), bxbefore_pool4:(1, 256, 16, 8) - no pooling

        # Bottom-level feature fusion
        x4_combined = self.combine4(x4, bx4)  # (1, 256, 16, 8) + (1, 256, 16, 8) -> (1, 256, 16, 8)

        # Decoder with attention gates
        # Decoder level 3
        d3 = self.up3(x4_combined)  # (1, 256, 16, 8) -> (1, 128, 32, 16) upsample to restore resolution
        pool3_cat = self.combine3(before_pool3, bxbefore_pool3)  # (1, 128, 32, 16) + (1, 128, 32, 16) -> (1, 128, 32, 16) skip fusion
        att3 = self.att3(d3, pool3_cat)  # attention gate highlights relevant skip features -> (1, 128, 32, 16)
        att3_cat = self.combine_att3(att3, d3)  # (1, 128, 32, 16) + (1, 128, 32, 16) -> (1, 128, 32, 16) fuse attention output

        # Decoder level 2
        d2 = self.up2(att3_cat)  # (1, 128, 32, 16) -> (1, 64, 64, 32) continue upsampling
        pool2_cat = self.combine2(before_pool2, bxbefore_pool2)  # (1, 64, 64, 32) + (1, 64, 64, 32) -> (1, 64, 64, 32) skip fusion
        att2 = self.att2(d2, pool2_cat)  # level-2 attention gate -> (1, 64, 64, 32)
        att2_cat = self.combine_att2(att2, d2)  # (1, 64, 64, 32) + (1, 64, 64, 32) -> (1, 64, 64, 32) fuse attention output

        # Decoder level 1
        d1 = self.up1(att2_cat)  # (1, 64, 64, 32) -> (1, 32, 128, 64) back to encoder input resolution
        pool1_cat = self.combine1(before_pool1, bxbefore_pool1)  # (1, 32, 128, 64) + (1, 32, 128, 64) -> (1, 32, 128, 64) skip fusion
        att1 = self.att1(d1, pool1_cat)  # level-1 attention gate -> (1, 32, 128, 64)
        att1_cat = self.combine_att1(att1, d1)  # (1, 32, 128, 64) + (1, 32, 128, 64) -> (1, 32, 128, 64) fuse attention output

        # Final upsampling and output
        d0 = self.up0(att1_cat)  # (1, 32, 128, 64) -> (1, 16, 256, 128) upsample to output resolution

        # Reconstruction output
        out = self.final_conv(d0)  # (1, 16, 256, 128) -> (1, 1, 256, 128)
        out = self.final_act(out)  # Tanh keeps the output in [-1, 1]

        # Segmentation output
        if return_segmentation and self.enable_segmentation:
            if self.seg_use_multiscale:
                # Collect multi-scale decoder features for segmentation
                multi_scale_features = [d0, d1, d2, d3]
                seg_out = self.segmentation_head(multi_scale_features, target_size=(256, 128))
            else:
                # Segment from the final decoder features
                seg_out = self.segmentation_head(d0)

            return out, seg_out
        else:
            return out


if __name__ == "__main__":
    """
    testing
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")

    # Random test inputs. torch.autograd.Variable is deprecated: plain tensors
    # carry requires_grad directly, and torch.rand matches the uniform [0, 1)
    # values previously drawn via numpy.
    x = torch.rand(1, 1, 2560, 64, requires_grad=True).to(device)
    img = torch.rand(1, 1, 256, 128, requires_grad=True).to(device)

    # Model without the segmentation head
    model = AYNet(in_channels=1, up_mode='upsample', merge_mode='concat', enable_segmentation=False).to(device)
    out = model(x, img)
    print(f"AYNet without segmentation output shape: {out.shape}")

    # Model with the single-scale segmentation head
    model_seg = AYNet(in_channels=1, up_mode='upsample', merge_mode='concat', 
                      enable_segmentation=True, seg_use_multiscale=False).to(device)
    out, seg_out = model_seg(x, img)
    print(f"AYNet with single-scale binary segmentation - reconstruction: {out.shape}, segmentation: {seg_out.shape}")

    # Model with the multi-scale segmentation head
    model_seg_ms = AYNet(in_channels=1, up_mode='upsample', merge_mode='concat', 
                         enable_segmentation=True, seg_use_multiscale=True).to(device)
    out, seg_out = model_seg_ms(x, img)
    print(f"AYNet with multi-scale binary segmentation - reconstruction: {out.shape}, segmentation: {seg_out.shape}")

    # Model with dropout enabled in the segmentation head
    model_seg_dropout = AYNet(in_channels=1, up_mode='upsample', merge_mode='concat', 
                              enable_segmentation=True, seg_use_multiscale=True, 
                              seg_use_dropout=True, seg_dropout_rate=0.2).to(device)
    out, seg_out = model_seg_dropout(x, img)
    print(f"AYNet with dropout binary segmentation - reconstruction: {out.shape}, segmentation: {seg_out.shape}")

    # Parameter count
    total_parameters = sum(param.numel() for param in model_seg_ms.parameters())
    print(f"AYNet with binary segmentation total parameters: {total_parameters:,}")
