# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name:       model_aynet.py
   Project Name:    beamform_AYnet
   Author :         Chunshan YANG
   Date:            2025/01/31
   Device:          GTX2070
-------------------------------------------------
   Change Activity:
                   2025/01/31
-------------------------------------------------
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from torch.nn import init
import numpy as np

# Create and return a 3x3 2D convolution layer; the channel counts and the
# remaining options are forwarded to the nn.Conv2d constructor.
def conv3x3(in_channels, out_channels, stride=1,
            padding=1, bias=True, groups=1):
    """Build a 3x3 2D convolution.

    All options other than the kernel size are forwarded unchanged to
    ``nn.Conv2d``.
    """
    options = dict(stride=stride, padding=padding, bias=bias, groups=groups)
    return nn.Conv2d(in_channels, out_channels, kernel_size=3, **options)

# Two upsampling flavours selected by ``mode``: 'transpose' uses a transposed
# convolution; anything else uses bilinear interpolation followed by a 1x1 convolution.
def upconv2x2(in_channels, out_channels, mode='transpose'):
    """Return a module that doubles the spatial resolution.

    'transpose' selects a stride-2 transposed convolution; any other mode
    selects bilinear upsampling followed by a 1x1 convolution that maps
    ``in_channels`` to ``out_channels``.
    """
    if mode != 'transpose':
        # Bilinear resize first, then a pointwise channel projection.
        return nn.Sequential(
            nn.Upsample(mode='bilinear', scale_factor=2),
            conv1x1(in_channels, out_channels))
    return nn.ConvTranspose2d(
        in_channels, out_channels, kernel_size=2, stride=2)

def conv1x1(in_channels, out_channels, groups=1):
    """Build a stride-1 pointwise (1x1) convolution."""
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=1, stride=1, groups=groups)

# Performs two 3x3 convolutions, each followed by batch norm and Leaky ReLU; if
# pooling is enabled a 2x2 max pool follows.
# forward() returns both the pooled and the pre-pool feature maps.
class DownConv(nn.Module):
    """Encoder stage: two 3x3 conv + BN + LeakyReLU blocks, optional 2x2 max pool.

    ``forward`` returns ``(pooled, pre_pool)`` so the full-resolution features
    remain available for skip connections.
    """

    def __init__(self, in_channels, out_channels, pooling=True):
        super(DownConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.pooling = pooling

        self.conv1 = conv3x3(in_channels, out_channels)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = conv3x3(out_channels, out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

        # Only register the pool when it will actually be used.
        if pooling:
            self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        out = F.leaky_relu(self.bn1(self.conv1(x)), 0.01)
        out = F.leaky_relu(self.bn2(self.conv2(out)), 0.01)
        skip = out
        if self.pooling:
            out = self.pool(out)
        return out, skip

# Bottleneck module with three convolutions and two batch-norm layers.
# The first and third are 3x3 convs (conv + BN + LeakyReLU); the middle one uses
# a (10, 3) kernel with stride (10, 1) to collapse the height dimension by 10x.
class Bottom(nn.Module):
    """Bottleneck for the raw-data encoder branch.

    3x3 conv + BN + LeakyReLU, then a (10, 3)-kernel convolution with stride
    (10, 1) that shrinks the height dimension by a factor of 10 — presumably
    to bring the tall raw-data resolution down to the image branch's
    (TODO confirm) — then another 3x3 conv + BN + LeakyReLU.  Note the middle
    convolution deliberately has no norm or activation of its own.
    """

    def __init__(self, in_channels, out_channels):
        super(Bottom, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        self.conv1 = conv3x3(in_channels, out_channels)
        self.bn1 = nn.BatchNorm2d(out_channels)
        # Height-collapsing convolution: kernel (10, 3), stride (10, 1).
        self.conv2 = nn.Conv2d(out_channels, out_channels,
                               kernel_size=(10, 3), stride=(10, 1), padding=(0, 1))
        self.conv3 = conv3x3(out_channels, out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        out = F.leaky_relu(self.bn1(self.conv1(x)), 0.01)
        out = self.conv2(out)
        return F.leaky_relu(self.bn2(self.conv3(out)), 0.01)

# Image-branch bottleneck containing two convolution and two batch-norm layers.
# Each stage is a 3x3 conv + batch norm + Leaky ReLU, applied at constant
# spatial resolution.
class ImBottom(nn.Module):
    """Bottleneck for the image encoder branch.

    Two 3x3 conv + BN + LeakyReLU stages at constant spatial resolution
    (no pooling, no height collapse).
    """

    def __init__(self, in_channels, out_channels):
        super(ImBottom, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        self.conv1 = conv3x3(in_channels, out_channels)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = conv3x3(out_channels, out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        out = F.leaky_relu(self.bn1(self.conv1(x)), 0.01)
        return F.leaky_relu(self.bn2(self.conv2(out)), 0.01)

class Combine_Conv(nn.Module):
    """Fuse two feature maps and refine with two conv + BN + LeakyReLU stages.

    Arguments:
        in_channels: channel count of the merged tensor in 'concat' mode,
            i.e. the sum of the two inputs' channels.  Each input therefore
            carries ``in_channels // 2`` channels (this matches every call
            site in AYNet, which always passes the concatenated width).
        out_channels: channels produced by the block.
        merge_mode: 'concat' to channel-concatenate the inputs; any other
            value sums them element-wise.
    """

    def __init__(self, in_channels, out_channels, merge_mode='concat'):
        super(Combine_Conv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.merge_mode = merge_mode

        # BUGFIX: in 'add' mode the merged tensor keeps the channel count of a
        # single input (in_channels // 2); the original always built conv1 for
        # the concatenated width and crashed with a channel mismatch whenever
        # merge_mode != 'concat'.  'concat' behaviour is unchanged.
        conv1_in = in_channels if merge_mode == 'concat' else in_channels // 2
        self.conv1 = nn.Conv2d(conv1_in, out_channels, kernel_size=3,
                               stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                               stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, rd, x):
        """Merge ``rd`` and ``x`` (same spatial size) and refine the result."""
        if self.merge_mode == 'concat':
            combine = torch.cat((rd, x), dim=1)
        else:
            combine = rd + x
        combine = F.leaky_relu(self.bn1(self.conv1(combine)), 0.01)
        combine = F.leaky_relu(self.bn2(self.conv2(combine)), 0.01)

        return combine

# UP_Conv: upsampling followed by convolution.
# The constructor picks the upsampling method (transposed conv or bilinear) and
# builds two 3x3 conv + BN stages.
# forward() upsamples, then applies both conv stages with Leaky ReLU activations.
class UP_Conv(nn.Module):
    """2x upsampling followed by two 3x3 conv + BN + LeakyReLU stages."""

    def __init__(self, in_channels, out_channels, up_mode='transpose'):
        super(UP_Conv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.up_mode = up_mode

        # Upsample without changing the channel count; the convs below
        # perform the channel transition.
        self.upconv = upconv2x2(in_channels, in_channels, mode=up_mode)
        self.conv1 = conv3x3(in_channels, out_channels)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = conv3x3(out_channels, out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        """Upsample ``x`` by 2x and refine it with two conv stages."""
        out = self.upconv(x)
        out = F.leaky_relu(self.bn1(self.conv1(out)), 0.01)
        return F.leaky_relu(self.bn2(self.conv2(out)), 0.01)

# UpConv: upsampling plus feature fusion.
# The constructor selects the upsampling method (transposed conv or bilinear)
# and the fusion method (element-wise add or channel concat).
# forward() upsamples ``from_up``, fuses it with the two skip tensors according
# to merge_mode, then applies two conv + batch-norm stages.
class UpConv(nn.Module):
    """Decoder stage that upsamples and fuses two encoder skip connections.

    The upsampled decoder tensor is merged with one skip tensor from each
    encoder pathway — by element-wise addition ('add') or channel
    concatenation (any other merge_mode) — then refined by two
    3x3 conv + BN + LeakyReLU stages.
    """

    def __init__(self, in_channels, out_channels,
                 merge_mode='add', up_mode='transpose'):
        super(UpConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.merge_mode = merge_mode
        self.up_mode = up_mode

        self.upconv = upconv2x2(in_channels, in_channels, mode=up_mode)

        # Concatenating three tensors of ``in_channels`` each triples the
        # width seen by the first convolution.
        first_in = in_channels if merge_mode == 'add' else 3 * in_channels
        self.conv1 = conv3x3(first_in, out_channels)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = conv3x3(out_channels, out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, from_down1, from_down2, from_up):
        """Fuse decoder and skip features.

        Arguments:
            from_down1: skip tensor from the data encoder pathway.
            from_down2: skip tensor from the das encoder pathway.
            from_up: decoder tensor; it is upsampled 2x here before fusion.
        """
        up = self.upconv(from_up)
        if self.merge_mode == 'add':
            merged = up + from_down1 + from_down2
        else:
            merged = torch.cat((up, from_down1, from_down2), 1)
        merged = F.leaky_relu(self.bn1(self.conv1(merged)), 0.01)
        return F.leaky_relu(self.bn2(self.conv2(merged)), 0.01)


# Attention_block: attention gate that emphasises salient regions of a feature map.
# 1. Project both inputs with 1x1 convolutions and batch norm.
# 2. Sum the projections and apply ReLU.
# 3. Reduce to a single channel and apply Sigmoid to obtain an attention map.
# 4. Multiply the skip features by the attention map and return the result.
class Attention_block(nn.Module):
    """Additive attention gate (Attention U-Net style).

    A gating signal ``g`` and a skip feature map ``x`` are each projected to
    ``F_int`` channels by 1x1 conv + BN, summed, passed through ReLU, and
    reduced to a single-channel sigmoid attention map that rescales ``x``
    element-wise.
    """

    def __init__(self, F_g, F_l, F_int):
        super(Attention_block, self).__init__()
        # 1x1 projection of the gating signal.
        self.W_g = nn.Sequential(
            nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int),
        )
        # 1x1 projection of the skip features.
        self.W_x = nn.Sequential(
            nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int),
        )
        # Collapse to a one-channel map squashed into (0, 1).
        self.psi = nn.Sequential(
            nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(1),
            nn.Sigmoid(),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, g, x):
        gate = self.W_g(g)
        skip = self.W_x(x)
        attn = self.psi(self.relu(gate + skip))
        return x * attn

# Model 1: modified U-Net for beamforming.
class AYNet(nn.Module):
    """Dual-encoder attention U-Net for ultrasound beamforming.

    One encoder consumes raw channel data, the other a conventionally
    beamformed image; the decoder fuses both pathways at every level through
    attention gates.

    Arguments:
        in_channels: int, number of channels in the input tensor.
            Default is 3 for RGB images.  NOTE(review): both encoders are
            hard-coded to single-channel inputs; this argument is stored but
            does not size any layer — confirm intent.
        up_mode: string, type of upconvolution. Choices: 'transpose'
            for transpose convolution or 'upsample' for bilinear upsampling.
        merge_mode: string, feature-fusion mode, 'concat' or 'add'.

    Raises:
        ValueError: for an invalid ``up_mode`` or ``merge_mode``, or for the
            unsupported combination up_mode='upsample' with merge_mode='add'.
    """

    def __init__(self, in_channels=3, up_mode='transpose', merge_mode='concat'):
        super(AYNet, self).__init__()
        if up_mode in ('transpose', 'upsample'):
            self.up_mode = up_mode
        else:
            raise ValueError("\"{}\" is not a valid mode for "
                             "upsampling. Only \"transpose\" and "
                             "\"upsample\" are allowed.".format(up_mode))

        if merge_mode in ('concat', 'add'):
            self.merge_mode = merge_mode
        else:
            # BUGFIX: the original interpolated up_mode into this message and
            # was missing a space between "for" and "merging".
            raise ValueError("\"{}\" is not a valid mode for "
                             "merging up and down paths. "
                             "Only \"concat\" and "
                             "\"add\" are allowed.".format(merge_mode))

        # NOTE: up_mode 'upsample' is incompatible with merge_mode 'add'
        if self.up_mode == 'upsample' and self.merge_mode == 'add':
            raise ValueError("up_mode \"upsample\" is incompatible "
                             "with merge_mode \"add\" at the moment "
                             "because it doesn't make sense to use "
                             "nearest neighbour to reduce "
                             "depth channels (by half).")

        self.in_channels = in_channels

        # Encoder 1: raw channel data.
        self.down1 = DownConv(1, 32)
        self.down2 = DownConv(32, 64)
        self.down3 = DownConv(64, 128)
        self.down4 = DownConv(128, 256)
        self.bottom = Bottom(256, 256)

        # Encoder 2: beamformed image.
        self.tdown1 = DownConv(1, 32)
        self.tdown2 = DownConv(32, 64)
        self.tdown3 = DownConv(64, 128)
        self.tdown4 = DownConv(128, 256)
        self.bbottom = ImBottom(256, 256)

        # Level 5: fuse the two bottleneck outputs.
        self.combine5 = Combine_Conv(512, 256, merge_mode)

        # Decoder levels 4..1: upsample, fuse skip tensors, attention-gate,
        # then fuse the gated map with the decoder map.
        self.up4 = UP_Conv(256, 256, up_mode)
        self.combine4 = Combine_Conv(512, 256, merge_mode)
        self.att4 = Attention_block(F_g=256, F_l=256, F_int=128)
        self.combine_att4 = Combine_Conv(512, 128, merge_mode)

        self.up3 = UP_Conv(128, 128, up_mode)
        self.combine3 = Combine_Conv(256, 128, merge_mode)
        self.att3 = Attention_block(F_g=128, F_l=128, F_int=64)
        self.combine_att3 = Combine_Conv(256, 64, merge_mode)

        self.up2 = UP_Conv(64, 64, up_mode)
        self.combine2 = Combine_Conv(128, 64, merge_mode)
        self.att2 = Attention_block(F_g=64, F_l=64, F_int=32)
        self.combine_att2 = Combine_Conv(128, 32, merge_mode)

        self.up1 = UP_Conv(32, 32, up_mode)
        self.combine1 = Combine_Conv(64, 32, merge_mode)
        self.att1 = Attention_block(F_g=32, F_l=32, F_int=16)
        # Final fusion maps down to a single output channel.
        self.combine_att1 = Combine_Conv(64, 1, merge_mode)

        self.reset_params()

    @staticmethod
    def weight_init(m):
        """Xavier-initialise conv weights; zero biases when present."""
        if isinstance(m, nn.Conv2d):
            init.xavier_normal_(m.weight)
            # Guard against layers constructed with bias=False.
            if m.bias is not None:
                init.constant_(m.bias, 0)

    def reset_params(self):
        for m in self.modules():
            self.weight_init(m)

    def forward(self, x, bfimg):
        """Run both encoders and the attention-gated decoder.

        Arguments:
            x: raw channel data; the hard-coded interpolation sizes below
               assume a (N, 1, 2560, 128) input — TODO confirm.
            bfimg: beamformed image, assumed (N, 1, 256, 128) — TODO confirm.
        """
        # Encoder 1: raw data.
        x1, before_pool1 = self.down1(x)
        x2, before_pool2 = self.down2(x1)
        x3, before_pool3 = self.down3(x2)
        x4, before_pool4 = self.down4(x3)
        # The (10, 3)-stride bottleneck collapses the height dimension 10x.
        x5 = self.bottom(x4)
        # Resize raw-data skip tensors to the image-branch skip resolutions.
        # NOTE(review): fixed sizes tie the model to one input resolution.
        # align_corners=False matches the default bilinear behaviour.
        before_pool4_resize = F.interpolate(before_pool4, (32, 16),
                                            mode='bilinear', align_corners=False)
        before_pool3_resize = F.interpolate(before_pool3, (64, 32),
                                            mode='bilinear', align_corners=False)
        before_pool2_resize = F.interpolate(before_pool2, (128, 64),
                                            mode='bilinear', align_corners=False)
        before_pool1_resize = F.interpolate(before_pool1, (256, 128),
                                            mode='bilinear', align_corners=False)

        # Encoder 2: beamformed image.
        bx1, bxbefore_pool1 = self.tdown1(bfimg)
        bx2, bxbefore_pool2 = self.tdown2(bx1)
        bx3, bxbefore_pool3 = self.tdown3(bx2)
        bx4, bxbefore_pool4 = self.tdown4(bx3)
        bx5 = self.bbottom(bx4)

        # Level 5: fuse the raw-data and image bottleneck feature maps.
        x5 = self.combine5(x5, bx5)

        # Level 4: upsample the fused bottleneck, fuse the two skip tensors,
        # gate them with attention, then fuse the gated map with the decoder map.
        d4 = self.up4(x5)
        pool4_cat = self.combine4(before_pool4_resize, bxbefore_pool4)
        att4 = self.att4(d4, pool4_cat)
        att4_cat = self.combine_att4(att4, d4)

        # Level 3: same pattern one scale up.
        d3 = self.up3(att4_cat)
        pool3_cat = self.combine3(before_pool3_resize, bxbefore_pool3)
        att3 = self.att3(d3, pool3_cat)
        att3_cat = self.combine_att3(att3, d3)

        # Level 2.
        d2 = self.up2(att3_cat)
        pool2_cat = self.combine2(before_pool2_resize, bxbefore_pool2)
        att2 = self.att2(d2, pool2_cat)
        att2_cat = self.combine_att2(att2, d2)

        # Level 1: final fusion produces the single-channel output.
        d1 = self.up1(att2_cat)
        pool1_cat = self.combine1(before_pool1_resize, bxbefore_pool1)
        att1 = self.att1(d1, pool1_cat)
        out = self.combine_att1(att1, d1)

        return out

if __name__ == "__main__":
    # Smoke test: build the model and run one forward/backward pass on
    # random inputs (128 elements, 2560 samples of raw data; 256x128 image).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # torch.autograd.Variable is deprecated since PyTorch 0.4; plain tensors
    # with requires_grad=True behave identically.
    x = torch.rand(1, 1, 2560, 128, device=device, requires_grad=True)
    img = torch.rand(1, 1, 256, 128, device=device, requires_grad=True)
    model = AYNet(in_channels=1, up_mode='upsample', merge_mode='concat').to(device)
    # Report the parameter count (the original computed it but never used it).
    total_parameters = sum(param.numel() for param in model.parameters())
    print('total parameters:', total_parameters)

    out = model(x, img)
    print(out.shape)
    out = F.interpolate(out, (256, 128), mode='bilinear')
    loss = torch.mean(out)

    loss.backward()

    print(loss)
