# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name:       main
   Project Name:    beamform_Ynet
   Author :         Hengrong LAN
   Date:            2018/12/27
   Device:          GTX1080Ti


   conv to skip the feature in  DownConv
-------------------------------------------------
   Change Activity:
                   2018/12/10:
-------------------------------------------------
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from torch.nn import init
import numpy as np




def conv3x3(in_channels, out_channels, stride=1,
            padding=1, bias=True, groups=1):
    """Build a 3x3 convolution; default padding=1 preserves spatial size."""
    return nn.Conv2d(in_channels, out_channels, kernel_size=3,
                     stride=stride, padding=padding,
                     bias=bias, groups=groups)

def upconv2x2(in_channels, out_channels, mode='transpose'):
    """2x spatial upsampling: transposed conv, or bilinear upsample + 1x1 conv."""
    if mode != 'transpose':
        # Bilinear path cannot change depth by itself, so a 1x1 conv follows;
        # out_channels is always going to be the same as in_channels.
        return nn.Sequential(
            nn.Upsample(mode='bilinear', scale_factor=2),
            conv1x1(in_channels, out_channels))
    return nn.ConvTranspose2d(in_channels, out_channels,
                              kernel_size=2, stride=2)

def conv1x1(in_channels, out_channels, groups=1):
    """Pointwise (1x1) convolution used for channel mixing / projection."""
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=1, stride=1, groups=groups)



class DownConv(nn.Module):
    """Encoder stage: two (conv3x3 + BN + LeakyReLU) layers, optional pooling.

    Returns both the pooled output and the pre-pool feature map (the skip
    connection). With ``raw_data=True`` the skip is first compressed along
    the height axis by a (20, 3)-kernel, stride-(20, 1) convolution.
    """

    def __init__(self, in_channels, out_channels, pooling=True, pool_mode='both', raw_data=False):
        super(DownConv, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.pooling = pooling
        self.raw_data = raw_data

        self.conv1 = conv3x3(in_channels, out_channels)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = conv3x3(out_channels, out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # Height-compressing conv applied to the skip feature when raw_data.
        self.conv3 = nn.Conv2d(out_channels, out_channels,
                               kernel_size=(20, 3), stride=(20, 1), padding=(0, 1))

        if pooling:
            # Kernel/stride per pooling mode: both axes, height only, or width only.
            pool_shapes = {
                'both': (2, 2),
                'height_only': (2, 1),
                'width_only': (1, 2),
            }
            if pool_mode not in pool_shapes:
                raise ValueError(f"Unsupported pool_mode: {pool_mode}")
            shape = pool_shapes[pool_mode]
            self.pool = nn.MaxPool2d(kernel_size=shape, stride=shape)

    def forward(self, x):
        h = F.leaky_relu(self.bn1(self.conv1(x)), 0.01, inplace=True)
        h = F.leaky_relu(self.bn2(self.conv2(h)), 0.01, inplace=True)
        # Skip connection: optionally height-compressed for the raw-data branch.
        skip = self.conv3(h) if self.raw_data else h
        out = self.pool(h) if self.pooling else h
        return out, skip

class Bottom(nn.Module):
    """Bottleneck for the raw-data encoder: conv, height-compress, conv.

    The middle convolution uses a (10, 3) kernel with stride (10, 1), shrinking
    the height by 10x so the raw-data path matches the image-branch resolution.
    """

    def __init__(self, in_channels, out_channels):
        super(Bottom, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        self.conv1 = conv3x3(in_channels, out_channels)
        self.bn1 = nn.BatchNorm2d(out_channels)
        # Height-collapsing conv (no BN / activation after it, by design).
        self.conv2 = nn.Conv2d(out_channels, out_channels,
                               kernel_size=(10, 3), stride=(10, 1), padding=(0, 1))
        self.conv3 = conv3x3(out_channels, out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        h = F.leaky_relu(self.bn1(self.conv1(x)), 0.01, inplace=True)
        h = self.conv2(h)
        return F.leaky_relu(self.bn2(self.conv3(h)), 0.01, inplace=True)

class ImBottom(nn.Module):
    """Bottleneck for the image branch: two (conv3x3 + BN + LeakyReLU) layers.

    Unlike ``Bottom``, no height compression is needed — the beamformed-image
    branch already has the target spatial resolution.
    """

    def __init__(self, in_channels, out_channels):
        super(ImBottom, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        self.conv1 = conv3x3(in_channels, out_channels)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = conv3x3(out_channels, out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        h = F.leaky_relu(self.bn1(self.conv1(x)), 0.01, inplace=True)
        return F.leaky_relu(self.bn2(self.conv2(h)), 0.01, inplace=True)

class UpConv(nn.Module):
    """Decoder stage merging two encoder skip tensors with the upsampled path."""

    def __init__(self, in_channels, out_channels,
                 merge_mode='add', up_mode='transpose'):
        super(UpConv, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.merge_mode = merge_mode
        self.up_mode = up_mode

        # Upsampling keeps the channel count; merging happens afterwards.
        self.upconv = upconv2x2(in_channels, in_channels, mode=up_mode)

        # 'add' keeps channels unchanged; 'concat' stacks three feature maps.
        conv1_in = in_channels if merge_mode == 'add' else 3 * in_channels
        self.conv1 = conv3x3(conv1_in, out_channels)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = conv3x3(out_channels, out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, from_down1, from_down2, from_up):
        """Forward pass.

        Arguments:
            from_down1: tensor from the data encoder pathway
            from_down2: tensor from the das encoder pathway
            from_up: upconv'd tensor from the decoder pathway
        """
        up = self.upconv(from_up)
        if self.merge_mode == 'add':
            merged = up + from_down1 + from_down2
        else:
            # Channel-wise concatenation of decoder and both skips.
            merged = torch.cat((up, from_down1, from_down2), 1)
        h = F.leaky_relu(self.bn1(self.conv1(merged)), 0.01, inplace=True)
        return F.leaky_relu(self.bn2(self.conv2(h)), 0.01, inplace=True)


class SegmentationHead(nn.Module):
    """Auxiliary head producing a single-channel binary-segmentation logit map.

    No activation is applied to the output — callers are expected to apply
    e.g. ``BCEWithLogitsLoss`` or a sigmoid themselves.
    """

    def __init__(self, in_channels, use_dropout=False, dropout_rate=0.1):
        super(SegmentationHead, self).__init__()

        # Two conv stages, each halving the channel count.
        self.seg_conv1 = conv3x3(in_channels, in_channels // 2)
        self.seg_bn1 = nn.BatchNorm2d(in_channels // 2)
        self.seg_conv2 = conv3x3(in_channels // 2, in_channels // 4)
        self.seg_bn2 = nn.BatchNorm2d(in_channels // 4)

        # Optional channel-wise dropout for regularisation.
        self.use_dropout = use_dropout
        if use_dropout:
            self.dropout = nn.Dropout2d(dropout_rate)

        # Final 1x1 projection down to one logit channel.
        self.seg_final = nn.Conv2d(in_channels // 4, 1, kernel_size=1)

    def forward(self, x):
        h = F.leaky_relu(self.seg_bn1(self.seg_conv1(x)), 0.01, inplace=True)
        h = F.leaky_relu(self.seg_bn2(self.seg_conv2(h)), 0.01, inplace=True)
        if self.use_dropout:
            h = self.dropout(h)
        # Raw logits — intentionally no activation here.
        return self.seg_final(h)


class MultiScaleSegmentationHead(nn.Module):
    """Binary segmentation head that fuses decoder features from several scales.

    Each input feature map is projected to a common channel width, resized to
    ``target_size``, concatenated, fused, and passed to a ``SegmentationHead``.
    """

    def __init__(self, feature_channels_list, use_dropout=False, dropout_rate=0.1):
        super(MultiScaleSegmentationHead, self).__init__()

        unified_channels = 64  # common channel width after adaptation

        # One (1x1 conv + BN + ReLU) adapter per input scale.
        self.feature_adapters = nn.ModuleList(
            nn.Sequential(
                nn.Conv2d(channels, unified_channels, 1),
                nn.BatchNorm2d(unified_channels),
                nn.ReLU(inplace=True),
            )
            for channels in feature_channels_list
        )

        # Fuse the concatenated, resized features back to the common width.
        self.fusion_conv = nn.Sequential(
            conv3x3(unified_channels * len(feature_channels_list), unified_channels),
            nn.BatchNorm2d(unified_channels),
            nn.ReLU(inplace=True),
        )

        # Shared binary segmentation head on top of the fused features.
        self.seg_head = SegmentationHead(unified_channels, use_dropout, dropout_rate)

    def forward(self, features_list, target_size):
        resized = []
        for adapter, feats in zip(self.feature_adapters, features_list):
            adapted = adapter(feats)
            # Bring every scale to the common target resolution.
            if adapted.shape[-2:] != target_size:
                adapted = F.interpolate(adapted, size=target_size,
                                        mode='bilinear', align_corners=False)
            resized.append(adapted)

        fused = self.fusion_conv(torch.cat(resized, dim=1))
        return self.seg_head(fused)


# Model 1 modified Unet for beamforming
class YNet(nn.Module):
    """Y-shaped U-Net for ultrasound beamforming.

    Two encoders — one for raw channel data, one for a beamformed image —
    are fused at the bottleneck and decoded into a reconstructed image
    (Tanh output in [-1, 1]). An optional auxiliary head additionally emits
    a binary-segmentation logit map.
    """

    def __init__(self, in_channels=3, up_mode='transpose', merge_mode='concat',
                 enable_segmentation=False, seg_use_multiscale=False, seg_use_dropout=True, seg_dropout_rate=0.1):
        """
        Arguments:
            in_channels: int, number of channels in the input tensor.
                Default is 3 for RGB images.
            up_mode: string, type of upconvolution. Choices: 'transpose'
                for transpose convolution or 'upsample' for nearest neighbour
                upsampling.
            merge_mode: 'concat' or 'add' — how skip/decoder features are merged.
            enable_segmentation: build the auxiliary segmentation head.
            seg_use_multiscale: fuse features from several decoder depths.
            seg_use_dropout / seg_dropout_rate: dropout config for the head.
        Raises:
            ValueError: on invalid up_mode/merge_mode, or the unsupported
                'upsample' + 'add' combination.
        """
        super(YNet, self).__init__()
        if up_mode in ('transpose', 'upsample'):
            self.up_mode = up_mode
        else:
            raise ValueError("\"{}\" is not a valid mode for "
                             "upsampling. Only \"transpose\" and "
                             "\"upsample\" are allowed.".format(up_mode))

        if merge_mode in ('concat', 'add'):
            self.merge_mode = merge_mode
        else:
            # BUGFIX: the message previously interpolated up_mode instead of
            # merge_mode and was missing a space ("mode formerging").
            raise ValueError("\"{}\" is not a valid mode for "
                             "merging up and down paths. "
                             "Only \"concat\" and "
                             "\"add\" are allowed.".format(merge_mode))

        # NOTE: up_mode 'upsample' is incompatible with merge_mode 'add'
        if self.up_mode == 'upsample' and self.merge_mode == 'add':
            raise ValueError("up_mode \"upsample\" is incompatible "
                             "with merge_mode \"add\" at the moment "
                             "because it doesn't make sense to use "
                             "nearest neighbour to reduce "
                             "depth channels (by half).")

        self.in_channels = in_channels
        self.enable_segmentation = enable_segmentation
        self.seg_use_multiscale = seg_use_multiscale

        # Encoder 1: raw channel data. raw_data=True adds the height-compressing
        # conv to each skip so its resolution matches the image branch.
        self.rddown1 = DownConv(1, 32, raw_data=True, pooling=True, pool_mode='height_only')
        self.rddown2 = DownConv(32, 64, raw_data=True)
        self.rddown3 = DownConv(64, 128, raw_data=True)
        self.rddown4 = DownConv(128, 256, raw_data=True)
        self.bottom = Bottom(256, 256)

        # Encoder 2: beamformed image.
        self.down1 = DownConv(1, 32)
        self.down2 = DownConv(32, 64)
        self.down3 = DownConv(64, 128)
        self.down4 = DownConv(128, 256)
        self.bbottom = ImBottom(256, 256)

        # Fuses the two bottleneck features (512 channels when concatenated).
        self.combine = ImBottom(512, 256)
        self.up1 = UpConv(256, 128, merge_mode=self.merge_mode)
        self.up2 = UpConv(128, 64, merge_mode=self.merge_mode)
        self.up3 = UpConv(64, 32, merge_mode=self.merge_mode)
        self.up4 = UpConv(32, 16, merge_mode=self.merge_mode)

        self.final_conv = nn.Conv2d(16, 1, kernel_size=1)  # 16 -> 1
        self.final_act = nn.Tanh()

        # Segmentation components — fixed to binary classification.
        if self.enable_segmentation:
            if seg_use_multiscale:
                # Multi-scale head fed by features from each decoder depth.
                feature_channels = [16, 32, 64, 128]  # channels of d4, d3, d2, d1
                self.segmentation_head = MultiScaleSegmentationHead(
                    feature_channels_list=feature_channels,
                    use_dropout=seg_use_dropout,
                    dropout_rate=seg_dropout_rate
                )
            else:
                # Single-scale head fed by the final decoder feature map (d4, 16 ch).
                self.segmentation_head = SegmentationHead(
                    in_channels=16,
                    use_dropout=seg_use_dropout,
                    dropout_rate=seg_dropout_rate
                )

        self.reset_params()

    @staticmethod
    def weight_init(m):
        """Xavier-normal init for conv weights, zeros for biases, identity BN."""
        if isinstance(m, nn.Conv2d):
            init.xavier_normal_(m.weight)
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.ConvTranspose2d):
            init.xavier_normal_(m.weight)
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)

    def reset_params(self):
        """Apply weight_init to every submodule."""
        for m in self.modules():
            self.weight_init(m)

    def forward(self, x, bfimg, return_segmentation=None):
        """Run both encoders and the shared decoder.

        Arguments:
            x: raw channel data tensor (expected single-channel; the demo
                below uses (n, 1, 2560, 64) — confirm for other setups).
            bfimg: beamformed image tensor (demo uses (n, 1, 256, 128)).
            return_segmentation: None = follow enable_segmentation; otherwise
                force returning (reconstruction, segmentation-or-None).
        """
        # Decide whether segmentation output is requested for this call.
        if return_segmentation is None:
            return_segmentation = self.enable_segmentation

        # Encoder 1: raw data
        x1, before_pool1 = self.rddown1(x) # x1: (n, 32, 1280, 64), before_pool1: (n, 32, 2560, 64)
        x2, before_pool2 = self.rddown2(x1) # x2: (n, 64, 640, 32), before_pool2: (n, 64, 1280, 64)
        x3, before_pool3 = self.rddown3(x2) # x3: (n, 128, 320, 16), before_pool3: (n, 128, 640, 32)
        x4, before_pool4 = self.rddown4(x3) # x4: (n, 256, 160, 8), before_pool4: (n, 256, 320, 16)
        x5 = self.bottom(x4)  # x5: (n, 256, 16, 8)

        # Encoder 2: beamformed image
        bx1, bxbefore_pool1 = self.down1(bfimg)  # bx1: (n, 32, 128, 64), bxbefore_pool1: (n, 32, 256, 128)
        bx2, bxbefore_pool2 = self.down2(bx1)  # bx2: (n, 64, 64, 32), bxbefore_pool2: (n, 64, 128, 64)
        bx3, bxbefore_pool3 = self.down3(bx2)  # bx3: (n, 128, 32, 16), bxbefore_pool3: (n, 128, 64, 32)
        bx4, bxbefore_pool4 = self.down4(bx3)  # bx4: (n, 256, 16, 8), bxbefore_pool4: (n, 256, 32, 16)
        bx5 = self.bbottom(bx4) # bx5: (n, 256, 16, 8)

        # Resize raw-data skips so each matches its image-branch counterpart.
        before_pool4_resize = F.interpolate(before_pool4, size=bxbefore_pool4.shape[2:], mode='bilinear', align_corners=False, antialias=False)
        before_pool3_resize = F.interpolate(before_pool3, size=bxbefore_pool3.shape[2:], mode='bilinear', align_corners=False, antialias=False)
        before_pool2_resize = F.interpolate(before_pool2, size=bxbefore_pool2.shape[2:], mode='bilinear', align_corners=False, antialias=False)
        before_pool1_resize = F.interpolate(before_pool1, size=bxbefore_pool1.shape[2:], mode='bilinear', align_corners=False, antialias=False)

        # Merge the two bottleneck features.
        if self.merge_mode == 'add':
            out = x5 + bx5
        else:
            out = torch.cat((x5, bx5), 1)
        out = self.combine(out)  # out: (n, 256, 16, 8)

        # Decoder: each stage merges both encoders' skips with the upsampled path.
        d1 = self.up1(before_pool4_resize, bxbefore_pool4, out) # d1: (n, 128, 32, 16)
        d2 = self.up2(before_pool3_resize, bxbefore_pool3, d1)  # d2: (n, 64, 64, 32)
        d3 = self.up3(before_pool2_resize, bxbefore_pool2, d2) # d3: (n, 32, 128, 64)
        d4 = self.up4(before_pool1_resize, bxbefore_pool1, d3) # d4: (n, 16, 256, 128)

        out = self.final_conv(d4)
        out = self.final_act(out)  # Tanh — reconstruction is bounded to [-1, 1]

        # Segmentation output handling.
        if return_segmentation:
            if self.enable_segmentation:
                if self.seg_use_multiscale:
                    # Gather decoder features from shallow to deep for fusion.
                    multi_scale_features = [d4, d3, d2, d1]
                    # Use the bfimg resolution as the common target size.
                    target_size = (bfimg.shape[2], bfimg.shape[3])
                    seg_out = self.segmentation_head(multi_scale_features, target_size)
                else:
                    # Single-scale: segment from the final decoder feature map.
                    seg_out = self.segmentation_head(d4)
                return out, seg_out
            else:
                # Segmentation requested but not built: keep the 2-tuple shape.
                return out, None
        else:
            return out


if __name__ == "__main__":
    """
    testing
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    #device =  torch.device('cuda:1')

    # torch.autograd.Variable is deprecated (no-op since PyTorch 0.4);
    # plain tensors with requires_grad=True are equivalent.
    x = torch.rand(1, 1, 2560, 64, requires_grad=True).to(device)
    img = torch.rand(1, 1, 256, 128, requires_grad=True).to(device)

    # Model without the segmentation head.
    model = YNet(in_channels=1, merge_mode='concat', enable_segmentation=False).to(device)
    out = model(x, img)
    print(f"YNet without segmentation output shape: {out.shape}")

    # Model with the single-scale segmentation head.
    model_seg = YNet(in_channels=1, merge_mode='concat',
                     enable_segmentation=True, seg_use_multiscale=False).to(device)
    out, seg_out = model_seg(x, img)
    print(f"YNet with single-scale binary segmentation - reconstruction: {out.shape}, segmentation: {seg_out.shape}")

    # Model with the multi-scale segmentation head.
    model_seg_ms = YNet(in_channels=1, merge_mode='concat',
                        enable_segmentation=True, seg_use_multiscale=True).to(device)
    out, seg_out = model_seg_ms(x, img)
    print(f"YNet with multi-scale binary segmentation - reconstruction: {out.shape}, segmentation: {seg_out.shape}")

    # Model with dropout enabled in the segmentation head.
    model_seg_dropout = YNet(in_channels=1, merge_mode='concat',
                             enable_segmentation=True, seg_use_multiscale=True,
                             seg_use_dropout=True, seg_dropout_rate=0.2).to(device)
    out, seg_out = model_seg_dropout(x, img)
    print(f"YNet with dropout binary segmentation - reconstruction: {out.shape}, segmentation: {seg_out.shape}")

    # Parameter count.
    total_parameters = sum(param.numel() for param in model_seg_ms.parameters())
    print(f"YNet with binary segmentation total parameters: {total_parameters:,}")

    # Original smoke test: forward + backward through the plain model.
    model_original = YNet(in_channels=1, merge_mode='concat').to(device)
    total_parameters_original = sum(param.numel() for param in model_original.parameters())
    out_original = model_original(x, img)
    loss = torch.mean(out_original)
    loss.backward()
    print(f"Original YNet loss: {loss}")