from __future__ import print_function, division
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch
from segmentation.DCPNet import  CSP_Module


# from SwinTransformer.models.swin_transformer import ViTSwinTransformerBlock, VitPatchMerging


class conv_block(nn.Module):
    """Standard U-Net encoder block: two stacked 3x3 Conv -> BN -> ReLU stages.

    Spatial size is preserved (stride 1, padding 1); channels go
    in_ch -> out_ch in the first stage and stay at out_ch in the second.
    """

    def __init__(self, in_ch, out_ch):
        super(conv_block, self).__init__()
        stages = []
        channels = in_ch
        for _ in range(2):
            stages.append(nn.Conv2d(channels, out_ch, kernel_size=3, stride=1, padding=1, bias=True))
            stages.append(nn.BatchNorm2d(out_ch))
            stages.append(nn.ReLU(inplace=True))
            channels = out_ch
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv(x)


class up_conv(nn.Module):
    """Decoder upsampling block: nearest-neighbour Upsample (default mode of
    ``nn.Upsample``) by ``scale_factor``, followed by a 3x3 Conv -> BN -> ReLU.
    """

    def __init__(self, in_ch, out_ch, scale_factor=2):
        super(up_conv, self).__init__()
        stages = [
            nn.Upsample(scale_factor=scale_factor),
            nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        ]
        self.up = nn.Sequential(*stages)

    def forward(self, x):
        return self.up(x)


class ConvBNRelu1D(nn.Module):
    """1-D convolution followed by batch normalisation and an in-place ReLU."""

    def __init__(self, inChannel, outChannel, kernel_size=3, padding=1, bias=False):
        super(ConvBNRelu1D, self).__init__()
        self.conv = nn.Conv1d(inChannel, outChannel, kernel_size=kernel_size, padding=padding, bias=bias)
        self.bn = nn.BatchNorm1d(outChannel, momentum=0.1, affine=True)
        self.reLu = nn.ReLU(inplace=True)

    def forward(self, x):
        # conv -> bn -> relu, expressed as a single chained call.
        return self.reLu(self.bn(self.conv(x)))

class CoTAttention(nn.Module):
    """Contextual Transformer (CoT) attention block.

    A *static* context is extracted from the input by a grouped k x k
    convolution over the keys; a *dynamic* context is obtained by predicting
    a per-channel attention map from [keys, input] and applying its softmax
    (over spatial positions) to the values. The two contexts are summed and
    projected to ``out_dim`` channels with a final 1x1 convolution.
    """

    def __init__(self, dim=512, out_dim=512, kernel_size=3):
        super().__init__()
        self.dim = dim
        self.out_dim = out_dim
        self.kernel_size = kernel_size

        # Static context branch: grouped k x k conv + BN + ReLU.
        self.key_embed = nn.Sequential(
            nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=kernel_size // 2, groups=4, bias=False),
            nn.BatchNorm2d(dim),
            nn.ReLU()
        )
        # Value projection: 1x1 conv + BN, no nonlinearity.
        self.value_embed = nn.Sequential(
            nn.Conv2d(dim, dim, 1, bias=False),
            nn.BatchNorm2d(dim)
        )

        # Attention head: bottleneck by `factor`, then predict k*k attention
        # logits per channel and position.
        factor = 4
        self.attention_embed = nn.Sequential(
            nn.Conv2d(2 * dim, 2 * dim // factor, 1, bias=False),
            nn.BatchNorm2d(2 * dim // factor),
            nn.ReLU(),
            nn.Conv2d(2 * dim // factor, kernel_size * kernel_size * dim, 1)
        )

        # Final 1x1 projection so the block may change the channel count.
        self.output_conv = nn.Conv2d(dim, out_dim, 1)

    def forward(self, x):
        bs, c, h, w = x.shape

        static_ctx = self.key_embed(x)                  # (bs, c, h, w)
        values = self.value_embed(x).view(bs, c, -1)    # (bs, c, h*w)

        joint = torch.cat([static_ctx, x], dim=1)       # (bs, 2c, h, w)
        att = self.attention_embed(joint)               # (bs, c*k*k, h, w)
        att = att.reshape(bs, c, self.kernel_size * self.kernel_size, h, w)
        # Average the k*k attention slots, then flatten spatially.
        att = att.mean(2, keepdim=False).view(bs, c, -1)

        # Softmax over spatial positions weights the values -> dynamic context.
        dynamic_ctx = (F.softmax(att, dim=-1) * values).view(bs, c, h, w)

        return self.output_conv(static_ctx + dynamic_ctx)

# *************************************************************************************
# *************************************************************************************


###################################################################
#使用部分卷积代替普通卷积
#CoT和蛇形卷积并行提取
class TMP1(nn.Module):
    """U-Net variant with a dual encoder (partial-conv branch + DC branch).

    One encoder path downsamples with partial convolutions
    (``CSP_Module.Partial_conv3``); a second path uses ``CSP_Module.DC``
    blocks. The two paths' feature maps are summed at every scale and fed
    to a standard U-Net decoder with skip connections.

    Base U-Net paper: https://arxiv.org/abs/1505.04597

    Args:
        in_ch:  number of input image channels (default 3, RGB).
        out_ch: number of output segmentation channels (default 1).
    """

    def __init__(self, in_ch=3, out_ch=1):
        super().__init__()

        n1 = 64
        # Channel widths per encoder level: 64, 128, 256, 512, 1024.
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        # NOTE(review): Conv1..Conv5 are never used in forward() (the pc*
        # branch is used instead); kept so existing checkpoints still load.
        self.Conv1 = conv_block(in_ch, filters[0])
        self.pc1 = CSP_Module.Partial_conv3(in_ch, filters[0], 2, 'split_cat')

        self.Conv2 = conv_block(filters[0], filters[1])
        self.pc2 = CSP_Module.Partial_conv3(filters[0], filters[1], 2, 'split_cat')

        self.Conv3 = conv_block(filters[1], filters[2])
        self.pc3 = CSP_Module.Partial_conv3(filters[1], filters[2], 2, 'split_cat')

        self.Conv4 = conv_block(filters[2], filters[3])
        self.pc4 = CSP_Module.Partial_conv3(filters[2], filters[3], 2, 'split_cat')

        self.Conv5 = conv_block(filters[3], filters[4])
        self.pc5 = CSP_Module.Partial_conv3(filters[3], filters[4], 2, 'split_cat')

        # Decoder: upsample, concatenate with the matching encoder level,
        # then fuse with a conv block (halves the channel count).
        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])

        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])

        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])

        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])

        # 1x1 conv producing the raw (unactivated) segmentation logits.
        self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

        # Second encoder branch (DC blocks).
        # FIX: was hard-coded conv_block(3, ...), which ignored `in_ch` and
        # broke non-RGB inputs; identical for the default in_ch=3.
        self.interWin1 = conv_block(in_ch, filters[0])
        self.interWin2 = CSP_Module.DC(filters[0], filters[1])
        self.interWin3 = CSP_Module.DC(filters[1], filters[2])
        self.interWin4 = CSP_Module.DC(filters[2], filters[3])
        self.interWin5 = CSP_Module.DC(filters[3], filters[4])

    def forward(self, input):
        # --- DC encoder branch (g1..g5, one per scale) ---
        g1 = self.interWin1(input)

        g2 = self.Maxpool1(g1)
        g2 = self.interWin2(g2)

        g3 = self.Maxpool2(g2)
        g3 = self.interWin3(g3)

        g4 = self.Maxpool3(g3)
        g4 = self.interWin4(g4)

        g5 = self.Maxpool4(g4)
        g5 = self.interWin5(g5)

        # --- Partial-conv encoder branch (e1..e5) ---
        e1 = self.pc1(input)

        e2 = self.Maxpool1(e1)
        e2 = self.pc2(e2)

        e3 = self.Maxpool2(e2)
        e3 = self.pc3(e3)

        e4 = self.Maxpool3(e3)
        e4 = self.pc4(e4)

        e5 = self.Maxpool4(e4)
        e5 = self.pc5(e5)

        # Fuse the two branches by elementwise sum at every scale.
        e1 = e1 + g1
        e2 = e2 + g2
        e3 = e3 + g3
        e4 = e4 + g4
        e5 = e5 + g5

        # --- Decoder with skip connections ---
        d5 = self.Up5(e5)
        d5 = torch.cat((e4, d5), dim=1)
        d5 = self.Up_conv5(d5)

        d4 = self.Up4(d5)
        d4 = torch.cat((e3, d4), dim=1)
        d4 = self.Up_conv4(d4)

        d3 = self.Up3(d4)
        d3 = torch.cat((e2, d3), dim=1)
        d3 = self.Up_conv3(d3)

        d2 = self.Up2(d3)
        d2 = torch.cat((e1, d2), dim=1)
        d2 = self.Up_conv2(d2)

        # Raw logits; apply sigmoid/softmax in the loss or at inference.
        out = self.Conv(d2)

        return out

###################################################################
#使用普通卷积
#CoT和蛇形卷积并行提取
class TMP2(nn.Module):
    """U-Net variant with a dual encoder (plain conv branch + DC branch).

    Same layout as ``TMP1`` but the main encoder path uses ordinary
    ``conv_block`` convolutions instead of partial convolutions; the DC
    branch's features are summed in at every scale before the decoder.

    Args:
        in_ch:  number of input image channels (default 3, RGB).
        out_ch: number of output segmentation channels (default 1).
    """

    def __init__(self, in_ch=3, out_ch=1):
        super().__init__()

        n1 = 64
        # Channel widths per encoder level: 64, 128, 256, 512, 1024.
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        # NOTE(review): pc1..pc5 are never used in forward() (the Conv*
        # branch is used instead); kept so existing checkpoints still load.
        self.Conv1 = conv_block(in_ch, filters[0])
        self.pc1 = CSP_Module.Partial_conv3(in_ch, filters[0], 2, 'split_cat')

        self.Conv2 = conv_block(filters[0], filters[1])
        self.pc2 = CSP_Module.Partial_conv3(filters[0], filters[1], 2, 'split_cat')

        self.Conv3 = conv_block(filters[1], filters[2])
        self.pc3 = CSP_Module.Partial_conv3(filters[1], filters[2], 2, 'split_cat')

        self.Conv4 = conv_block(filters[2], filters[3])
        self.pc4 = CSP_Module.Partial_conv3(filters[2], filters[3], 2, 'split_cat')

        self.Conv5 = conv_block(filters[3], filters[4])
        self.pc5 = CSP_Module.Partial_conv3(filters[3], filters[4], 2, 'split_cat')

        # Decoder: upsample, concatenate with the matching encoder level,
        # then fuse with a conv block (halves the channel count).
        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])

        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])

        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])

        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])

        # 1x1 conv producing the raw (unactivated) segmentation logits.
        self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

        # Second encoder branch (DC blocks).
        # FIX: was hard-coded conv_block(3, ...), which ignored `in_ch` and
        # broke non-RGB inputs; identical for the default in_ch=3.
        self.interWin1 = conv_block(in_ch, filters[0])
        self.interWin2 = CSP_Module.DC(filters[0], filters[1])
        self.interWin3 = CSP_Module.DC(filters[1], filters[2])
        self.interWin4 = CSP_Module.DC(filters[2], filters[3])
        self.interWin5 = CSP_Module.DC(filters[3], filters[4])

    def forward(self, input):
        # --- DC encoder branch (g1..g5, one per scale) ---
        g1 = self.interWin1(input)

        g2 = self.Maxpool1(g1)
        g2 = self.interWin2(g2)

        g3 = self.Maxpool2(g2)
        g3 = self.interWin3(g3)

        g4 = self.Maxpool3(g3)
        g4 = self.interWin4(g4)

        g5 = self.Maxpool4(g4)
        g5 = self.interWin5(g5)

        # --- Plain convolution encoder branch (e1..e5) ---
        e1 = self.Conv1(input)

        e2 = self.Maxpool1(e1)
        e2 = self.Conv2(e2)

        e3 = self.Maxpool2(e2)
        e3 = self.Conv3(e3)

        e4 = self.Maxpool3(e3)
        e4 = self.Conv4(e4)

        e5 = self.Maxpool4(e4)
        e5 = self.Conv5(e5)

        # Fuse the two branches by elementwise sum at every scale.
        e1 = e1 + g1
        e2 = e2 + g2
        e3 = e3 + g3
        e4 = e4 + g4
        e5 = e5 + g5

        # --- Decoder with skip connections ---
        d5 = self.Up5(e5)
        d5 = torch.cat((e4, d5), dim=1)
        d5 = self.Up_conv5(d5)

        d4 = self.Up4(d5)
        d4 = torch.cat((e3, d4), dim=1)
        d4 = self.Up_conv4(d4)

        d3 = self.Up3(d4)
        d3 = torch.cat((e2, d3), dim=1)
        d3 = self.Up_conv3(d3)

        d2 = self.Up2(d3)
        d2 = torch.cat((e1, d2), dim=1)
        d2 = self.Up_conv2(d2)

        # Raw logits; apply sigmoid/softmax in the loss or at inference.
        out = self.Conv(d2)

        return out

###################################################################
#使用普通卷积
#CoT和蛇形卷积串行提取
class TMP3(nn.Module):
    """U-Net variant with a dual encoder (plain conv branch + serial DC branch).

    Same layout as ``TMP2`` but the second encoder path uses
    ``CSP_Module.DC_Seq`` (serial composition) instead of ``DC``; its
    features are summed in at every scale before the decoder.

    Args:
        in_ch:  number of input image channels (default 3, RGB).
        out_ch: number of output segmentation channels (default 1).
    """

    def __init__(self, in_ch=3, out_ch=1):
        super().__init__()

        n1 = 64
        # Channel widths per encoder level: 64, 128, 256, 512, 1024.
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        # NOTE(review): pc1..pc5 are never used in forward() (the Conv*
        # branch is used instead); kept so existing checkpoints still load.
        self.Conv1 = conv_block(in_ch, filters[0])
        self.pc1 = CSP_Module.Partial_conv3(in_ch, filters[0], 2, 'split_cat')

        self.Conv2 = conv_block(filters[0], filters[1])
        self.pc2 = CSP_Module.Partial_conv3(filters[0], filters[1], 2, 'split_cat')

        self.Conv3 = conv_block(filters[1], filters[2])
        self.pc3 = CSP_Module.Partial_conv3(filters[1], filters[2], 2, 'split_cat')

        self.Conv4 = conv_block(filters[2], filters[3])
        self.pc4 = CSP_Module.Partial_conv3(filters[2], filters[3], 2, 'split_cat')

        self.Conv5 = conv_block(filters[3], filters[4])
        self.pc5 = CSP_Module.Partial_conv3(filters[3], filters[4], 2, 'split_cat')

        # Decoder: upsample, concatenate with the matching encoder level,
        # then fuse with a conv block (halves the channel count).
        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])

        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])

        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])

        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])

        # 1x1 conv producing the raw (unactivated) segmentation logits.
        self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

        # Second encoder branch (serial DC blocks).
        # FIX: was hard-coded conv_block(3, ...), which ignored `in_ch` and
        # broke non-RGB inputs; identical for the default in_ch=3.
        self.interWin1 = conv_block(in_ch, filters[0])
        self.interWin2 = CSP_Module.DC_Seq(filters[0], filters[1])
        self.interWin3 = CSP_Module.DC_Seq(filters[1], filters[2])
        self.interWin4 = CSP_Module.DC_Seq(filters[2], filters[3])
        self.interWin5 = CSP_Module.DC_Seq(filters[3], filters[4])

    def forward(self, input):
        # --- Serial DC encoder branch (g1..g5, one per scale) ---
        g1 = self.interWin1(input)

        g2 = self.Maxpool1(g1)
        g2 = self.interWin2(g2)

        g3 = self.Maxpool2(g2)
        g3 = self.interWin3(g3)

        g4 = self.Maxpool3(g3)
        g4 = self.interWin4(g4)

        g5 = self.Maxpool4(g4)
        g5 = self.interWin5(g5)

        # --- Plain convolution encoder branch (e1..e5) ---
        e1 = self.Conv1(input)

        e2 = self.Maxpool1(e1)
        e2 = self.Conv2(e2)

        e3 = self.Maxpool2(e2)
        e3 = self.Conv3(e3)

        e4 = self.Maxpool3(e3)
        e4 = self.Conv4(e4)

        e5 = self.Maxpool4(e4)
        e5 = self.Conv5(e5)

        # Fuse the two branches by elementwise sum at every scale.
        e1 = e1 + g1
        e2 = e2 + g2
        e3 = e3 + g3
        e4 = e4 + g4
        e5 = e5 + g5

        # --- Decoder with skip connections ---
        d5 = self.Up5(e5)
        d5 = torch.cat((e4, d5), dim=1)
        d5 = self.Up_conv5(d5)

        d4 = self.Up4(d5)
        d4 = torch.cat((e3, d4), dim=1)
        d4 = self.Up_conv4(d4)

        d3 = self.Up3(d4)
        d3 = torch.cat((e2, d3), dim=1)
        d3 = self.Up_conv3(d3)

        d2 = self.Up2(d3)
        d2 = torch.cat((e1, d2), dim=1)
        d2 = self.Up_conv2(d2)

        # Raw logits; apply sigmoid/softmax in the loss or at inference.
        out = self.Conv(d2)
        return out

###################################################################
#使用普通卷积和CoT串行
#CoT和蛇形卷积串行提取
class TMP4(nn.Module):
    """U-Net variant: conv encoder with serial CoT attention + serial DC branch.

    The main encoder applies a ``conv_block`` followed *in series* by a
    ``CoTAttention`` at every scale. A second encoder path uses
    ``CSP_Module.DC_Seq`` blocks; the two branches are summed per scale
    before the standard U-Net decoder.

    Args:
        in_ch:  number of input image channels (default 3, RGB).
        out_ch: number of output segmentation channels (default 1).
    """

    def __init__(self, in_ch=3, out_ch=1):
        super().__init__()

        n1 = 64
        # Channel widths per encoder level: 64, 128, 256, 512, 1024.
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Main encoder conv blocks, one per scale.
        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])
        # Decoder: upsample, concatenate skip, fuse with conv block.
        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])
        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])
        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])
        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])
        # 1x1 conv producing the raw (unactivated) segmentation logits.
        self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

        # Second encoder branch (serial DC blocks).
        # FIX: was hard-coded conv_block(3, ...), which ignored `in_ch` and
        # broke non-RGB inputs; identical for the default in_ch=3.
        self.interWin1 = conv_block(in_ch, filters[0])
        self.interWin2 = CSP_Module.DC_Seq(filters[0], filters[1])
        self.interWin3 = CSP_Module.DC_Seq(filters[1], filters[2])
        self.interWin4 = CSP_Module.DC_Seq(filters[2], filters[3])
        self.interWin5 = CSP_Module.DC_Seq(filters[3], filters[4])

        # CoT attention applied after each encoder conv block (channel-preserving).
        self.CoT1 = CoTAttention(filters[0], filters[0])
        self.CoT2 = CoTAttention(filters[1], filters[1])
        self.CoT3 = CoTAttention(filters[2], filters[2])
        self.CoT4 = CoTAttention(filters[3], filters[3])
        self.CoT5 = CoTAttention(filters[4], filters[4])

    def forward(self, input):
        # --- Serial DC encoder branch (g1..g5, one per scale) ---
        g1 = self.interWin1(input)

        g2 = self.Maxpool1(g1)
        g2 = self.interWin2(g2)

        g3 = self.Maxpool2(g2)
        g3 = self.interWin3(g3)

        g4 = self.Maxpool3(g3)
        g4 = self.interWin4(g4)

        g5 = self.Maxpool4(g4)
        g5 = self.interWin5(g5)

        # --- Main encoder: conv block then CoT attention, in series ---
        e1 = self.Conv1(input)
        e1 = self.CoT1(e1)
        e2 = self.Maxpool1(e1)
        e2 = self.Conv2(e2)
        e2 = self.CoT2(e2)
        e3 = self.Maxpool2(e2)
        e3 = self.Conv3(e3)
        e3 = self.CoT3(e3)
        e4 = self.Maxpool3(e3)
        e4 = self.Conv4(e4)
        e4 = self.CoT4(e4)
        e5 = self.Maxpool4(e4)
        e5 = self.Conv5(e5)
        e5 = self.CoT5(e5)

        # Fuse the two branches by elementwise sum at every scale.
        e1 = e1 + g1
        e2 = e2 + g2
        e3 = e3 + g3
        e4 = e4 + g4
        e5 = e5 + g5

        # --- Decoder with skip connections ---
        d5 = self.Up5(e5)
        d5 = torch.cat((e4, d5), dim=1)
        d5 = self.Up_conv5(d5)

        d4 = self.Up4(d5)
        d4 = torch.cat((e3, d4), dim=1)
        d4 = self.Up_conv4(d4)

        d3 = self.Up3(d4)
        d3 = torch.cat((e2, d3), dim=1)
        d3 = self.Up_conv3(d3)

        d2 = self.Up2(d3)
        d2 = torch.cat((e1, d2), dim=1)
        d2 = self.Up_conv2(d2)

        # Raw logits; apply sigmoid/softmax in the loss or at inference.
        out = self.Conv(d2)
        return out

###################################################################
#使用普通卷积和CoT并行
#蛇形卷积和CoT并行
class TMP5(nn.Module):
    """U-Net variant: conv encoder with parallel CoT attention + DC branch.

    At levels 2-5 the main encoder runs a ``conv_block`` and a
    ``CoTAttention`` *in parallel* and sums them (level 1 is conv only).
    A second encoder path uses ``CSP_Module.DC`` blocks; the two branches
    are summed per scale before the standard U-Net decoder.

    Args:
        in_ch:  number of input image channels (default 3, RGB).
        out_ch: number of output segmentation channels (default 1).
    """

    def __init__(self, in_ch=3, out_ch=1):
        super().__init__()

        n1 = 64
        # Channel widths per encoder level: 64, 128, 256, 512, 1024.
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Main encoder conv blocks, one per scale.
        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])
        # Decoder: upsample, concatenate skip, fuse with conv block.
        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])
        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])
        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])
        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])
        # 1x1 conv producing the raw (unactivated) segmentation logits.
        self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

        # Second encoder branch (DC blocks).
        # FIX: was hard-coded conv_block(3, ...), which ignored `in_ch` and
        # broke non-RGB inputs; identical for the default in_ch=3.
        self.interWin1 = conv_block(in_ch, filters[0])
        self.interWin2 = CSP_Module.DC(filters[0], filters[1])
        self.interWin3 = CSP_Module.DC(filters[1], filters[2])
        self.interWin4 = CSP_Module.DC(filters[2], filters[3])
        self.interWin5 = CSP_Module.DC(filters[3], filters[4])

        # Parallel CoT attention per level (channel-preserving).
        # NOTE(review): CoT1 is never called in forward() (level 1 has no
        # parallel attention); kept so existing checkpoints still load.
        self.CoT1 = CoTAttention(filters[0], filters[0])
        self.CoT2 = CoTAttention(filters[1], filters[1])
        self.CoT3 = CoTAttention(filters[2], filters[2])
        self.CoT4 = CoTAttention(filters[3], filters[3])
        self.CoT5 = CoTAttention(filters[4], filters[4])

    def forward(self, input):
        # (removed leftover debug print of the input size)
        # --- DC encoder branch (g1..g5, one per scale) ---
        g1 = self.interWin1(input)
        g2 = self.Maxpool1(g1)
        g2 = self.interWin2(g2)
        g3 = self.Maxpool2(g2)
        g3 = self.interWin3(g3)
        g4 = self.Maxpool3(g3)
        g4 = self.interWin4(g4)
        g5 = self.Maxpool4(g4)
        g5 = self.interWin5(g5)

        # --- Main encoder: conv block plus parallel CoT (levels 2-5) ---
        e1 = self.Conv1(input)
        e2 = self.Maxpool1(e1)
        e2 = self.Conv2(e2)
        p2 = self.CoT2(e2)
        e2 = e2 + p2
        e3 = self.Maxpool2(e2)

        e3 = self.Conv3(e3)
        p3 = self.CoT3(e3)
        e3 = e3 + p3
        e4 = self.Maxpool3(e3)
        e4 = self.Conv4(e4)
        p4 = self.CoT4(e4)
        e4 = e4 + p4
        e5 = self.Maxpool4(e4)
        e5 = self.Conv5(e5)
        p5 = self.CoT5(e5)
        e5 = e5 + p5

        # Fuse the two branches by elementwise sum at every scale.
        e1 = e1 + g1
        e2 = e2 + g2
        e3 = e3 + g3
        e4 = e4 + g4
        e5 = e5 + g5

        # --- Decoder with skip connections ---
        d5 = self.Up5(e5)
        d5 = torch.cat((e4, d5), dim=1)
        d5 = self.Up_conv5(d5)

        d4 = self.Up4(d5)
        d4 = torch.cat((e3, d4), dim=1)
        d4 = self.Up_conv4(d4)

        d3 = self.Up3(d4)
        d3 = torch.cat((e2, d3), dim=1)
        d3 = self.Up_conv3(d3)

        d2 = self.Up2(d3)
        d2 = torch.cat((e1, d2), dim=1)
        d2 = self.Up_conv2(d2)

        # Raw logits; apply sigmoid/softmax in the loss or at inference.
        out = self.Conv(d2)
        return out




if __name__ == '__main__':
    # Smoke test: push a random 256x256 RGB image through TMP4 and print
    # the input/output shapes. Runs on GPU when available, otherwise CPU
    # (the previous unconditional .cuda() crashed on CPU-only machines).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    block = TMP4(3, 2).to(device)
    input = torch.rand(1, 3, 256, 256, device=device)
    output = block(input)
    print(input.size(), output.size())