import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
import torch.nn.functional as F

class ConvBNRelu(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU building block.

    Args:
        inChannel: number of input channels.
        outChannel: number of output channels.
        kernel_size: convolution kernel size (default 3).
        padding: convolution padding (default 1, keeps spatial size for k=3).
        bias: whether the convolution has a bias term.
    """

    def __init__(self, inChannel, outChannel, kernel_size=3, padding=1, bias=True):
        super(ConvBNRelu, self).__init__()
        # Submodule names (conv / bn / reLu) are part of the state_dict layout.
        self.conv = nn.Conv2d(inChannel, outChannel, kernel_size=kernel_size, padding=padding, bias=bias)
        self.bn = nn.BatchNorm2d(outChannel, momentum=0.1, affine=True)
        self.reLu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Convolve, normalize, then activate — in that order.
        return self.reLu(self.bn(self.conv(x)))
class PatchMerging(nn.Module):
    r""" Patch Merging Layer.

    Downsamples a (H, W) token grid by 2x in each spatial direction while
    doubling the channel count: the four pixels of every 2x2 neighborhood are
    concatenated channel-wise (4*C), normalized, then linearly reduced to 2*C.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        # 4*dim -> 2*dim projection, no bias (as in Swin).
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """
        x: B, H*W, C
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."

        grid = x.view(B, H, W, C)

        # Gather the 2x2 neighborhood corners; the (row, col) parity order
        # (0,0), (1,0), (0,1), (1,1) matches the original x0..x3 layout.
        corners = [grid[:, r::2, c::2, :] for c in (0, 1) for r in (0, 1)]
        merged = torch.cat(corners, dim=-1)          # B H/2 W/2 4*C
        merged = merged.view(B, -1, 4 * C)           # B H/2*W/2 4*C

        # Normalize before reducing channels, as in the reference design.
        return self.reduction(self.norm(merged))

    def extra_repr(self) -> str:
        return f"input_resolution={self.input_resolution}, dim={self.dim}"

    def flops(self):
        # Norm over all tokens plus the 4*dim -> 2*dim linear on merged tokens.
        H, W = self.input_resolution
        total = H * W * self.dim
        total += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
        return total
class attention(nn.Module):
    """Multi-head pyramid-pooling attention.

    Projects the input per head with a 1x1 conv, builds a pyramid of
    max-pooled-then-upsampled copies of the projection, multiplies them all
    together element-wise, and adds the input back as a residual.

    Args:
        dim: total channel count (must be divisible by ``num_heads``).
        num_heads: number of heads; heads are folded into the batch dim.
        qkv_bias: whether the value projection conv has a bias.
    """

    # Pyramid levels: pooling kernel is H // scale for each scale.
    _SCALES = (1, 2, 4, 8, 16)

    def __init__(self, dim, num_heads, qkv_bias=True):
        super().__init__()
        self.num_heads = num_heads
        # Per-head value projection (operates on dim // num_heads channels).
        self.v = nn.Conv2d(dim // num_heads, dim // num_heads, 1, bias=qkv_bias)

    def forward(self, x):
        # x: (B, C, H, W)
        shortcut = x
        B, C, H, W = x.shape
        # Fold heads into the batch dimension: (B*heads, C//heads, H, W).
        x = x.reshape(B * self.num_heads, C // self.num_heads, H, W)
        v = self.v(x)

        attn = v
        for scale in self._SCALES:
            # NOTE(review): kernel size derives from H only (original design);
            # presumably inputs are square — confirm before non-square use.
            # max(1, ...) lets H < 16 degrade to identity pooling instead of
            # crashing (the original passed a kernel size of 0 there).
            kernel = max(1, H // scale)
            pooled = F.max_pool2d(v, kernel)
            # F.interpolate with align_corners=True is the exact replacement
            # for the deprecated F.upsample_bilinear.
            pooled = F.interpolate(pooled, size=(H, W), mode='bilinear',
                                   align_corners=True)
            attn = attn * pooled  # same order as the original v*p1*p2*p4*p8*p16

        attn = attn.reshape(B, C, H, W)  # merge heads back into channels
        return attn + shortcut

class VFormer(nn.Module):
    """Encoder-decoder segmentation network built from ``attention`` stages.

    The encoder applies ``len(num_heads)`` attention stages, max-pooling by 2
    after each one. The current forward path bilinearly upsamples the deepest
    feature map back to the input resolution and projects it to class logits;
    the U-Net-style decoder submodules below are constructed (and therefore
    part of the state_dict) but not used by ``forward``.

    Args:
        in_chans: number of input image channels.
        num_classes: number of output classes (logit channels).
        embed_dim: channel count produced by the 1x1 embedding conv.
        num_heads: heads per encoder stage; its length sets the stage count.
        qkv_bias: forwarded to each ``attention`` stage.
        norm_layer: kept for interface compatibility; not used by forward.
    """

    def __init__(self, in_chans=3, num_classes=2,
                 embed_dim=32, num_heads=[2, 4, 4, 8, 8],
                 qkv_bias=True,
                 norm_layer=nn.LayerNorm):
        super().__init__()
        # Channel width of each encoder stage.
        self.depths = [32, 32, 32, 32, 32]

        # 1x1 conv lifting the input image to embed_dim channels.
        self.embed = nn.Conv2d(in_chans, embed_dim, 1, bias=True)
        self.num_layers = len(num_heads)

        # Encoder: one attention stage per entry of num_heads.
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = attention(
                dim=self.depths[i_layer],
                num_heads=num_heads[i_layer],
                qkv_bias=qkv_bias)
            self.layers.append(layer)

        # Decoder submodules (currently unused by forward, kept so existing
        # checkpoints and the module interface stay compatible).
        self.ConvTranspose1_0 = nn.ConvTranspose2d(self.depths[0], self.depths[0], 2, 2)
        self.Conv1_0 = nn.Sequential(ConvBNRelu(self.depths[0], self.depths[0]),
                                     ConvBNRelu(self.depths[0], self.depths[0]))

        self.ConvTranspose2_1 = nn.ConvTranspose2d(self.depths[2], self.depths[1], 2, 2)
        self.Conv2_1 = nn.Sequential(ConvBNRelu(self.depths[1] * 2, self.depths[1]),
                                     ConvBNRelu(self.depths[1], self.depths[1]))
        self.ConvTranspose2_0 = nn.ConvTranspose2d(self.depths[1], self.depths[0], 2, 2)
        self.Conv2_0 = nn.Sequential(ConvBNRelu(self.depths[0], self.depths[0]),
                                     ConvBNRelu(self.depths[0], self.depths[0]))

        self.ConvTranspose3_2 = nn.ConvTranspose2d(self.depths[3], self.depths[2], 2, 2)
        self.Conv3_2 = nn.Sequential(ConvBNRelu(self.depths[2] * 2, self.depths[2]),
                                     ConvBNRelu(self.depths[2], self.depths[2]))
        self.ConvTranspose3_1 = nn.ConvTranspose2d(self.depths[2], self.depths[1], 2, 2)
        self.Conv3_1 = nn.Sequential(ConvBNRelu(self.depths[1] * 2, self.depths[1]),
                                     ConvBNRelu(self.depths[1], self.depths[1]))
        self.ConvTranspose3_0 = nn.ConvTranspose2d(self.depths[1], self.depths[0], 2, 2)
        self.Conv3_0 = nn.Sequential(ConvBNRelu(self.depths[0], self.depths[0]),
                                     ConvBNRelu(self.depths[0], self.depths[0]))

        self.ConvTranspose4_3 = nn.ConvTranspose2d(self.depths[4], self.depths[3], 2, 2)
        self.Conv4_3 = nn.Sequential(ConvBNRelu(self.depths[3] * 2, self.depths[3]),
                                     ConvBNRelu(self.depths[3], self.depths[3]))
        self.ConvTranspose4_2 = nn.ConvTranspose2d(self.depths[3], self.depths[2], 2, 2)
        self.Conv4_2 = nn.Sequential(ConvBNRelu(self.depths[2] * 2, self.depths[2]),
                                     ConvBNRelu(self.depths[2], self.depths[2]))
        self.ConvTranspose4_1 = nn.ConvTranspose2d(self.depths[2], self.depths[1], 2, 2)
        self.Conv4_1 = nn.Sequential(ConvBNRelu(self.depths[1] * 2, self.depths[1]),
                                     ConvBNRelu(self.depths[1], self.depths[1]))
        self.ConvTranspose4_0 = nn.ConvTranspose2d(self.depths[1], self.depths[0], 2, 2)
        self.Conv4_0 = nn.Sequential(ConvBNRelu(self.depths[0], self.depths[0]),
                                     ConvBNRelu(self.depths[0], self.depths[0]))

        self.ConvAll = nn.Sequential(ConvBNRelu(self.depths[0] * 5, self.depths[0]),
                                     ConvBNRelu(self.depths[0], self.depths[0]))
        # Final 1x1 projection to per-class logits.
        self.ConvOut = nn.Conv2d(self.depths[0], num_classes, 1, bias=True)

    def forward(self, x):
        # Remember the input spatial size so the logits can be restored to it.
        # (The original hard-coded 512x512 here, which silently produced the
        # wrong output size for any other input resolution.)
        out_size = x.shape[-2:]

        x = self.embed(x)
        levels = []  # per-stage features, kept for the (disabled) decoder path
        for layer in self.layers:
            x = layer(x)
            levels.append(x)
            x = F.max_pool2d(x, 2)  # halve the resolution between stages

        # F.interpolate with align_corners=True is the exact replacement for
        # the deprecated F.upsample_bilinear.
        out = F.interpolate(x, size=out_size, mode='bilinear', align_corners=True)
        out = self.ConvOut(out)

        return out

