# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models

import BuildFormer.geoseg.models.BuildFormer as BuildFormer
import BuildFormer.geoseg.models.newBuildFormer as newBuildFormer
import network.model as CFENet
import network.resnet as ResNet
import SwinTransformer.models.swin_transformer as sw
import patchNet
import cv2
import SwinTransformer.models.swin_transformer as swin

IMAGE_ORDERING = 'channels_first'
class ConvBNRelu(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU, the standard fused conv block.

    NOTE(review): several callers pass kernel_size=1 but leave the default
    padding=1, which grows each spatial dim by 2; downstream resizes absorb
    this -- confirm it is intentional.
    """

    def __init__(self, inChannel, outChannel, kernel_size=3, padding=1, bias=False):
        super(ConvBNRelu, self).__init__()
        self.conv = nn.Conv2d(inChannel, outChannel, kernel_size=kernel_size,
                              padding=padding, bias=bias)
        self.bn = nn.BatchNorm2d(outChannel, momentum=0.1, affine=True)
        self.reLu = nn.ReLU(inplace=True)

    def forward(self, x):
        # conv -> batch norm -> relu, as a single chained expression.
        return self.reLu(self.bn(self.conv(x)))

class ConvBNRelu1D(nn.Module):
    """Conv1d -> BatchNorm1d -> ReLU block for 1-D (sequence) features."""

    def __init__(self, inChannel, outChannel, kernel_size=3, padding=1, bias=False):
        super(ConvBNRelu1D, self).__init__()
        self.conv = nn.Conv1d(inChannel, outChannel, kernel_size=kernel_size,
                              padding=padding, bias=bias)
        self.bn = nn.BatchNorm1d(outChannel, momentum=0.1, affine=True)
        self.reLu = nn.ReLU(inplace=True)

    def forward(self, x):
        # conv -> batch norm -> relu, as a single chained expression.
        return self.reLu(self.bn(self.conv(x)))
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling (DeepLab-style).

    Five parallel branches over the same input: an image-level branch
    (global average pool + 1x1 conv, bilinearly upsampled back), a plain
    1x1 conv, and three 3x3 atrous convs with dilation 6/12/18.  The
    branches are concatenated and fused by a final 1x1 conv back to
    `depth` channels, preserving the input's spatial size.
    """

    def __init__(self, in_channel=512, depth=256):
        super(ASPP, self).__init__()
        # Image-level branch: global average pooling followed by a 1x1 conv.
        self.mean = nn.AdaptiveAvgPool2d((1, 1))
        self.conv = nn.Conv2d(in_channel, depth, 1, 1)
        # k=1 s=1, no padding.
        self.atrous_block1 = nn.Conv2d(in_channel, depth, 1, 1)
        # padding == dilation keeps the spatial size for 3x3 kernels.
        self.atrous_block6 = nn.Conv2d(in_channel, depth, 3, 1, padding=6, dilation=6)
        self.atrous_block12 = nn.Conv2d(in_channel, depth, 3, 1, padding=12, dilation=12)
        self.atrous_block18 = nn.Conv2d(in_channel, depth, 3, 1, padding=18, dilation=18)

        self.conv_1x1_output = nn.Conv2d(depth * 5, depth, 1, 1)

    def forward(self, x):
        size = x.shape[2:]

        image_features = self.mean(x)
        image_features = self.conv(image_features)
        # F.upsample is deprecated; F.interpolate with align_corners=False
        # reproduces its behavior without the deprecation warning.
        image_features = F.interpolate(image_features, size=size, mode='bilinear',
                                       align_corners=False)

        atrous_block1 = self.atrous_block1(x)
        atrous_block6 = self.atrous_block6(x)
        atrous_block12 = self.atrous_block12(x)
        atrous_block18 = self.atrous_block18(x)

        out = self.conv_1x1_output(torch.cat([image_features, atrous_block1, atrous_block6,
                                              atrous_block12, atrous_block18], dim=1))
        return out

class deConvBNRelu(nn.Module):
    """ConvTranspose2d -> BatchNorm2d -> ReLU upsampling block.

    With the defaults (kernel_size=3, padding=0, scale=2) the spatial size
    maps as out = 2 * in + 1.
    """

    def __init__(self, inChannel, outChannel, kernel_size=3, padding=0, bias=False, scale=2):
        super(deConvBNRelu, self).__init__()
        self.conv = nn.ConvTranspose2d(inChannel, outChannel, kernel_size=kernel_size,
                                       stride=scale, padding=padding, bias=bias)
        self.bn = nn.BatchNorm2d(outChannel, momentum=0.1, affine=True)
        self.reLu = nn.ReLU(inplace=True)

    def forward(self, x):
        # transposed conv -> batch norm -> relu, as a single chained expression.
        return self.reLu(self.bn(self.conv(x)))
class pool_block(nn.Module):
    """One PSP pyramid-pooling branch.

    Average-pools the (inH, inW) feature map into pool_factor x pool_factor
    cells (kernel == stride, non-overlapping), projects to outDepth channels
    with a 1x1 ConvBNRelu, and bilinearly upsamples back to (inH, inW).

    NOTE(review): ConvBNRelu keeps its default padding=1 even though
    kernel_size=1, so the pooled map grows by 2 px per side before the
    upsample absorbs it -- confirm this is intentional.
    """

    def __init__(self, inDepth, inH, inW, pool_factor, outDepth=32):
        super().__init__()
        self.inH = inH
        self.inW = inW
        # Kernel == stride: non-overlapping pooling windows.
        pool_size = strides = [inH // pool_factor, inW // pool_factor]

        self.outDepth = outDepth
        self.AvgPool2d = nn.AvgPool2d(pool_size, strides)
        self.ConvBNRelu = ConvBNRelu(inDepth, outDepth, kernel_size=1, bias=False)

    def forward(self, feats):
        x = self.AvgPool2d(feats)
        x = self.ConvBNRelu(x)
        # F.upsample_bilinear is deprecated; interpolate with
        # align_corners=True is its documented equivalent.
        x = F.interpolate(x, size=[self.inH, self.inW], mode='bilinear', align_corners=True)
        return x

class shiftLeftPoolBlock(nn.Module):
    """Pyramid-pooling branch whose grid is shifted half a window horizontally.

    The input is replication-padded by half a pooling window on the left and
    right, average-pooled (kernel == stride), projected with a 1x1 ConvBNRelu,
    nearest-upsampled, and cropped back to (inH, inW), so the pooling window
    boundaries fall half a cell off the regular grid.
    """

    def __init__(self, inDepth, inH, inW, pool_factor, outDepth=32):
        super().__init__()
        self.inH = inH
        self.inW = inW
        pool_size = strides = [inH // pool_factor, inW // pool_factor]
        # Half a pooling window along the width.  The original derived this
        # from the window *height* (pool_size[0]); using the width is
        # identical for the square maps used in this file and correct for
        # non-square feature maps.
        self.padSize = pool_size[1] // 2
        self.pad = nn.ReplicationPad2d(padding=(self.padSize, self.padSize, 0, 0))
        self.pool_factor = pool_factor
        self.outDepth = outDepth
        self.AvgPool2d = nn.AvgPool2d(pool_size, strides)
        self.ConvBNRelu = ConvBNRelu(inDepth, outDepth, kernel_size=1, bias=False)
        self.Upsample = nn.Upsample([inH, inW + 2 * self.padSize], mode='nearest')

    def forward(self, feats):
        x = self.pad(feats)
        x = self.AvgPool2d(x)
        x = self.ConvBNRelu(x)
        x = self.Upsample(x)
        # Crop the padded columns so the output matches the input size.
        x = x[:, :, :, self.padSize:self.inW + self.padSize]
        return x

class shiftTopPoolBlock(nn.Module):
    """Pyramid-pooling branch whose grid is shifted half a window vertically.

    Replication-pads the feature map by half a pooling window on top and
    bottom, average-pools (kernel == stride), projects with a 1x1 ConvBNRelu,
    nearest-upsamples, and crops the vertical padding back off so the output
    matches the (inH, inW) input size.
    """

    def __init__(self, inDepth, inH, inW, pool_factor, outDepth=32):
        super().__init__()
        self.inH = inH
        self.inW = inW
        pool_size = strides = [inH // pool_factor, inW // pool_factor]
        # Half a pooling window, along the height.
        self.padSize = pool_size[0] // 2
        self.pad = nn.ReplicationPad2d(padding=(0, 0, self.padSize, self.padSize))

        self.outDepth = outDepth
        self.AvgPool2d = nn.AvgPool2d(pool_size, strides)
        self.ConvBNRelu = ConvBNRelu(inDepth, outDepth, kernel_size=1, bias=False)
        self.Upsample = nn.Upsample([inH + 2 * self.padSize, inW], mode='nearest')

    def forward(self, feats):
        padded = self.pad(feats)
        pooled = self.AvgPool2d(padded)
        projected = self.ConvBNRelu(pooled)
        upsampled = self.Upsample(projected)
        # Crop the padded rows so the output matches the input height.
        return upsampled[:, :, self.padSize:self.inH + self.padSize, :]
class shiftTopLeftPoolBlock(nn.Module):
    """Pyramid-pooling branch shifted half a window both vertically and horizontally.

    Replication-pads on all four sides by half a pooling window, average-pools
    (kernel == stride), projects with a 1x1 ConvBNRelu, nearest-upsamples, and
    crops back to the (inH, inW) input size.
    """

    def __init__(self, inDepth, inH, inW, pool_factor, outDepth=32):
        super().__init__()
        self.inH = inH
        self.inW = inW
        pool_size = strides = [inH // pool_factor, inW // pool_factor]
        # Half a pooling window per axis.  The original used the window
        # height for both axes; deriving the horizontal pad from the window
        # width is identical for the square maps used in this file and
        # correct for non-square ones.
        self.padH = pool_size[0] // 2
        self.padW = pool_size[1] // 2
        self.padSize = self.padH  # kept for backward compatibility
        self.pad = nn.ReplicationPad2d(padding=(self.padW, self.padW, self.padH, self.padH))

        self.outDepth = outDepth
        self.AvgPool2d = nn.AvgPool2d(pool_size, strides)
        self.ConvBNRelu = ConvBNRelu(inDepth, outDepth, kernel_size=1, bias=False)
        self.Upsample = nn.Upsample([inH + 2 * self.padH, inW + 2 * self.padW], mode='nearest')

    def forward(self, feats):
        x = self.pad(feats)
        x = self.AvgPool2d(x)
        x = self.ConvBNRelu(x)
        x = self.Upsample(x)
        # Crop the padded borders so the output matches the input size.
        x = x[:, :, self.padH:self.inH + self.padH, self.padW:self.inW + self.padW]
        return x
##-----------------------------------------------------------------------------

class BuildFormer_ShiftPoolingPSPNet(nn.Module):
    """BuildFormer backbone + shift-pooling PSP head for 2-class segmentation.

    The deepest backbone feature map (192 channels; the pool blocks assume
    64x64 -- TODO confirm against BuildFormer4PSPNet) is reduced to 128
    channels and pooled at pyramid scales 1/2/4/6.  Scales 2/4/6 also run
    left / top / top-left half-window-shifted pooling variants, fused per
    scale by a 1x1 conv.  A cascade of stride-2 transposed convolutions with
    dense nearest-neighbor skip connections decodes to full resolution.
    """

    def __init__(self):
        super().__init__()
        self.backbone = BuildFormer.BuildFormer4PSPNet(layers=[2, 2, 6, 2], num_heads=[4, 8, 16, 32],
                                                       dims=[96, 192, 192, 192], window_sizes=[16, 16, 16, 16])
        # Reduce backbone channels to 128 before pooling.
        self.convDepth = nn.Conv2d(192, 128, 1)

        self.normal1 = pool_block(128, 64, 64, 1, 32)

        self.normal2 = pool_block(128, 64, 64, 2, 32)
        self.shiftLeft2 = shiftLeftPoolBlock(128, 64, 64, 2, 32)
        self.shiftTop2 = shiftTopPoolBlock(128, 64, 64, 2, 32)
        self.shiftTopLeft2 = shiftTopLeftPoolBlock(128, 64, 64, 2, 32)

        self.normal4 = pool_block(128, 64, 64, 4, 32)
        self.shiftLeft4 = shiftLeftPoolBlock(128, 64, 64, 4, 32)
        self.shiftTop4 = shiftTopPoolBlock(128, 64, 64, 4, 32)
        self.shiftTopLeft4 = shiftTopLeftPoolBlock(128, 64, 64, 4, 32)

        self.normal6 = pool_block(128, 64, 64, 6, 32)
        self.shiftLeft6 = shiftLeftPoolBlock(128, 64, 64, 6, 32)
        self.shiftTop6 = shiftTopPoolBlock(128, 64, 64, 6, 32)
        self.shiftTopLeft6 = shiftTopLeftPoolBlock(128, 64, 64, 6, 32)

        # Per-scale fusion of the four shifted-pooling branches.
        self.convFuse2 = nn.Conv2d(32 * 4, 32, 1, bias=False)
        self.convFuse4 = nn.Conv2d(32 * 4, 32, 1, bias=False)
        self.convFuse6 = nn.Conv2d(32 * 4, 32, 1, bias=False)

        # 128 (identity path) + 4 scales * 32 = 32 * 8 fused channels.
        self.convBNRelu = ConvBNRelu(32 * 8, 32, 3)

        # Decoder: three x2 transposed convolutions with dense skips.
        self.ConvTranspose1 = nn.ConvTranspose2d(32, 32, 2, 2)
        self.ConvTranspose2 = nn.ConvTranspose2d(32, 32, 2, 2)
        self.ConvTranspose3 = nn.ConvTranspose2d(32, 32, 2, 2)

        self.Conv1 = nn.Conv2d(32, 32, 3, bias=True, padding=1)
        self.Conv2 = nn.Conv2d(64, 32, 3, bias=True, padding=1)
        self.Conv3 = nn.Conv2d(96, 32, 3, bias=True, padding=1)
        self.Conv4 = nn.Conv2d(32, 2, 3, bias=True, padding=1)

    def _fuse_scale(self, feats, normal, left, top, topleft, fuse):
        """Run the four pooling variants of one pyramid scale and fuse them with a 1x1 conv."""
        branches = [normal(feats), left(feats), top(feats), topleft(feats)]
        return fuse(torch.cat(branches, dim=1))

    def forward(self, x):
        _, _, _, high_level_features = self.backbone(x)
        # Reduce channels to 128 before pooling.
        high_level_features = self.convDepth(high_level_features)

        pool_outs = [high_level_features]
        # Scale 1 has no shifted variants.
        pool_outs.append(self.normal1(high_level_features))
        pool_outs.append(self._fuse_scale(high_level_features, self.normal2, self.shiftLeft2,
                                          self.shiftTop2, self.shiftTopLeft2, self.convFuse2))
        pool_outs.append(self._fuse_scale(high_level_features, self.normal4, self.shiftLeft4,
                                          self.shiftTop4, self.shiftTopLeft4, self.convFuse4))
        pool_outs.append(self._fuse_scale(high_level_features, self.normal6, self.shiftLeft6,
                                          self.shiftTop6, self.shiftTopLeft6, self.convFuse6))

        o = self.convBNRelu(torch.cat(pool_outs, dim=1))

        # Decoder stage 1: x2.
        up1 = self.Conv1(self.ConvTranspose1(o))

        # Decoder stage 2: x4, with a nearest-neighbor skip from o.
        # (F.upsample_nearest is deprecated; F.interpolate is its equivalent.)
        up2 = self.ConvTranspose2(up1)
        oto2 = F.interpolate(o, scale_factor=4, mode='nearest')
        up2 = self.Conv2(torch.cat([oto2, up2], dim=1))

        # Decoder stage 3: x8, with skips from o and up1.
        up3 = self.ConvTranspose3(up2)
        oto3 = F.interpolate(o, scale_factor=8, mode='nearest')
        up1to3 = F.interpolate(up1, scale_factor=4, mode='nearest')
        up3 = self.Conv3(torch.cat([oto3, up1to3, up3], dim=1))

        # Per-pixel 2-class logits.
        return self.Conv4(up3)

class CFENet_PSPNet(nn.Module):
    """CFENet (CLNet) backbone + plain PSP head for 2-class segmentation.

    The backbone's high-level feature map (144 channels; pool blocks assume
    64x64 -- TODO confirm against CLNet) is pooled at the classic PSPNet
    pyramid scales 1/2/3/6, fused, projected to 2 classes, and bilinearly
    upsampled straight to 512x512.
    """

    def __init__(self):
        super().__init__()
        self.backbone = CFENet.CLNet(nclass=2)
        self.inDepth = 144            # channels of the backbone's high-level map
        self.outDepth = self.inDepth // 4
        self.normal1 = pool_block(self.inDepth, 64, 64, 1, self.outDepth)
        self.normal2 = pool_block(self.inDepth, 64, 64, 2, self.outDepth)
        self.normal3 = pool_block(self.inDepth, 64, 64, 3, self.outDepth)
        self.normal6 = pool_block(self.inDepth, 64, 64, 6, self.outDepth)
        # 144 (identity path) + 4 * 36 = outDepth * 8 fused channels.
        self.convBNRelu = ConvBNRelu(self.outDepth * 8, self.outDepth, 3, bias=False)
        self.Conv4 = nn.Conv2d(self.outDepth, 2, 1, bias=True)

    def forward(self, x):
        _, high_level_features = self.backbone(x)
        pool_outs = [high_level_features]
        # PSPNet pyramid scales 1/2/3/6.
        for branch in (self.normal1, self.normal2, self.normal3, self.normal6):
            pool_outs.append(branch(high_level_features))
        o = self.convBNRelu(torch.cat(pool_outs, dim=1))
        o = self.Conv4(o)
        # Upsample straight to the output resolution.  F.upsample_bilinear is
        # deprecated; interpolate with align_corners=True is its documented
        # equivalent.
        return F.interpolate(o, size=[512, 512], mode='bilinear', align_corners=True)
class CFENet_ShiftPoolingPSPNet(nn.Module):
    """CFENet (CLNet) backbone + shift-pooling PSP head for 2-class segmentation.

    The 144-channel high-level feature map (assumed 64x64 -- TODO confirm) is
    pooled at pyramid scales 1/2/4/6; scales 2/4/6 also run left / top /
    top-left half-window-shifted variants fused per scale.  A cascade of
    stride-2 transposed convolutions with dense nearest-neighbor skips
    decodes the fused features; a final 1x1 conv yields 2-class logits.
    """

    def __init__(self):
        super().__init__()
        self.backbone = CFENet.CLNet(nclass=2)
        self.inDepth = 144
        self.outDepth = self.inDepth // 4
        self.normal1 = pool_block(self.inDepth, 64, 64, 1, self.outDepth)

        self.normal2 = pool_block(self.inDepth, 64, 64, 2, self.outDepth)
        self.shiftLeft2 = shiftLeftPoolBlock(self.inDepth, 64, 64, 2, self.outDepth)
        self.shiftTop2 = shiftTopPoolBlock(self.inDepth, 64, 64, 2, self.outDepth)
        self.shiftTopLeft2 = shiftTopLeftPoolBlock(self.inDepth, 64, 64, 2, self.outDepth)

        self.normal4 = pool_block(self.inDepth, 64, 64, 4, self.outDepth)
        self.shiftLeft4 = shiftLeftPoolBlock(self.inDepth, 64, 64, 4, self.outDepth)
        self.shiftTop4 = shiftTopPoolBlock(self.inDepth, 64, 64, 4, self.outDepth)
        self.shiftTopLeft4 = shiftTopLeftPoolBlock(self.inDepth, 64, 64, 4, self.outDepth)

        self.normal6 = pool_block(self.inDepth, 64, 64, 6, self.outDepth)
        self.shiftLeft6 = shiftLeftPoolBlock(self.inDepth, 64, 64, 6, self.outDepth)
        self.shiftTop6 = shiftTopPoolBlock(self.inDepth, 64, 64, 6, self.outDepth)
        self.shiftTopLeft6 = shiftTopLeftPoolBlock(self.inDepth, 64, 64, 6, self.outDepth)

        # Per-scale fusion of the four shifted-pooling branches.
        self.convFuse2 = nn.Conv2d(self.outDepth * 4, self.outDepth, 1, bias=False)
        self.convFuse4 = nn.Conv2d(self.outDepth * 4, self.outDepth, 1, bias=False)
        self.convFuse6 = nn.Conv2d(self.outDepth * 4, self.outDepth, 1, bias=False)

        # 144 (identity path) + 4 scales * 36 = outDepth * 8 fused channels.
        self.convBNRelu = ConvBNRelu(self.outDepth * 8, self.outDepth, 3)

        # Decoder: three x2 transposed convolutions with dense skips.
        self.ConvTranspose1 = nn.ConvTranspose2d(self.outDepth, self.outDepth, 2, 2)
        self.ConvTranspose2 = nn.ConvTranspose2d(self.outDepth, self.outDepth, 2, 2)
        self.ConvTranspose3 = nn.ConvTranspose2d(self.outDepth, self.outDepth, 2, 2)

        self.Conv1 = nn.Conv2d(self.outDepth, self.outDepth, 3, bias=True, padding=1)
        self.Conv2 = nn.Conv2d(self.outDepth * 2, self.outDepth, 3, bias=True, padding=1)
        self.Conv3 = nn.Conv2d(self.outDepth * 3, self.outDepth, 3, bias=True, padding=1)

        self.Conv4 = nn.Conv2d(self.outDepth, 2, 1, bias=True)

    def _fuse_scale(self, feats, normal, left, top, topleft, fuse):
        """Run the four pooling variants of one pyramid scale and fuse them with a 1x1 conv."""
        branches = [normal(feats), left(feats), top(feats), topleft(feats)]
        return fuse(torch.cat(branches, dim=1))

    def forward(self, x):
        _, high_level_features = self.backbone(x)

        pool_outs = [high_level_features]
        # Scale 1 has no shifted variants.
        pool_outs.append(self.normal1(high_level_features))
        pool_outs.append(self._fuse_scale(high_level_features, self.normal2, self.shiftLeft2,
                                          self.shiftTop2, self.shiftTopLeft2, self.convFuse2))
        pool_outs.append(self._fuse_scale(high_level_features, self.normal4, self.shiftLeft4,
                                          self.shiftTop4, self.shiftTopLeft4, self.convFuse4))
        pool_outs.append(self._fuse_scale(high_level_features, self.normal6, self.shiftLeft6,
                                          self.shiftTop6, self.shiftTopLeft6, self.convFuse6))

        o = self.convBNRelu(torch.cat(pool_outs, dim=1))

        # Decoder stage 1: x2.
        up1 = self.Conv1(self.ConvTranspose1(o))

        # Decoder stage 2: x4, with a nearest-neighbor skip from o.
        # (F.upsample_nearest is deprecated; F.interpolate is its equivalent.)
        up2 = self.ConvTranspose2(up1)
        oto2 = F.interpolate(o, scale_factor=4, mode='nearest')
        up2 = self.Conv2(torch.cat([oto2, up2], dim=1))

        # Decoder stage 3: x8, with skips from o and up1.
        up3 = self.ConvTranspose3(up2)
        oto3 = F.interpolate(o, scale_factor=8, mode='nearest')
        up1to3 = F.interpolate(up1, scale_factor=4, mode='nearest')
        up3 = self.Conv3(torch.cat([oto3, up1to3, up3], dim=1))

        # Per-pixel 2-class logits.
        return self.Conv4(up3)

class ResNet152_PSPNet(nn.Module):
    """ResNet-152 backbone + plain PSP head for 2-class segmentation.

    The 2048-channel deepest feature map (pool blocks assume 64x64 -- TODO
    confirm against network.resnet) is pooled at PSPNet scales 1/2/3/6,
    fused, projected to 2 classes, and bilinearly upsampled to 512x512.
    """

    def __init__(self):
        super().__init__()
        self.backbone = ResNet.resnet152()
        self.inDepth = 2048           # deepest backbone feature channels
        self.outDepth = 512
        self.normal1 = pool_block(self.inDepth, 64, 64, 1, self.outDepth)
        self.normal2 = pool_block(self.inDepth, 64, 64, 2, self.outDepth)
        self.normal3 = pool_block(self.inDepth, 64, 64, 3, self.outDepth)
        self.normal6 = pool_block(self.inDepth, 64, 64, 6, self.outDepth)
        # 2048 (identity path) + 4 * 512 = outDepth * 8 fused channels.
        self.convBNRelu = ConvBNRelu(self.outDepth * 8, self.outDepth, 3, bias=False)
        self.Conv4 = nn.Conv2d(self.outDepth, 2, 1, bias=True)

    def forward(self, x):
        _, _, _, high_level_features = self.backbone(x)
        pool_outs = [high_level_features]
        # PSPNet pyramid scales 1/2/3/6.
        for branch in (self.normal1, self.normal2, self.normal3, self.normal6):
            pool_outs.append(branch(high_level_features))
        o = self.convBNRelu(torch.cat(pool_outs, dim=1))
        o = self.Conv4(o)
        # Upsample straight to the output resolution.  F.upsample_bilinear is
        # deprecated; interpolate with align_corners=True is its documented
        # equivalent.
        return F.interpolate(o, size=[512, 512], mode='bilinear', align_corners=True)
class ResNet152_ShiftPoolingPSPNet(nn.Module):
    """ResNet-152 backbone + shift-pooling PSP head for 2-class segmentation.

    The 2048-channel deepest feature map is reduced to 128 channels, pooled
    at pyramid scales 1/2/4/6 (scales 2/4/6 with left / top / top-left
    half-window-shifted variants fused per scale), then decoded with stride-2
    transposed convolutions and dense nearest-neighbor skips.
    """

    def __init__(self):
        super().__init__()
        self.backbone = ResNet.resnet152()
        # Reduce backbone channels to 128 before pooling.
        self.convDepth = nn.Conv2d(2048, 128, 1)
        self.inDepth = 128
        self.outDepth = self.inDepth // 4
        self.normal1 = pool_block(self.inDepth, 64, 64, 1, self.outDepth)

        self.normal2 = pool_block(self.inDepth, 64, 64, 2, self.outDepth)
        self.shiftLeft2 = shiftLeftPoolBlock(self.inDepth, 64, 64, 2, self.outDepth)
        self.shiftTop2 = shiftTopPoolBlock(self.inDepth, 64, 64, 2, self.outDepth)
        self.shiftTopLeft2 = shiftTopLeftPoolBlock(self.inDepth, 64, 64, 2, self.outDepth)

        self.normal4 = pool_block(self.inDepth, 64, 64, 4, self.outDepth)
        self.shiftLeft4 = shiftLeftPoolBlock(self.inDepth, 64, 64, 4, self.outDepth)
        self.shiftTop4 = shiftTopPoolBlock(self.inDepth, 64, 64, 4, self.outDepth)
        self.shiftTopLeft4 = shiftTopLeftPoolBlock(self.inDepth, 64, 64, 4, self.outDepth)

        self.normal6 = pool_block(self.inDepth, 64, 64, 6, self.outDepth)
        self.shiftLeft6 = shiftLeftPoolBlock(self.inDepth, 64, 64, 6, self.outDepth)
        self.shiftTop6 = shiftTopPoolBlock(self.inDepth, 64, 64, 6, self.outDepth)
        self.shiftTopLeft6 = shiftTopLeftPoolBlock(self.inDepth, 64, 64, 6, self.outDepth)

        # Per-scale fusion of the four shifted-pooling branches.
        self.convFuse2 = nn.Conv2d(self.outDepth * 4, self.outDepth, 1, bias=False)
        self.convFuse4 = nn.Conv2d(self.outDepth * 4, self.outDepth, 1, bias=False)
        self.convFuse6 = nn.Conv2d(self.outDepth * 4, self.outDepth, 1, bias=False)

        # 128 (identity path) + 4 scales * 32 = outDepth * 8 fused channels.
        self.convBNRelu = ConvBNRelu(self.outDepth * 8, self.outDepth, 3)

        # Decoder: three x2 transposed convolutions with dense skips.
        self.ConvTranspose1 = nn.ConvTranspose2d(self.outDepth, self.outDepth, 2, 2)
        self.ConvTranspose2 = nn.ConvTranspose2d(self.outDepth, self.outDepth, 2, 2)
        self.ConvTranspose3 = nn.ConvTranspose2d(self.outDepth, self.outDepth, 2, 2)

        self.Conv1 = nn.Conv2d(self.outDepth, self.outDepth, 3, bias=True, padding=1)
        self.Conv2 = nn.Conv2d(self.outDepth * 2, self.outDepth, 3, bias=True, padding=1)
        self.Conv3 = nn.Conv2d(self.outDepth * 3, self.outDepth, 3, bias=True, padding=1)

        self.Conv4 = nn.Conv2d(self.outDepth, 2, 1, bias=True)

    def _fuse_scale(self, feats, normal, left, top, topleft, fuse):
        """Run the four pooling variants of one pyramid scale and fuse them with a 1x1 conv."""
        branches = [normal(feats), left(feats), top(feats), topleft(feats)]
        return fuse(torch.cat(branches, dim=1))

    def forward(self, x):
        _, _, _, high_level_features = self.backbone(x)
        # Reduce channels to 128 before pooling.
        high_level_features = self.convDepth(high_level_features)

        pool_outs = [high_level_features]
        # Scale 1 has no shifted variants.
        pool_outs.append(self.normal1(high_level_features))
        pool_outs.append(self._fuse_scale(high_level_features, self.normal2, self.shiftLeft2,
                                          self.shiftTop2, self.shiftTopLeft2, self.convFuse2))
        pool_outs.append(self._fuse_scale(high_level_features, self.normal4, self.shiftLeft4,
                                          self.shiftTop4, self.shiftTopLeft4, self.convFuse4))
        pool_outs.append(self._fuse_scale(high_level_features, self.normal6, self.shiftLeft6,
                                          self.shiftTop6, self.shiftTopLeft6, self.convFuse6))

        o = self.convBNRelu(torch.cat(pool_outs, dim=1))

        # Decoder stage 1: x2.
        up1 = self.Conv1(self.ConvTranspose1(o))

        # Decoder stage 2: x4, with a nearest-neighbor skip from o.
        # (F.upsample_nearest is deprecated; F.interpolate is its equivalent.)
        up2 = self.ConvTranspose2(up1)
        oto2 = F.interpolate(o, scale_factor=4, mode='nearest')
        up2 = self.Conv2(torch.cat([oto2, up2], dim=1))

        # Decoder stage 3: x8, with skips from o and up1.
        up3 = self.ConvTranspose3(up2)
        oto3 = F.interpolate(o, scale_factor=8, mode='nearest')
        up1to3 = F.interpolate(up1, scale_factor=4, mode='nearest')
        up3 = self.Conv3(torch.cat([oto3, up1to3, up3], dim=1))

        # Per-pixel 2-class logits.
        return self.Conv4(up3)

class ResNet50_PSPNet(nn.Module):
    """ResNet-50 backbone + plain PSP head for 2-class segmentation.

    The 2048-channel deepest feature map (pool blocks assume 64x64 -- TODO
    confirm against network.resnet) is pooled at PSPNet scales 1/2/3/6,
    fused, projected to 2 classes, and bilinearly upsampled to 512x512.
    """

    def __init__(self):
        super().__init__()
        self.backbone = ResNet.resnet50()
        self.inDepth = 2048           # deepest backbone feature channels
        self.outDepth = self.inDepth // 4
        self.normal1 = pool_block(self.inDepth, 64, 64, 1, self.outDepth)
        self.normal2 = pool_block(self.inDepth, 64, 64, 2, self.outDepth)
        self.normal3 = pool_block(self.inDepth, 64, 64, 3, self.outDepth)
        self.normal6 = pool_block(self.inDepth, 64, 64, 6, self.outDepth)
        # 2048 (identity path) + 4 * 512 = outDepth * 8 fused channels.
        self.convBNRelu = ConvBNRelu(self.outDepth * 8, self.outDepth, 3, bias=False)
        self.Conv4 = nn.Conv2d(self.outDepth, 2, 1, bias=True)

    def forward(self, x):
        _, _, _, high_level_features = self.backbone(x)
        pool_outs = [high_level_features]
        # PSPNet pyramid scales 1/2/3/6.
        for branch in (self.normal1, self.normal2, self.normal3, self.normal6):
            pool_outs.append(branch(high_level_features))
        o = self.convBNRelu(torch.cat(pool_outs, dim=1))
        o = self.Conv4(o)
        # Upsample straight to the output resolution.  F.upsample_bilinear is
        # deprecated; interpolate with align_corners=True is its documented
        # equivalent.
        return F.interpolate(o, size=[512, 512], mode='bilinear', align_corners=True)
class TVResNet50_PSPNet16(nn.Module):
    """torchvision ResNet-50 backbone + plain PSP head over a 16x16 layer4 map.

    The pool blocks are built for a 16x16 high-level feature map, which for a
    standard ResNet-50 (stride 32) corresponds to a 512x512 input -- TODO
    confirm against the training pipeline.
    """

    def __init__(self):
        super().__init__()
        self.backbone = torchvision.models.resnet50()
        self.inDepth = 2048           # layer4 channels
        self.outDepth = self.inDepth // 4
        self.normal1 = pool_block(self.inDepth, 16, 16, 1, self.outDepth)
        self.normal2 = pool_block(self.inDepth, 16, 16, 2, self.outDepth)
        self.normal3 = pool_block(self.inDepth, 16, 16, 3, self.outDepth)
        self.normal6 = pool_block(self.inDepth, 16, 16, 6, self.outDepth)
        # 2048 (identity path) + 4 * 512 = outDepth * 8 fused channels.
        self.convBNRelu = ConvBNRelu(self.outDepth * 8, self.outDepth, 3, bias=False)
        self.Conv4 = nn.Conv2d(self.outDepth, 2, 1, bias=True)

    def forward(self, x):
        # Run the torchvision trunk module-by-module so we can grab layer4's
        # output, and stop there.  (The original kept iterating and applied
        # avgpool needlessly before breaking at fc; the captured features --
        # and hence the output -- are identical.)
        high_level_features = None
        for name, module in self.backbone.named_children():
            x = module(x)
            if name == "layer4":
                high_level_features = x
                break

        pool_outs = [high_level_features]
        # PSPNet pyramid scales 1/2/3/6.
        for branch in (self.normal1, self.normal2, self.normal3, self.normal6):
            pool_outs.append(branch(high_level_features))
        o = self.convBNRelu(torch.cat(pool_outs, dim=1))
        o = self.Conv4(o)
        # Upsample straight to the output resolution.  F.upsample_bilinear is
        # deprecated; interpolate with align_corners=True is its documented
        # equivalent.
        return F.interpolate(o, size=[512, 512], mode='bilinear', align_corners=True)


class ResNet50_M_PSPNet(nn.Module):
    """Multi-level PSP head over ResNet-50.

    A deep PSP head (A) over the layer4 feature map (2048ch, assumed 64x64)
    is upsampled to 128x128 and added as a residual into a shallow PSP head
    (B) over the layer1 feature map (256ch, assumed 128x128 -- TODO confirm
    against network.resnet).  Head B projects to 2-class logits, which are
    bilinearly upsampled to 512x512.
    """

    def __init__(self):
        super().__init__()
        self.backbone = ResNet.resnet50()
        self.inDepth = 2048           # layer4 channels
        self.outDepth = self.inDepth // 4
        # Head A: pyramid pooling over layer4.
        self.normal1A = pool_block(self.inDepth, 64, 64, 1, self.outDepth)
        self.normal2A = pool_block(self.inDepth, 64, 64, 2, self.outDepth)
        self.normal3A = pool_block(self.inDepth, 64, 64, 3, self.outDepth)
        self.normal6A = pool_block(self.inDepth, 64, 64, 6, self.outDepth)
        self.convBNReluA = ConvBNRelu(self.outDepth * 8, self.outDepth, 3, bias=False)

        # Head B: pyramid pooling over layer1 (256ch -> 4 x 64ch branches).
        self.normal1B = pool_block(256, 128, 128, 1, 64)
        self.normal2B = pool_block(256, 128, 128, 2, 64)
        self.normal3B = pool_block(256, 128, 128, 3, 64)
        self.normal6B = pool_block(256, 128, 128, 6, 64)
        # 256 (identity path) + 4 * 64 = 512 channels in -> 2 classes out.
        self.convBNReluB = ConvBNRelu(256 * 2, 2, 3, bias=False)
        # NOTE(review): self.Out is constructed but never used in forward();
        # kept for checkpoint compatibility -- confirm whether it can go.
        self.Out = nn.Conv2d(2, 2, 1, bias=True)

    def forward(self, x):
        layer1, layer2, layer3, layer4 = self.backbone(x)

        # Head A over layer4, at pyramid scales 1/2/3/6.
        pool_outs = [layer4]
        for branch in (self.normal1A, self.normal2A, self.normal3A, self.normal6A):
            pool_outs.append(branch(layer4))
        A = self.convBNReluA(torch.cat(pool_outs, dim=1))
        # F.upsample_bilinear is deprecated; interpolate with
        # align_corners=True is its documented equivalent.
        A = F.interpolate(A, size=[128, 128], mode='bilinear', align_corners=True)

        # Head B over layer1, with head A added in as a residual before fusion.
        pool_outs = [layer1]
        for branch in (self.normal1B, self.normal2B, self.normal3B, self.normal6B):
            pool_outs.append(branch(layer1))
        B = torch.cat(pool_outs, dim=1) + A
        B = self.convBNReluB(B)

        # Upsample the 2-class logits to the output resolution.
        return F.interpolate(B, size=[512, 512], mode='bilinear', align_corners=True)

class TVResNet50_PSPNet64(nn.Module):
    """ResNet-50 truncated after layer2 followed by a 4-branch pyramid-pooling
    head and a 2-class output upsampled to 256x256."""

    def __init__(self):
        super().__init__()
        self.backbone = torchvision.models.resnet50()
        self.inDepth = 512               # channels of the layer2 feature map
        self.outDepth = self.inDepth // 4
        # Pyramid-pooling branches over a 32x32 grid, factors 1/2/3/6.
        self.normal1 = pool_block(self.inDepth, 32, 32, 1, self.outDepth)
        self.normal2 = pool_block(self.inDepth, 32, 32, 2, self.outDepth)
        self.normal3 = pool_block(self.inDepth, 32, 32, 3, self.outDepth)
        self.normal6 = pool_block(self.inDepth, 32, 32, 6, self.outDepth)
        # Fuses original (4*outDepth ch) + four branches (outDepth ch each).
        self.convBNRelu = ConvBNRelu(self.outDepth * 8, self.outDepth, 3, bias=False)
        self.Conv4 = nn.Conv2d(self.outDepth, 2, 1, bias=True)

    def forward(self, x):
        # Run the backbone up to (and including) layer2 only; the redundant
        # 'fc' guard of the original is unreachable and has been dropped.
        high_level_features = None
        for name, module in self.backbone.named_children():
            x = module(x)
            if name == "layer2":
                high_level_features = x
                break

        pool_outs = [high_level_features]
        for branch in (self.normal1, self.normal2, self.normal3, self.normal6):
            pool_outs.append(branch(high_level_features))
        o = torch.cat(pool_outs, dim=1)
        o = self.convBNRelu(o)
        o = self.Conv4(o)
        # F.upsample_bilinear is deprecated; interpolate with
        # align_corners=True is its documented exact equivalent.
        o = F.interpolate(o, size=(256, 256), mode="bilinear", align_corners=True)
        return o

class PSPBlock(nn.Module):
    """Pyramid-pooling block: concatenates the input with pooled variants of
    itself and fuses back to the input channel count.

    Args:
        inDim: input channel count (output channel count is the same).
        H, W: spatial size of the input feature map.
        factorNum: number of pooling branches actually used (4 or 2).
        factor1..factor4: the pooling factors of the four branches.

    Raises:
        ValueError: if ``factorNum`` is not 2 or 4.
    """

    def __init__(self, inDim, H, W, factorNum=4, factor1=1, factor2=2, factor3=3, factor4=6):
        super().__init__()
        self.inDepth = inDim
        self.outDepth = self.inDepth // 4
        self.normal1 = pool_block(self.inDepth, H, W, factor1, self.outDepth)
        self.normal2 = pool_block(self.inDepth, H, W, factor2, self.outDepth)
        self.normal3 = pool_block(self.inDepth, H, W, factor3, self.outDepth)
        self.normal4 = pool_block(self.inDepth, H, W, factor4, self.outDepth)
        # Fuse conv input: x itself (4*outDepth ch) + outDepth per used branch.
        if factorNum == 4:
            self.convBNRelu = ConvBNRelu(self.outDepth * 8, self.inDepth, 3, bias=False)
        elif factorNum == 2:
            self.convBNRelu = ConvBNRelu(self.outDepth * 6, self.inDepth, 3, bias=False)
        else:
            # Fail fast: the original silently left convBNRelu undefined here,
            # which only surfaced later as an AttributeError in forward().
            raise ValueError(f"factorNum must be 2 or 4, got {factorNum}")

        self.factorNum = factorNum

    def forward(self, x):
        # Input plus the first two pooling branches; branches 3/4 only when
        # all four factors are enabled.
        pool_outs = [x, self.normal1(x), self.normal2(x)]
        if self.factorNum == 4:
            pool_outs.append(self.normal3(x))
            pool_outs.append(self.normal4(x))

        o = torch.cat(pool_outs, dim=1)
        return self.convBNRelu(o)

class TVResNet50_M_PSPNet(nn.Module):
    """Multi-level PSP head: one PSPBlock per ResNet-50 stage, each upsampled
    to 256x256, concatenated and projected to 2 classes."""

    def __init__(self):
        super().__init__()
        self.backbone = torchvision.models.resnet50(pretrained=False)
        # One pyramid-pooling block per backbone stage
        # (channels, H, W, branch count, pooling factors).
        self.pspblock1 = PSPBlock(256, 64, 64, 4, 1, 4, 6, 12)
        self.pspblock2 = PSPBlock(512, 32, 32, 4, 1, 2, 3, 6)
        self.pspblock3 = PSPBlock(1024, 16, 16, 4, 1, 2, 3, 6)
        self.pspblock4 = PSPBlock(2048, 8, 8, 4, 1, 2, 3, 6)
        # NOTE(review): PSPBlock outputs `inDim` channels, so the concat in
        # forward() carries 256+512+1024+2048 channels while this 1x1 conv
        # expects 8 — confirm the intended PSPBlock output width.
        self.tail = nn.Conv2d(8, 2, 1, bias=False)

    def forward(self, input):
        # Collect the four stage outputs; stop right after layer4 — the
        # original needlessly kept executing the classifier's avgpool.
        feats = {}
        x = input
        for name, module in self.backbone.named_children():
            if name == "fc":
                break
            x = module(x)
            if name in ("layer1", "layer2", "layer3", "layer4"):
                feats[name] = x
                if name == "layer4":
                    break

        def head(block, feat):
            # Pyramid-pool a stage and upsample to the common 256x256 grid.
            # F.upsample_bilinear is deprecated; interpolate with
            # align_corners=True is its documented exact equivalent.
            return F.interpolate(block(feat), size=(256, 256),
                                 mode="bilinear", align_corners=True)

        out = torch.cat([head(self.pspblock1, feats["layer1"]),
                         head(self.pspblock2, feats["layer2"]),
                         head(self.pspblock3, feats["layer3"]),
                         head(self.pspblock4, feats["layer4"])], dim=1)
        return self.tail(out)

class TVResNet50_M_F_PSPNet(nn.Module):
    """Like TVResNet50_M_PSPNet but with an extra window-attention branch on
    the layer3 features (g4) concatenated with the four PSP outputs."""

    def __init__(self):
        super().__init__()
        self.backbone = torchvision.models.resnet50(pretrained=False)
        # One pyramid-pooling block per backbone stage.
        self.pspblock1 = PSPBlock(256, 64, 64, 4, 1, 4, 6, 12)
        self.pspblock2 = PSPBlock(512, 32, 32, 4, 1, 2, 3, 6)
        self.pspblock3 = PSPBlock(1024, 16, 16, 4, 1, 2, 3, 6)
        self.pspblock4 = PSPBlock(2048, 8, 8, 4, 1, 2, 3, 6)

        # Global branch: reduce layer3 to 256 channels, window attention with
        # a residual connection, then a 2-class projection.
        self.convBNRelu = ConvBNRelu(1024, 256, 3, bias=False)
        self.LinearAttention = nn.Sequential(
            patchNet.LWMSA(256)
        )
        self.Conv = nn.Conv2d(256, 2, 1, bias=True)

        # NOTE(review): given PSPBlock outputs `inDim` channels, the concat in
        # forward() carries far more than the 10 channels this conv expects —
        # confirm the intended PSPBlock output width.
        self.tail = nn.Conv2d(10, 2, 1, bias=False)

    def forward(self, input):
        # Collect the stage outputs; stop after layer4 (the original kept
        # running the classifier's avgpool for nothing).
        feats = {}
        x = input
        for name, module in self.backbone.named_children():
            if name == "fc":
                break
            x = module(x)
            if name in ("layer1", "layer2", "layer3", "layer4"):
                feats[name] = x
                if name == "layer4":
                    break

        def up(t):
            # Exact replacement for the deprecated F.upsample_bilinear.
            return F.interpolate(t, size=(256, 256), mode="bilinear", align_corners=True)

        # Attention branch on layer3 (layer3 is reused by pspblock3 below).
        g4 = self.convBNRelu(feats["layer3"])
        g4 = self.LinearAttention(g4) + g4   # residual connection
        g4 = up(self.Conv(g4))

        layer1 = up(self.pspblock1(feats["layer1"]))
        layer2 = up(self.pspblock2(feats["layer2"]))
        layer3 = up(self.pspblock3(feats["layer3"]))
        layer4 = up(self.pspblock4(feats["layer4"]))

        out = torch.cat([layer1, layer2, layer3, layer4, g4], dim=1)
        return self.tail(out)
class TVResNet50_edge(nn.Module):
    """PSP head on ResNet-50 layer2 features; also computes a Sobel edge map
    of the input (currently unused — the fusion path is commented out)."""

    # Sobel gradient kernels (horizontal / vertical).
    _SOBEL_X = [[-1.0, 0.0, 1.0], [-2.0, 0.0, 2.0], [-1.0, 0.0, 1.0]]
    _SOBEL_Y = [[-1.0, -2.0, -1.0], [0.0, 0.0, 0.0], [1.0, 2.0, 1.0]]

    def __init__(self):
        super().__init__()
        self.backbone = torchvision.models.resnet50(pretrained=False)
        self.backboneEdge = torchvision.models.segmentation.deeplabv3_resnet50(num_classes=2)

        self.inDepth = 512
        self.outDepth = self.inDepth // 4
        # Pyramid-pooling branches over a 32x32 grid, factors 1/2/3/6.
        self.normal1 = pool_block(self.inDepth, 32, 32, 1, self.outDepth)
        self.normal2 = pool_block(self.inDepth, 32, 32, 2, self.outDepth)
        self.normal3 = pool_block(self.inDepth, 32, 32, 3, self.outDepth)
        self.normal6 = pool_block(self.inDepth, 32, 32, 6, self.outDepth)
        self.convBNRelu = ConvBNRelu(self.outDepth * 8, self.outDepth, 3, bias=False)
        self.Conv4 = nn.Conv2d(self.outDepth, 2, 1, bias=True)
        self.tail = nn.Conv2d(4, 2, 1, bias=True)

        # Register the depthwise Sobel kernels once as (non-persistent)
        # buffers so they follow the module's device/dtype.  The original
        # rebuilt them and called .cuda() on every forward, which both wasted
        # work and crashed on CPU-only runs.  persistent=False keeps them out
        # of the state dict so old checkpoints still load.
        self.register_buffer(
            "sobel_x",
            torch.tensor(self._SOBEL_X).view(1, 1, 3, 3).repeat(3, 1, 1, 1),
            persistent=False)
        self.register_buffer(
            "sobel_y",
            torch.tensor(self._SOBEL_Y).view(1, 1, 3, 3).repeat(3, 1, 1, 1),
            persistent=False)

    def forward(self, input):
        # Per-channel Sobel gradients and edge magnitude.
        # NOTE(review): `edge` is currently unused below; kept so the
        # commented-out fusion path can be re-enabled.
        edge_x = F.conv2d(input, self.sobel_x, padding=1, groups=3)
        edge_y = F.conv2d(input, self.sobel_y, padding=1, groups=3)
        edge = torch.sqrt(edge_x ** 2 + edge_y ** 2)

        # Backbone up to (and including) layer2 only.
        x = input
        high_level_features = None
        for name, module in self.backbone.named_children():
            x = module(x)
            if name == "layer2":
                high_level_features = x
                break

        pool_outs = [high_level_features]
        for branch in (self.normal1, self.normal2, self.normal3, self.normal6):
            pool_outs.append(branch(high_level_features))
        o = torch.cat(pool_outs, dim=1)
        o = self.convBNRelu(o)
        o = self.Conv4(o)
        # Exact replacement for the deprecated F.upsample_bilinear.
        o = F.interpolate(o, size=(256, 256), mode="bilinear", align_corners=True)
        return o

class TVResNet50_luansheng(nn.Module):
    """Shared conv head ("luansheng") applied to the reduced layer2 features
    at three scales (32/16/8), upsampled to 256x256 and fused by 1x1 conv."""

    def __init__(self):
        super().__init__()
        self.backbone = torchvision.models.resnet50(pretrained=False)

        self.conv2 = ConvBNRelu(512, 256, 3, bias=False)
        # conv3/conv4 and the deConv* layers below are not used in forward();
        # kept so existing checkpoints still load.
        self.conv3 = ConvBNRelu(1024, 256, 3, bias=False)
        self.conv4 = ConvBNRelu(2048, 256, 3, bias=False)
        # Shared multi-scale head ending in 2-class logits.
        self.luansheng = nn.Sequential(
            ConvBNRelu(256, 256),
            ConvBNRelu(256, 256),
            ConvBNRelu(256, 256),
            ConvBNRelu(256, 256),
            ConvBNRelu(256, 2),
        )
        self.deConv4 = ConvBNRelu(512, 256, 3, bias=False)
        self.deConv3 = ConvBNRelu(512, 256, 3, bias=False)
        self.deConv2 = ConvBNRelu(512, 256)

        self.tail = nn.Conv2d(6, 2, 1, bias=True)   # 3 scales x 2 channels

    def forward(self, input):
        # Backbone up to layer2.  The original's layer3/layer4 branches were
        # unreachable (the loop breaks inside the layer2 branch) — removed.
        x = input
        layer2 = None
        for name, module in self.backbone.named_children():
            x = module(x)
            if name == "layer2":
                layer2 = x
                break

        layer2 = self.conv2(layer2)

        # Shared head at three spatial scales.
        x16 = F.interpolate(layer2, size=(16, 16))
        x8 = F.interpolate(layer2, size=(8, 8))
        x32 = self.luansheng(layer2)
        x16 = self.luansheng(x16)
        x8 = self.luansheng(x8)

        def up(t):
            # Exact replacement for the deprecated F.upsample_bilinear.
            return F.interpolate(t, size=(256, 256), mode="bilinear", align_corners=True)

        out = torch.cat((up(x32), up(x16), up(x8)), dim=1)
        return self.tail(out)

class MWinLWMSA(nn.Module):
    """Stack of residual window-attention (LWMSA) layers over a feature map.

    Builds three attention blocks with window sizes 8/16/32, but forward()
    currently applies only the window-32 block (see NOTE below).
    """

    def __init__(self, dim):
        super(MWinLWMSA, self).__init__()
        # Window-attention blocks at three window sizes (8 heads each).
        self.win8 = patchNet.LWMSA(dim,8,window_size=8)
        self.win16 = patchNet.LWMSA(dim, 8, window_size=16)
        self.win32 = patchNet.LWMSA(dim, 8, window_size=32)

    def forward(self, x):
        # NOTE(review): only win32 is used — three residual applications of
        # the same block; win8/win16 are constructed but never called.
        # Possibly the first two lines were meant to be self.win8/self.win16.
        # Confirm before relying on this module.
        x=self.win32(x)+x
        x=self.win32(x)+x
        x=self.win32(x)+x
        return x

class UNet_luansheng(nn.Module):
    """Shared fully-convolutional head applied to the input image at five
    scales (256/128/64/32/16); the per-scale 2-class logits are upsampled
    back to 256x256, concatenated and fused by a 1x1 conv."""

    def __init__(self):
        super().__init__()
        # BUG FIX: the original did `self.mwin = MWinLWMSA()`, which raises a
        # TypeError (MWinLWMSA requires a `dim` argument), so the model could
        # never be constructed; the attribute was also never used in
        # forward(), so the line is removed.
        self.luansheng = nn.Sequential(
            ConvBNRelu(3, 32),
            ConvBNRelu(32, 64),
            ConvBNRelu(64, 128),
            ConvBNRelu(128, 256),
            ConvBNRelu(256, 128),
            ConvBNRelu(128, 64),
            ConvBNRelu(64, 32),
            ConvBNRelu(32, 2),
        )

        self.tail = nn.Conv2d(10, 2, 1, bias=True)   # 5 scales x 2 channels

    def forward(self, input):
        # Native-resolution pass first, then the four reduced scales.
        outs = [self.luansheng(input)]               # 256x256
        for size in (128, 64, 32, 16):
            scaled = F.interpolate(input, (size, size))
            logits = self.luansheng(scaled)
            # Exact replacement for the deprecated F.upsample_nearest.
            outs.append(F.interpolate(logits, size=(256, 256), mode="nearest"))

        # Reversed so the channel order matches the original
        # [x16, x32, x64, x128, x256] layout expected by self.tail.
        out = torch.cat(list(reversed(outs)), dim=1)
        return self.tail(out)
class TranBlock(nn.Module):
    """Wraps an nn.TransformerEncoderLayer so it can be applied to an NCHW
    feature map: flattens the spatial grid to tokens, runs the encoder layer,
    and restores the original shape.  d_model must be divisible by 8 (heads).
    """

    def __init__(self, d_model):
        super(TranBlock, self).__init__()
        # 8 attention heads over d_model-dimensional tokens.
        self.former = nn.TransformerEncoderLayer(d_model, 8)

    def forward(self, x):
        batch, chans, height, width = x.shape

        tokens = x.flatten(2).permute(0, 2, 1)   # (B, H*W, C)
        # NOTE(review): self.former defaults to batch_first=False, so it
        # interprets this as (seq=B, batch=H*W, feature=C) — attention mixes
        # samples across the batch dimension.  Confirm whether
        # batch_first=True was intended; behavior preserved as-is here.
        tokens = self.former(tokens)

        restored = tokens.permute(0, 2, 1)
        return restored.reshape(batch, chans, height, width)
class TVResNet50_LWMSA(nn.Module):
    """ResNet-50 truncated after layer2, followed by a window-attention block
    and a 2-class head upsampled to 256x256."""

    def __init__(self):
        super().__init__()
        self.backbone = torchvision.models.resnet50(pretrained=False)

        # Reduce layer2 (512 ch) to 256, attend, then project to 2 classes.
        self.convBNRelu1 = ConvBNRelu(512, 256, 3, bias=False)
        self.attn32 = patchNet.LWMSA(256, 256)
        self.Conv1 = ConvBNRelu(256, 2, 3, bias=False)

    def forward(self, input):
        # Run the backbone up to layer2 only; the original kept executing
        # layer3/layer4/avgpool even though only layer2 is consumed.
        x = input
        layer2 = None
        for name, module in self.backbone.named_children():
            x = module(x)
            if name == "layer2":
                layer2 = x
                break

        g = self.convBNRelu1(layer2)
        g = self.attn32(g)
        g = self.Conv1(g)
        # F.upsample_bilinear is deprecated; interpolate with
        # align_corners=True is its documented exact equivalent.
        g = F.interpolate(g, size=(256, 256), mode="bilinear", align_corners=True)
        return g
class TVResNet50_ShiftPoolingPSPNet(nn.Module):
    """PSP head on ResNet-50 layer2 features where pooling factors 2/4/6 each
    combine a normal pooling with left/top/top-left shifted variants, followed
    by a transpose-conv decoder with dense skip connections."""

    def __init__(self):
        super().__init__()
        self.backbone = torchvision.models.resnet50(pretrained=False)
        self.convDepth = nn.Conv2d(2048, 128, 1)   # unused in forward(); kept for checkpoint compat
        self.inDepth = 512
        self.outDepth = self.inDepth // 4
        self.normal1 = pool_block(self.inDepth, 32, 32, 1, self.outDepth)

        # Factor-2 pooling: normal plus three shifted variants.
        self.normal2 = pool_block(self.inDepth, 32, 32, 2, self.outDepth)
        self.shiftLeft2 = shiftLeftPoolBlock(self.inDepth, 32, 32, 2, self.outDepth)
        self.shiftTop2 = shiftTopPoolBlock(self.inDepth, 32, 32, 2, self.outDepth)
        self.shiftTopLeft2 = shiftTopLeftPoolBlock(self.inDepth, 32, 32, 2, self.outDepth)

        # Factor-4 pooling group.
        self.normal4 = pool_block(self.inDepth, 32, 32, 4, self.outDepth)
        self.shiftLeft4 = shiftLeftPoolBlock(self.inDepth, 32, 32, 4, self.outDepth)
        self.shiftTop4 = shiftTopPoolBlock(self.inDepth, 32, 32, 4, self.outDepth)
        self.shiftTopLeft4 = shiftTopLeftPoolBlock(self.inDepth, 32, 32, 4, self.outDepth)

        # Factor-6 pooling group.
        self.normal6 = pool_block(self.inDepth, 32, 32, 6, self.outDepth)
        self.shiftLeft6 = shiftLeftPoolBlock(self.inDepth, 32, 32, 6, self.outDepth)
        self.shiftTop6 = shiftTopPoolBlock(self.inDepth, 32, 32, 6, self.outDepth)
        self.shiftTopLeft6 = shiftTopLeftPoolBlock(self.inDepth, 32, 32, 6, self.outDepth)

        # 1x1 convs fusing the four pooling variants of each factor.
        self.convFuse2 = nn.Conv2d(self.outDepth * 4, self.outDepth, 1, bias=False)
        self.convFuse4 = nn.Conv2d(self.outDepth * 4, self.outDepth, 1, bias=False)
        self.convFuse6 = nn.Conv2d(self.outDepth * 4, self.outDepth, 1, bias=False)

        self.convBNRelu = ConvBNRelu(self.outDepth * 8, self.outDepth, 3)

        # Decoder: three 2x transpose-convs with dense skips from earlier stages.
        self.ConvTranspose1 = nn.ConvTranspose2d(self.outDepth, self.outDepth, 2, 2)
        self.ConvTranspose2 = nn.ConvTranspose2d(self.outDepth, self.outDepth, 2, 2)
        self.ConvTranspose3 = nn.ConvTranspose2d(self.outDepth, self.outDepth, 2, 2)

        self.Conv1 = nn.Conv2d(self.outDepth, self.outDepth, 3, bias=True, padding=1)
        self.Conv2 = nn.Conv2d(self.outDepth * 2, self.outDepth, 3, bias=True, padding=1)
        self.Conv3 = nn.Conv2d(self.outDepth * 3, self.outDepth, 3, bias=True, padding=1)

        self.Conv4 = nn.Conv2d(self.outDepth, 2, 1, bias=True)

    def forward(self, input):
        # Backbone up to layer2 only; the original kept running layer3/layer4
        # and avgpool even though only layer2 is consumed.
        x = input
        layer2 = None
        for name, module in self.backbone.named_children():
            x = module(x)
            if name == "layer2":
                layer2 = x
                break

        pool_outs = [layer2, self.normal1(layer2)]
        # Each factor: fuse normal + left/top/top-left shifted poolings with
        # its 1x1 conv (the original repeated this block three times inline).
        for normal, left, top, topleft, fuse in (
            (self.normal2, self.shiftLeft2, self.shiftTop2, self.shiftTopLeft2, self.convFuse2),
            (self.normal4, self.shiftLeft4, self.shiftTop4, self.shiftTopLeft4, self.convFuse4),
            (self.normal6, self.shiftLeft6, self.shiftTop6, self.shiftTopLeft6, self.convFuse6),
        ):
            fused = torch.cat([normal(layer2), left(layer2), top(layer2), topleft(layer2)], dim=1)
            pool_outs.append(fuse(fused))

        o = self.convBNRelu(torch.cat(pool_outs, dim=1))

        # Decoder with dense skip connections.  F.upsample_nearest is
        # deprecated; interpolate(mode="nearest") is its equivalent.
        up1 = self.Conv1(self.ConvTranspose1(o))

        up2 = self.ConvTranspose2(up1)
        oto2 = F.interpolate(o, scale_factor=4, mode="nearest")
        up2 = self.Conv2(torch.cat([oto2, up2], dim=1))

        up3 = self.ConvTranspose3(up2)
        oto3 = F.interpolate(o, scale_factor=8, mode="nearest")
        up1to3 = F.interpolate(up1, scale_factor=4, mode="nearest")
        up3 = self.Conv3(torch.cat([oto3, up1to3, up3], dim=1))

        return self.Conv4(up3)

class TVResNet50_M_Fully_PSPNet(nn.Module):
    """PSP blocks on layer1/layer2 plus a fully-connected spatial-mixing
    branch on layer4, concatenated and projected to 2 classes."""

    def __init__(self):
        super().__init__()
        self.backbone = torchvision.models.resnet50(pretrained=False)
        self.pspblock1 = PSPBlock(256, 64, 64, 4, 1, 4, 6, 12)
        self.pspblock2 = PSPBlock(512, 32, 32, 4, 1, 2, 3, 6)
        # Mixes the 64 flattened spatial positions of layer4, per channel.
        self.fully = nn.Linear(64, 64)
        self.Conv4 = ConvBNRelu(2048, 2)

        # NOTE(review): given PSPBlock outputs `inDim` channels, the concat in
        # forward() carries 256+512+2 channels while this 1x1 conv expects 6 —
        # confirm the intended PSPBlock output width.
        self.tail = nn.Conv2d(6, 2, 1, bias=False)

    def forward(self, input):
        # Collect layer1/layer2/layer4; stop after layer4 (the original kept
        # executing the classifier's avgpool for nothing).
        feats = {}
        x = input
        for name, module in self.backbone.named_children():
            if name == "fc":
                break
            x = module(x)
            if name in ("layer1", "layer2", "layer4"):
                feats[name] = x
                if name == "layer4":
                    break

        def up(t):
            # Exact replacement for the deprecated F.upsample_bilinear.
            return F.interpolate(t, size=(256, 256), mode="bilinear", align_corners=True)

        layer1 = up(self.pspblock1(feats["layer1"]))
        layer2 = up(self.pspblock2(feats["layer2"]))

        # Flatten layer4's spatial grid, mix positions with the shared Linear,
        # restore the grid, then project to 2 classes.
        layer4 = feats["layer4"]
        B, C, H, W = layer4.shape
        layer4 = self.fully(layer4.reshape(B, C, H * W)).reshape(B, C, H, W)
        layer4 = up(self.Conv4(layer4))

        out = torch.cat([layer1, layer2, layer4], dim=1)
        return self.tail(out)
class UNet_Decoder(nn.Module):
    """U-Net style decoder.

    Starting from the deepest level, each stage doubles the spatial size with
    a transpose conv, concatenates the matching encoder skip, and refines with
    two ConvBNRelu layers; a final ConvBNRelu maps to 2 output channels.

    Args:
        depths: channel counts of the five encoder levels, shallow to deep.
    """

    def __init__(self, depths=[32, 32, 32, 32, 32]):
        super(UNet_Decoder, self).__init__()

        # Stage 4 -> 3.
        self.ConvTranspose4_3 = nn.ConvTranspose2d(depths[4], depths[3], 2, 2)
        self.Conv4_3 = nn.Sequential(ConvBNRelu(depths[3] * 2, depths[3]),
                                     ConvBNRelu(depths[3], depths[3]))
        # Stage 3 -> 2.
        self.ConvTranspose4_2 = nn.ConvTranspose2d(depths[3], depths[2], 2, 2)
        self.Conv4_2 = nn.Sequential(ConvBNRelu(depths[2] * 2, depths[2]),
                                     ConvBNRelu(depths[2], depths[2]))
        # Stage 2 -> 1.
        self.ConvTranspose4_1 = nn.ConvTranspose2d(depths[2], depths[1], 2, 2)
        self.Conv4_1 = nn.Sequential(ConvBNRelu(depths[1] * 2, depths[1]),
                                     ConvBNRelu(depths[1], depths[1]))
        # Stage 1 -> 0.
        self.ConvTranspose4_0 = nn.ConvTranspose2d(depths[1], depths[0], 2, 2)
        self.Conv4_0 = nn.Sequential(ConvBNRelu(depths[0] * 2, depths[0]),
                                     ConvBNRelu(depths[0], depths[0]))

        self.ConvOut = nn.Sequential(ConvBNRelu(depths[0], 2),
                                     )

    def forward(self, levels):
        """Decode the five encoder outputs (levels[0] shallow .. levels[4] deep)."""
        stages = (
            (self.ConvTranspose4_3, self.Conv4_3, levels[3]),
            (self.ConvTranspose4_2, self.Conv4_2, levels[2]),
            (self.ConvTranspose4_1, self.Conv4_1, levels[1]),
            (self.ConvTranspose4_0, self.Conv4_0, levels[0]),
        )

        feat = levels[4]
        for upsample, refine, skip in stages:
            feat = upsample(feat)
            feat = refine(torch.cat([feat, skip], dim=1))

        return self.ConvOut(feat)

class noShiftWinTransformer(nn.Module):
    """Non-shifted Swin window-attention block followed by a token-mixing
    bottleneck that maps inDim -> outDim channels.

    Operates on NCHW feature maps of spatial size inWidth x inWidth.
    """

    def __init__(self, inDim, outDim, inWidth, hideWidth=16, headNum=1, winSize=8):
        super().__init__()
        # Window attention over the (inWidth x inWidth) token grid; the final
        # 0 disables the cyclic shift.
        self.Former1 = swin.SwinTransformerBlock(inDim, (inWidth, inWidth), headNum, winSize, 0)

        # Bottleneck over the flattened spatial axis (inWidth^2 -> hideWidth
        # -> inWidth^2) interleaved with 1x1 channel projections.
        self.InterWin = nn.Sequential(
            nn.Linear(inWidth * inWidth, hideWidth),
            ConvBNRelu1D(inDim, outDim, kernel_size=1, padding=0),
            nn.Linear(hideWidth, inWidth * inWidth),
            ConvBNRelu1D(outDim, outDim, kernel_size=1, padding=0),
        )
        self.outDim = outDim

    def forward(self, x):
        batch, _, height, width = x.shape

        tokens = x.flatten(2).permute(0, 2, 1)     # (B, H*W, C) for attention
        tokens = self.Former1(tokens)

        channels_first = tokens.permute(0, 2, 1)   # back to (B, C, H*W)
        mixed = self.InterWin(channels_first)
        return mixed.reshape(batch, self.outDim, height, width)

class UNet_Fully(nn.Module):
    """U-Net whose encoder stages are stacks of non-shifted window
    transformers; a UNet_Decoder merges the five feature levels into a
    2-channel output."""

    def __init__(self):
        super().__init__()
        self.decoder = UNet_Decoder(depths=[32, 64, 128, 256, 512])

        # Encoder stages; the spatial size halves between stages via the
        # max-pooling in forward().  Per-block arguments:
        # (inDim, outDim, inWidth, hideWidth, headNum, winSize).
        self.layer1 = nn.Sequential(
            noShiftWinTransformer(3, 32, 256, 16, 1, 8),
            noShiftWinTransformer(32, 32, 256, 16, 1, 8),
        )

        self.layer2 = nn.Sequential(
            noShiftWinTransformer(32, 64, 128, 16, 4, 8),
            noShiftWinTransformer(64, 64, 128, 16, 4, 8),
        )

        self.layer3 = nn.Sequential(
            noShiftWinTransformer(64, 128, 64, 16, 4, 8),
            noShiftWinTransformer(128, 128, 64, 16, 4, 8),
        )
        self.layer4 = nn.Sequential(
            noShiftWinTransformer(128, 256, 32, 16, 8, 8),
            noShiftWinTransformer(256, 256, 32, 16, 8, 8),
            noShiftWinTransformer(256, 256, 32, 16, 8, 8),
            noShiftWinTransformer(256, 256, 32, 16, 8, 8),
            noShiftWinTransformer(256, 256, 32, 16, 8, 8),
            noShiftWinTransformer(256, 256, 32, 16, 8, 8),
        )
        self.layer5 = nn.Sequential(
            noShiftWinTransformer(256, 512, 16, 16, 8, 8),
            noShiftWinTransformer(512, 512, 16, 16, 8, 8),
        )

    def forward(self, input):
        skips = []
        x = input
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4, self.layer5):
            if skips:   # downsample before every stage except the first
                x = F.max_pool2d(x, 2)
            x = stage(x)
            skips.append(x)

        return self.decoder(skips)
class NWinFormer(nn.Module):
    """Encoder-decoder model built from stacked non-shifted window
    transformer blocks.

    Five encoder stages widen channels 32 -> 64 -> 128 -> 256 -> 512,
    with 2x max-pooling between stages; a UNet-style decoder fuses the
    resulting five-level feature pyramid into the output map.

    NOTE(review): the positional arguments to noShiftWinTransformer are
    assumed to be (in_channels, out_channels, resolution, window, heads,
    patch) — confirm against its definition elsewhere in this file.
    """

    def __init__(self):
        super().__init__()

        # Decoder consumes the five encoder feature maps (shallow -> deep).
        self.decoder = UNet_Decoder(depths=[32, 64, 128, 256, 512])

        self.layer1 = nn.Sequential(
            noShiftWinTransformer(3, 32, 256, 32, 1, 8),
            noShiftWinTransformer(32, 32, 256, 32, 4, 8),
        )
        self.layer2 = nn.Sequential(
            noShiftWinTransformer(32, 64, 128, 32, 4, 8),
            noShiftWinTransformer(64, 64, 128, 32, 4, 8),
        )
        self.layer3 = nn.Sequential(
            noShiftWinTransformer(64, 128, 64, 32, 4, 8),
            noShiftWinTransformer(128, 128, 64, 32, 4, 8),
        )
        self.layer4 = nn.Sequential(
            noShiftWinTransformer(128, 256, 32, 32, 8, 8),
            noShiftWinTransformer(256, 256, 32, 32, 8, 8),
        )
        self.layer5 = nn.Sequential(
            noShiftWinTransformer(256, 512, 16, 16, 8, 8),
            noShiftWinTransformer(512, 512, 16, 16, 8, 8),
        )

    def forward(self, input):
        """Encode *input* through the five stages (max-pooling between
        them), then decode the feature pyramid to the output map."""
        feats = [self.layer1(input)]
        for stage in (self.layer2, self.layer3, self.layer4, self.layer5):
            feats.append(stage(F.max_pool2d(feats[-1], 2)))
        return self.decoder(feats)