from torch import nn
import torch
from einops import rearrange
import numbers
from timm.layers import create_attn, create_aa
from einops.layers.torch import Rearrange
import math
from typing import Optional, Type


class Conv2d_cd(nn.Module):
    """Central-difference 2D convolution (CDC).

    Wraps a plain ``nn.Conv2d`` and exposes a reparameterized kernel whose
    center tap becomes ``w_center - theta * sum(w)``; with ``theta=1`` this is
    the pure central-difference kernel. The weight is consumed externally
    (see ``DEConv``) via :func:`torch.nn.functional.conv2d`.
    """

    def __init__(self, dim, kernel_size=3, groups=1, dilation=1, bias=False, theta=1.0):
        super(Conv2d_cd, self).__init__()
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)

        # "same" padding for odd kernels at any dilation; the previous
        # `dilation * k // 2` over-padded whenever dilation > 1.
        pad = (dilation * (kernel_size[0] - 1) // 2, dilation * (kernel_size[1] - 1) // 2)
        self.conv = nn.Conv2d(dim, dim, kernel_size=kernel_size, stride=1,
                              padding=pad,
                              dilation=dilation, groups=groups, bias=bias)
        # Scale of the central-difference term (1.0 reproduces the original
        # behavior, where the full tap sum was subtracted from the center).
        self.theta = theta

    def get_weight(self):
        """Return ``(reparameterized_weight, bias)`` for a fused conv2d call."""
        conv_weight = self.conv.weight
        c_out, c_in, kx, ky = conv_weight.shape
        flat = conv_weight.reshape(c_out, c_in, kx * ky)
        # Allocate on the weight's own device/dtype (works on CPU and GPU;
        # the old torch.cuda.FloatTensor required a CUDA build).
        conv_weight_cd = flat.clone()
        center = kx * ky // 2
        conv_weight_cd[:, :, center] = flat[:, :, center] - self.theta * flat.sum(2)
        return conv_weight_cd.reshape(c_out, c_in, kx, ky), self.conv.bias


class Conv1d_cd(nn.Module):
    """Central-difference 1D convolution.

    1D analogue of :class:`Conv2d_cd`: the center tap of the wrapped
    ``nn.Conv1d`` kernel is replaced by ``w_center - theta * sum(w)``.
    """

    def __init__(self, dim, kernel_size=11, groups=1, dilation=1, bias=False, theta=1.0):
        super(Conv1d_cd, self).__init__()
        self.conv = nn.Conv1d(dim, dim, kernel_size=kernel_size, stride=1,
                              padding=dilation * (kernel_size - 1) // 2,
                              dilation=dilation, groups=groups, bias=bias)
        # Scale of the central-difference term (1.0 == original behavior).
        self.theta = theta

    def get_weight(self):
        """Return ``(reparameterized_weight, bias)`` for a fused conv1d call."""
        conv_weight = self.conv.weight
        c_out, c_in, k = conv_weight.shape
        # Clone on the weight's device/dtype instead of torch.cuda.FloatTensor,
        # so CPU-only runs work too.
        conv_weight_cd = conv_weight.clone()
        center = k // 2
        conv_weight_cd[:, :, center] = conv_weight[:, :, center] - self.theta * conv_weight.sum(2)
        return conv_weight_cd, self.conv.bias


class Conv2d_ad(nn.Module):
    """Angular-difference 2D convolution.

    Subtracts a permuted copy of the kernel from itself:
    ``w - theta * w[perm]``, where ``perm`` rotates the outer ring of taps by
    one position (the center tap is fixed). Only 3x3 and 5x5 kernels have a
    defined permutation.
    """

    def __init__(self, dim, kernel_size=3, groups=1, dilation=1, bias=False, theta=1.0):
        super(Conv2d_ad, self).__init__()
        self.conv = nn.Conv2d(dim, dim, kernel_size=kernel_size, stride=1,
                              padding=dilation * (kernel_size - 1) // 2,
                              dilation=dilation, groups=groups, bias=bias)
        # Strength of the angular-difference term.
        self.theta = theta

    def get_weight(self):
        """Return ``(reparameterized_weight, bias)`` for a fused conv2d call."""
        conv_weight = self.conv.weight
        c_out, c_in, kx, ky = conv_weight.shape
        flat = conv_weight.reshape(c_out, c_in, kx * ky)
        if kx == 3:
            perm = [3, 0, 1, 6, 4, 2, 7, 8, 5]
        elif kx == 5:
            perm = [5, 0, 1, 2, 3, 10, 6, 7, 8, 4, 15, 11, 12, 13, 9, 20, 16, 17,
                    18, 14, 21, 22, 23, 24, 19]
        else:
            # Previously fell through with `conv_weight_ad` unbound (NameError).
            raise ValueError(f'Conv2d_ad supports kernel_size 3 or 5, got {kx}')
        conv_weight_ad = flat - self.theta * flat[:, :, perm]
        return conv_weight_ad.reshape(c_out, c_in, kx, ky), self.conv.bias


class Conv2d_hd(nn.Module):
    """Horizontal-difference convolution.

    Holds a 1D kernel of length k and scatters it into a k x k 2D kernel:
    ``+w`` in the first column, ``-w`` in the last column, zeros elsewhere,
    i.e. a learnable horizontal-gradient operator.
    """

    def __init__(self, dim, kernel_size=3, groups=1, dilation=1, bias=False):
        super(Conv2d_hd, self).__init__()
        self.conv = nn.Conv1d(dim, dim, kernel_size=kernel_size, stride=1, padding=dilation * (kernel_size - 1) // 2,
                              dilation=dilation, groups=groups, bias=bias)

    def get_weight(self):
        """Return ``(k x k weight, bias)`` built from the 1D kernel."""
        conv_weight = self.conv.weight
        c_out, c_in, k = conv_weight.shape
        # Allocate on the weight's device/dtype (the old torch.cuda.FloatTensor
        # required CUDA even for CPU inference).
        conv_weight_hd = torch.zeros(c_out, c_in, k * k,
                                     device=conv_weight.device, dtype=conv_weight.dtype)
        if k == 3:
            conv_weight_hd[:, :, [0, 3, 6]] = conv_weight
            conv_weight_hd[:, :, [2, 5, 8]] = -conv_weight
        elif k == 5:
            conv_weight_hd[:, :, [0, 5, 10, 15, 20]] = conv_weight
            conv_weight_hd[:, :, [4, 9, 14, 19, 24]] = -conv_weight
        else:
            # Previously this silently produced an all-zero kernel.
            raise ValueError(f'Conv2d_hd supports kernel_size 3 or 5, got {k}')
        return conv_weight_hd.reshape(c_out, c_in, k, k), self.conv.bias


class Conv2d_vd(nn.Module):
    """Vertical-difference convolution.

    Holds a 1D kernel of length k and scatters it into a k x k 2D kernel:
    ``+w`` in the first row, ``-w`` in the last row, zeros elsewhere,
    i.e. a learnable vertical-gradient operator.
    """

    def __init__(self, dim, kernel_size=3, groups=1, dilation=1, bias=False):
        super(Conv2d_vd, self).__init__()
        self.conv = nn.Conv1d(dim, dim, kernel_size=kernel_size, stride=1, padding=dilation * (kernel_size - 1) // 2,
                              dilation=dilation, groups=groups, bias=bias)

    def get_weight(self):
        """Return ``(k x k weight, bias)`` built from the 1D kernel."""
        conv_weight = self.conv.weight
        c_out, c_in, k = conv_weight.shape
        # Allocate on the weight's device/dtype; torch.cuda.FloatTensor broke
        # CPU-only execution.
        conv_weight_vd = torch.zeros(c_out, c_in, k * k,
                                     device=conv_weight.device, dtype=conv_weight.dtype)
        if k == 3:
            conv_weight_vd[:, :, [0, 1, 2]] = conv_weight
            conv_weight_vd[:, :, [6, 7, 8]] = -conv_weight
        elif k == 5:
            conv_weight_vd[:, :, [0, 1, 2, 3, 4]] = conv_weight
            conv_weight_vd[:, :, [20, 21, 22, 23, 24]] = -conv_weight
        else:
            # Previously this silently produced an all-zero kernel.
            raise ValueError(f'Conv2d_vd supports kernel_size 3 or 5, got {k}')
        return conv_weight_vd.reshape(c_out, c_in, k, k), self.conv.bias


class DEConv(nn.Module):
    """Detail-enhanced convolution (DEConv).

    Maintains five parallel branches -- central-, horizontal-, vertical- and
    angular-difference convolutions plus a vanilla conv -- and applies them as
    ONE convolution by summing their reparameterized kernels and biases.
    """

    def __init__(self, dim, kernel_size=3, groups=1, dilation=1, bias=True):
        super(DEConv, self).__init__()
        self.groups = groups
        self.dilation = dilation
        self.conv1_1 = Conv2d_cd(dim, kernel_size, groups=groups, dilation=dilation, bias=bias)
        self.conv1_2 = Conv2d_hd(dim, kernel_size, groups=groups, dilation=dilation, bias=bias)
        self.conv1_3 = Conv2d_vd(dim, kernel_size, groups=groups, dilation=dilation, bias=bias)
        self.conv1_4 = Conv2d_ad(dim, kernel_size, groups=groups, dilation=dilation, bias=bias)
        # Vanilla branch; honor the caller's `bias` flag (was hard-coded True,
        # which crashed the bias sum in forward() when bias=False).
        self.conv1_5 = nn.Conv2d(dim, dim, kernel_size, groups=groups,
                                 padding=dilation * (kernel_size - 1) // 2,
                                 dilation=dilation, bias=bias)
        # "same" padding for the fused conv, matching the branch convs
        # (the old `kernel_size // 2` ignored dilation).
        self.pad = dilation * (kernel_size - 1) // 2

    def forward(self, x):
        # TODO: for inference, precompute the fused kernel once and replace
        # this block with a plain convolution (reparameterization).
        w1, b1 = self.conv1_1.get_weight()
        w2, b2 = self.conv1_2.get_weight()
        w3, b3 = self.conv1_3.get_weight()
        w4, b4 = self.conv1_4.get_weight()
        w5, b5 = self.conv1_5.weight, self.conv1_5.bias

        w = w1 + w2 + w3 + w4 + w5
        # All branches share the same `bias` flag, so the biases are either
        # all present or all None.
        b = None if b5 is None else b1 + b2 + b3 + b4 + b5
        return nn.functional.conv2d(input=x, weight=w, bias=b, stride=1,
                                    padding=self.pad, dilation=self.dilation,
                                    groups=self.groups)


######################DEConv end#######################


#########################Neck start##########################
class Bottleneck(nn.Module):
    """ResNet-style bottleneck: 1x1 reduce -> 3x3 (grouped) -> 1x1 expand.

    Supports optional anti-aliased downsampling (``aa_layer``), an optional
    attention module after the expansion conv (``attn_layer``), and optional
    DropBlock / DropPath regularization, with a residual shortcut that may be
    projected by ``downsample``.
    """

    expansion = 4  # output channels = planes * expansion

    def __init__(
            self,
            inplanes: int,
            planes: int,
            stride: int = 1,
            downsample: Optional[nn.Module] = None,
            cardinality: int = 1,
            base_width: int = 64,
            reduce_first: int = 1,
            dilation: int = 1,
            first_dilation: Optional[int] = None,
            act_layer: Type[nn.Module] = nn.ReLU,
            norm_layer: Type[nn.Module] = nn.BatchNorm2d,
            attn_layer: Optional[Type[nn.Module]] = None,
            aa_layer: Optional[Type[nn.Module]] = None,
            drop_block: Optional[Type[nn.Module]] = None,
            drop_path: Optional[nn.Module] = None,
    ):
        """
        Args:
            inplanes: Number of input channels.
            planes: Base channel count; output is ``planes * expansion``.
            stride: Stride of the 3x3 conv (or of the AA layer when enabled).
            downsample: Optional projection for the residual shortcut.
            cardinality: Number of groups for the 3x3 conv.
            base_width: Base width used to size the bottleneck.
            reduce_first: Extra reduction factor for the first 1x1 conv.
            dilation: Dilation of the block.
            first_dilation: Dilation of the 3x3 conv (defaults to ``dilation``).
            act_layer: Activation class.
            norm_layer: Normalization class.
            attn_layer: Optional attention class (e.g. SE), created via timm.
            aa_layer: Optional anti-aliasing class, created via timm.
            drop_block: Optional DropBlock class.
            drop_path: Optional DropPath module instance.
        """
        super(Bottleneck, self).__init__()

        mid_width = int(math.floor(planes * (base_width / 64)) * cardinality)
        entry_width = mid_width // reduce_first
        out_width = planes * self.expansion
        first_dilation = first_dilation or dilation
        # When anti-aliasing is active, the 3x3 conv keeps stride 1 and the
        # AA layer performs the downsampling instead.
        use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation)

        self.conv1 = nn.Conv2d(inplanes, entry_width, kernel_size=1, bias=False)
        self.bn1 = norm_layer(entry_width)
        self.act1 = act_layer(inplace=True)

        self.conv2 = nn.Conv2d(
            entry_width, mid_width, kernel_size=3,
            stride=1 if use_aa else stride,
            padding=first_dilation, dilation=first_dilation,
            groups=cardinality, bias=False)
        self.bn2 = norm_layer(mid_width)
        self.drop_block = drop_block() if drop_block is not None else nn.Identity()
        self.act2 = act_layer(inplace=True)
        self.aa = create_aa(aa_layer, channels=mid_width, stride=stride, enable=use_aa)

        self.conv3 = nn.Conv2d(mid_width, out_width, kernel_size=1, bias=False)
        self.bn3 = norm_layer(out_width)

        self.se = create_attn(attn_layer, out_width)

        self.act3 = act_layer(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.drop_path = drop_path

    def zero_init_last(self):
        """Zero the last norm's scale so the block starts as an identity map."""
        if getattr(self.bn3, 'weight', None) is not None:
            nn.init.zeros_(self.bn3.weight)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        shortcut = x

        out = self.act1(self.bn1(self.conv1(x)))
        out = self.aa(self.act2(self.drop_block(self.bn2(self.conv2(out)))))
        out = self.bn3(self.conv3(out))

        if self.se is not None:
            out = self.se(out)
        if self.drop_path is not None:
            out = self.drop_path(out)
        if self.downsample is not None:
            shortcut = self.downsample(shortcut)
        out += shortcut
        return self.act3(out)


class Neck(nn.Module):
    """Fuses a three-level feature pyramid into one downsampled feature map.

    ``forward`` expects ``x`` to be a sequence of three maps whose channel
    counts are ``16 * block.expansion``, ``32 * block.expansion`` and
    ``64 * block.expansion``; the stride-2 convs imply each level is half the
    spatial size of the previous one (confirm against the backbone outputs).
    The fused map is then processed by a residual stage built from ``block``.
    """

    def __init__(self, block, layers, width_per_group=64, norm_layer=None, ):
        super(Neck, self).__init__()
        norm_layer = norm_layer or nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.base_width = width_per_group
        self.inplanes = 64 * block.expansion
        self.dilation = 1
        # Residual stage applied after fusion (created first on purpose: the
        # module construction order determines the RNG stream used by init).
        self.bn_layer = self._make_layer(block, 128, layers, stride=2)

        self.conv1 = nn.Conv2d(16 * block.expansion, 32 * block.expansion, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn1 = norm_layer(32 * block.expansion)
        self.conv2 = nn.Conv2d(32 * block.expansion, 64 * block.expansion, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn2 = norm_layer(64 * block.expansion)
        self.conv21 = nn.Conv2d(32 * block.expansion, 32 * block.expansion, 1)
        self.bn21 = norm_layer(32 * block.expansion)
        self.conv31 = nn.Conv2d(64 * block.expansion, 64 * block.expansion, 1)
        self.bn31 = norm_layer(64 * block.expansion)
        self.convf = nn.Conv2d(64 * block.expansion, 64 * block.expansion, 1)
        self.bnf = norm_layer(64 * block.expansion)
        self.relu = nn.ReLU(inplace=True)
        # Xavier init for convs, unit scale / zero shift for norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.xavier_uniform_(module.weight, gain=nn.init.calculate_gain('relu'))
            elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build a stage of ``blocks`` residual blocks, downsampling once."""
        norm_layer = self._norm_layer
        prev_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection so the shortcut matches the new shape.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                norm_layer(planes * block.expansion),
            )
        stage = [block(self.inplanes, planes, stride, downsample,
                       base_width=self.base_width, dilation=prev_dilation,
                       norm_layer=norm_layer)]
        self.inplanes = planes * block.expansion
        stage += [block(self.inplanes, planes, base_width=self.base_width,
                        dilation=self.dilation, norm_layer=norm_layer)
                  for _ in range(1, blocks)]
        return nn.Sequential(*stage)

    def forward(self, x):
        # Top-down fusion: downsample each level and add the lateral branch.
        top = self.relu(self.bn1(self.conv1(x[0])))
        mid = self.relu(self.bn21(self.conv21(x[1]))) + top
        fused = self.relu(self.bn2(self.conv2(mid))) + self.relu(self.bn31(self.conv31(x[2])))
        fused = self.relu(self.bnf(self.convf(fused)))
        return self.bn_layer(fused).contiguous()


#########################Neck end##########################

def ln(dim):
    """Shorthand for a with-bias channel LayerNorm over ``dim`` channels."""
    return LayerNorm(dim, LayerNorm_type='WithBias')


def to_3d(x):
    """Flatten a ``(B, C, H, W)`` feature map to ``(B, H*W, C)`` tokens.

    Uses native tensor ops instead of einops' string-parsed ``rearrange``
    (same values, no third-party call in the hot path).
    """
    return x.flatten(2).transpose(1, 2)


def to_4d(x, h, w):
    """Inverse of ``to_3d``: reshape ``(B, H*W, C)`` tokens back to ``(B, C, H, W)``.

    Uses native tensor ops instead of einops' string-parsed ``rearrange``
    (same values, no third-party call in the hot path).
    """
    return x.transpose(1, 2).reshape(x.shape[0], -1, h, w)


class BiasFree_LayerNorm(nn.Module):
    """LayerNorm over the last dimension without mean subtraction or bias.

    Normalizes by the (biased) standard deviation only and applies a
    learnable per-feature scale.
    """

    def __init__(self, normalized_shape):
        super(BiasFree_LayerNorm, self).__init__()
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = (normalized_shape,)
        normalized_shape = torch.Size(normalized_shape)
        # Only a single trailing feature dimension is supported.
        assert len(normalized_shape) == 1

        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.normalized_shape = normalized_shape

    def forward(self, x):
        var = x.var(-1, keepdim=True, unbiased=False)
        scale = (var + 1e-5).sqrt()
        return x / scale * self.weight


class WithBias_LayerNorm(nn.Module):
    """Standard LayerNorm over the last dimension with scale and bias.

    Subtracts the mean, divides by the (biased) standard deviation, then
    applies a learnable per-feature scale and shift.
    """

    def __init__(self, normalized_shape):
        super(WithBias_LayerNorm, self).__init__()
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = (normalized_shape,)
        normalized_shape = torch.Size(normalized_shape)
        # Only a single trailing feature dimension is supported.
        assert len(normalized_shape) == 1

        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.normalized_shape = normalized_shape

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        var = x.var(-1, keepdim=True, unbiased=False)
        normalized = (x - mean) / torch.sqrt(var + 1e-5)
        return normalized * self.weight + self.bias


class LayerNorm(nn.Module):
    """Channel-wise LayerNorm for 4D maps: flattens to tokens, normalizes
    over channels, and restores the spatial layout.

    ``LayerNorm_type == 'BiasFree'`` selects the bias-free variant; any other
    value selects the with-bias variant.
    """

    def __init__(self, dim, LayerNorm_type):
        super(LayerNorm, self).__init__()
        body_cls = BiasFree_LayerNorm if LayerNorm_type == 'BiasFree' else WithBias_LayerNorm
        self.body = body_cls(dim)

    def forward(self, x):
        height, width = x.shape[-2:]
        tokens = to_3d(x)
        return to_4d(self.body(tokens), height, width)


class Upsample(nn.Module):
    """2x spatial upsampling with a channel projection.

    ``mode='bilinear'`` uses interpolation (honoring ``scale_factor``)
    followed by a 1x1 conv; ``mode='transpose'`` uses a fixed 2x transposed
    conv (kernel 4, stride 2, pad 1 -- ``scale_factor`` is not applied there)
    with InstanceNorm and SiLU.
    """

    def __init__(self, in_channels, out_channels, scale_factor=2, mode='transpose'):
        super(Upsample, self).__init__()
        self.scale_factor = scale_factor
        if mode == 'bilinear':
            self.upsample = nn.Sequential(
                nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=True),
                nn.Conv2d(in_channels, out_channels, 1)
            )
        elif mode == 'transpose':
            self.upsample = nn.Sequential(
                nn.ConvTranspose2d(in_channels, out_channels, kernel_size=4, stride=2, padding=1),
                nn.InstanceNorm2d(out_channels),
                nn.SiLU(inplace=True)
            )
        else:
            # Previously an unknown mode left `self.upsample` undefined and
            # only failed later with an AttributeError in forward().
            raise ValueError(f"Unsupported upsample mode: {mode!r}")

    def forward(self, x):
        return self.upsample(x)


class ChannelAttention(nn.Module):
    """CBAM-style channel attention.

    Global average- and max-pooled descriptors are passed through a shared
    bottleneck MLP; the sigmoid of their sum gates the input channels.
    """

    def __init__(self, in_planes, ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        hidden = in_planes // ratio
        self.mlp = nn.Sequential(
            nn.Conv2d(in_planes, hidden, kernel_size=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden, in_planes, kernel_size=1, bias=False),
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        scores = self.mlp(self.avg_pool(x)) + self.mlp(self.max_pool(x))
        gate = self.sigmoid(scores)
        return x * gate


class DetailEnhanceBlock(nn.Module):
    """Channel attention followed by two residual refinement stages:
    a DEConv stage and a plain 3x3 conv stage, then a 1x1 output conv.
    """

    def __init__(self, dim, ratio=16):
        super(DetailEnhanceBlock, self).__init__()
        self.ca = ChannelAttention(dim, ratio)
        self.deconv = nn.Sequential(DEConv(dim), nn.InstanceNorm2d(dim), nn.SiLU(inplace=True))
        # Registered but not used in forward; kept for state-dict compatibility.
        self.act = nn.SiLU(inplace=True)
        self.conv = nn.Sequential(nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=1),
                                  nn.InstanceNorm2d(dim),
                                  nn.SiLU(inplace=True))
        self.final_conv = nn.Conv2d(dim, dim, 1)

    def forward(self, x):
        attended = self.ca(x)
        refined = self.deconv(attended) + attended  # residual around DEConv
        refined = self.conv(refined) + attended     # residual around the 3x3 conv
        return self.final_conv(refined)


class StripAttn(nn.Module):
    """Strip attention over rows and columns with cross-branch queries.

    Two depthwise strip convolutions (vertical ``strip_kernel_size x 1`` and
    horizontal ``1 x strip_kernel_size``) produce two feature sets; row
    attention takes its queries from the horizontal branch and keys/values
    from the vertical one, and vice versa for column attention. Both outputs
    share ``project_out``, are added to the normalized input, and pass through
    a final 1x1 projection.
    """

    def __init__(self, dim, strip_kernel_size, num_head):
        super(StripAttn, self).__init__()
        self.num_head = num_head
        # Learnable per-head logit scale.
        self.temperature = nn.Parameter(torch.ones(num_head, 1, 1))
        self.norm = ln(dim)
        self.project_in = nn.Conv2d(dim, dim, 1)
        self.qkv1 = nn.Conv2d(dim, dim, kernel_size=(strip_kernel_size, 1), stride=1,
                              padding=(strip_kernel_size // 2, 0), groups=dim, bias=False)
        self.qkv2 = nn.Conv2d(dim, dim, kernel_size=(1, strip_kernel_size), stride=1,
                              padding=(0, strip_kernel_size // 2), groups=dim, bias=False)
        self.project_out = nn.Conv2d(dim, dim, 1)
        self.project = nn.Sequential(
            nn.Conv2d(dim, dim, 1),
            nn.InstanceNorm2d(dim),
            nn.SiLU(inplace=True),
        )

    def _strip_attention(self, q, k, v):
        # L2-normalized q/k, temperature-scaled cosine attention, residual q.
        q = torch.nn.functional.normalize(q, dim=-1)
        k = torch.nn.functional.normalize(k, dim=-1)
        logits = (q @ k.transpose(-2, -1)) * self.temperature
        return logits.softmax(dim=-1) @ v + q

    def forward(self, x):
        b, c, h, w = x.shape
        x = self.norm(self.project_in(x))
        vert = self.qkv1(x)
        horiz = self.qkv2(x)
        heads = self.num_head
        vert_rows = rearrange(vert, 'b (head c) h w -> b head h (w c)', head=heads)
        vert_cols = rearrange(vert, 'b (head c) h w -> b head w (h c)', head=heads)
        horiz_rows = rearrange(horiz, 'b (head c) h w -> b head h (w c)', head=heads)
        horiz_cols = rearrange(horiz, 'b (head c) h w -> b head w (h c)', head=heads)
        # Cross-branch queries: rows query with the horizontal features,
        # columns query with the vertical features.
        row_out = self._strip_attention(horiz_rows, vert_rows, vert_rows)
        col_out = self._strip_attention(vert_cols, horiz_cols, horiz_cols)
        row_out = rearrange(row_out, 'b head h (w c) -> b (head c) h w', head=heads, h=h, w=w)
        col_out = rearrange(col_out, 'b head w (h c) -> b (head c) h w', head=heads, h=h, w=w)
        out = self.project_out(row_out) + self.project_out(col_out) + x
        return self.project(out)


class StripGlobalLocalBlock(nn.Module):
    """Combines stacked strip attention (global) with two depthwise conv
    branches (local, 5x5 and 7x7), fused by a 1x1 conv plus a residual.
    """

    def __init__(self, dim, global_blks, strip_kernel_size, num_head):
        super(StripGlobalLocalBlock, self).__init__()
        self.global_attn = nn.ModuleList(
            StripAttn(dim, strip_kernel_size, num_head) for _ in range(global_blks))

        def depthwise_branch(kernel):
            # 1x1 -> depthwise kxk -> 1x1, each followed by InstanceNorm + SiLU.
            return nn.Sequential(
                nn.Conv2d(dim, dim, kernel_size=1),
                nn.InstanceNorm2d(dim),
                nn.SiLU(inplace=True),
                nn.Conv2d(dim, dim, kernel_size=kernel, padding=kernel // 2, groups=dim, bias=False),
                nn.InstanceNorm2d(dim),
                nn.SiLU(inplace=True),
                nn.Conv2d(dim, dim, kernel_size=1),
                nn.InstanceNorm2d(dim),
                nn.SiLU(inplace=True),
            )

        self.local_attn1 = depthwise_branch(5)
        self.local_attn2 = depthwise_branch(7)
        self.final_conv = nn.Conv2d(in_channels=dim * 3, out_channels=dim, kernel_size=1)

    def forward(self, x):
        global_feat = x
        for attn in self.global_attn:
            global_feat = attn(global_feat)
        fused = torch.cat([global_feat, self.local_attn1(x), self.local_attn2(x)], dim=1)
        return self.final_conv(fused) + x


class FEBlock(nn.Module):
    """Feature-enhancement block: strip global/local attention (SGL)
    followed by detail enhancement (DEH).
    """

    def __init__(self, in_channels, global_blks, strip_kernel_size, num_head):
        super(FEBlock, self).__init__()
        self.SGL = StripGlobalLocalBlock(in_channels, global_blks, strip_kernel_size, num_head)
        self.DEH = DetailEnhanceBlock(in_channels)

    def forward(self, x):
        return self.DEH(self.SGL(x))


class FEModule(nn.Module):
    """Optional 2x upsampling followed by a stack of FEBlocks."""

    def __init__(self, in_channels, out_channels, blks, global_blks, strip_kernel_size, num_head, up=True):
        super(FEModule, self).__init__()
        self.up = Upsample(in_channels, out_channels) if up else nn.Identity()
        self.strip_blocks = nn.ModuleList(
            FEBlock(out_channels, global_blks, strip_kernel_size, num_head)
            for _ in range(blks)
        )
        # Re-initialize every conv with Xavier; norms get unit scale/zero shift.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.xavier_uniform_(module.weight)
            elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        out = self.up(x)
        for blk in self.strip_blocks:
            out = blk(out)
        return out


class StripTSModel(nn.Module):
    """Teacher-student model: a frozen pretrained encoder (teacher) feeds a
    trainable Neck plus a cascade of FEModules (student decoder).

    ``forward`` returns the detached teacher features and the three finest
    student features ``[f1, f2, f3]`` (the coarsest, f4, is internal only).
    """

    def __init__(self, encoder_name='resnet34', dims=[64, 128, 256], blks=[3, 4, 6, 3],
                 strip_kernel_sizes=[5, 7, 11, 21], global_blks=[2, 2, 2, 2]):
        super(StripTSModel, self).__init__()
        from .pretrained_net import create_model
        self.feature_extractor = create_model(name=encoder_name)
        # The teacher is frozen; only the neck and decoder blocks train.
        for param in self.feature_extractor.parameters():
            param.requires_grad = False
        self.neck = Neck(Bottleneck, 3)
        self.block4 = FEModule(in_channels=dims[2] * 2, out_channels=dims[2] * 2, blks=blks[0],
                               global_blks=global_blks[0],
                               strip_kernel_size=strip_kernel_sizes[0],
                               num_head=8, up=False)
        self.block3 = FEModule(in_channels=dims[2] * 2, out_channels=dims[2], blks=blks[1], global_blks=global_blks[1],
                               strip_kernel_size=strip_kernel_sizes[1],
                               num_head=8)
        self.block2 = FEModule(in_channels=dims[2], out_channels=dims[1], blks=blks[2], global_blks=global_blks[2],
                               strip_kernel_size=strip_kernel_sizes[2],
                               num_head=4)
        self.block1 = FEModule(in_channels=dims[1], out_channels=dims[0], blks=blks[3], global_blks=global_blks[3],
                               strip_kernel_size=strip_kernel_sizes[3],
                               num_head=2)

    def forward(self, x):
        teacher_feats = [f.detach() for f in self.feature_extractor(x)]
        f4 = self.block4(self.neck(teacher_feats))
        f3 = self.block3(f4)
        f2 = self.block2(f3)
        f1 = self.block1(f2)
        return teacher_feats, [f1, f2, f3]


if __name__ == '__main__':
    # Smoke test: build the model on GPU and report FLOPs / parameter counts.
    model = StripTSModel().cuda()
    print(model)
    sample = torch.randn(1, 3, 256, 256).cuda()

    from thop import profile, clever_format

    flops, params = profile(model, inputs=(sample,))
    flops, params = clever_format([flops, params], '%.3f')
    print(f"运算量：{flops}, 参数量：{params}")
