from torch import nn
import torch
from timm.models.layers import DropPath, trunc_normal_


class Block(nn.Module):
    """Axial gating block.

    Gates the feature map with per-row and per-column attention weights
    produced by small MLPs over the spatial means, then refines the result
    with a 1x1 projection and a 7x7 depthwise conv. Residual add with
    optional stochastic depth (DropPath).

    Args:
        dim: number of input (and output) channels.
        square: (height, width) of the feature map entering this block,
            as a list or tuple. When both sides are equal, a single MLP is
            shared by the row and column branches.
        norm_layer: normalization constructor applied after each conv.
        act_layer: activation constructor (must accept ``inplace=``).
        drop_path: stochastic-depth drop rate for the residual branch.
    """

    def __init__(self, dim, square, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU6, drop_path=0.):
        super(Block, self).__init__()
        self.project_in = nn.Sequential(nn.Conv2d(dim, dim, 1),
                                        norm_layer(dim),
                                        act_layer(inplace=True))

        if square[0] == square[1]:
            # Square feature map: one MLP serves both axes.
            square = square[0]
            self.linear = nn.Sequential(
                nn.LayerNorm(square),
                nn.Linear(square, square * 4, bias=False),
                nn.Linear(square * 4, square, bias=False),
                nn.Sigmoid()
            )
        else:
            # Rectangular feature map: separate MLPs for height and width.
            self.linear1 = nn.Sequential(
                nn.LayerNorm(square[0]),
                nn.Linear(square[0], square[0] * 4, bias=False),
                nn.Linear(square[0] * 4, square[0], bias=False),
                nn.Sigmoid()
            )

            self.linear2 = nn.Sequential(
                nn.LayerNorm(square[1]),
                nn.Linear(square[1], square[1] * 4, bias=False),
                nn.Linear(square[1] * 4, square[1], bias=False),
                nn.Sigmoid()
            )
        # int when the map is square, else the original (h, w) pair.
        self.square = square
        self.project_out = nn.Sequential(nn.Conv2d(dim, dim, 1),
                                         norm_layer(dim),
                                         act_layer(inplace=True))
        self.dwconv = nn.Sequential(nn.Conv2d(dim, dim, 7, 1, 3, groups=dim),
                                    norm_layer(dim),
                                    act_layer(inplace=True))
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        shortcut = x  # saved for the residual; also avoids shadowing builtin input()
        b, c, h, w = x.shape
        x = self.project_in(x)
        # Spatial means: one profile per row (b, c, h) and per column (b, c, w).
        x_h = torch.mean(x, dim=3, keepdim=True).view(b, c, h)
        x_w = torch.mean(x, dim=2, keepdim=True).view(b, c, w)
        if isinstance(self.square, int):
            x_h = self.linear(x_h).view(b, c, h, 1)
            x_w = self.linear(x_w).view(b, c, 1, w)
        else:
            # BUGFIX: was `elif isinstance(self.square, list)`, which silently
            # skipped the gating (and broke the broadcast below) whenever
            # `square` was passed as a tuple instead of a list.
            x_h = self.linear1(x_h).view(b, c, h, 1)
            x_w = self.linear2(x_w).view(b, c, 1, w)
        x = x * x_h * x_w  # outer-product-style axial gating

        x = self.project_out(x)
        x = self.dwconv(x)
        x = shortcut + self.drop_path(x)
        return x


class GlobalNet(nn.Module):
    """Four-stage hierarchical image classifier built from ``Block``s.

    The stem halves the resolution; each stage halves it again and doubles
    the channel width, so stage ``i`` operates on feature maps of size
    ``img_size / 2 ** (i + 2)``.

    Args:
        base_dim: channel width of the first stage (doubled every stage).
        depths: number of Blocks per stage.
        img_size: input resolution, an int (square) or (height, width) pair.
        drop_path_rate: maximum stochastic-depth rate, ramped linearly
            from 0 across all blocks.
        num_classes: output size of the classification head.
    """

    def __init__(self, base_dim=32, depths=(3, 3, 12, 5), img_size=(256, 1600), drop_path_rate=0.0, num_classes=100):
        # NOTE: defaults are tuples, not lists, to avoid the mutable-default
        # pitfall; lists are still accepted (only iterated/indexed/summed).
        super(GlobalNet, self).__init__()
        self.num_classes = num_classes
        self.in_channel = base_dim
        norm_layer = nn.BatchNorm2d
        act_layer = nn.ReLU6
        if isinstance(img_size, int):
            img_size = [img_size, img_size]
        # stem layer: 3 -> base_dim channels, resolution / 2
        self.stem = nn.Sequential(
            nn.Conv2d(3, base_dim, 3, 2, 1),
            norm_layer(base_dim),
            act_layer(inplace=True)
        )
        # Per-block stochastic-depth rates, linearly 0 -> drop_path_rate.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        # build stages
        self.stages = nn.ModuleList()
        cur = 0
        for i_layer in range(len(depths)):
            embed_dim = base_dim * 2 ** i_layer
            down_sampler = nn.Sequential(nn.Conv2d(self.in_channel, embed_dim, 3, 2, 1), norm_layer(embed_dim))
            self.in_channel = embed_dim
            # Feature-map size inside this stage: img_size / 2**(i_layer + 2)
            # (one /2 from the stem, one per downsampler so far).
            stage_hw = [t // (2 ** (i_layer + 2)) for t in img_size]
            blocks = [Block(self.in_channel, stage_hw, norm_layer, act_layer, dpr[cur + i])
                      for i in range(depths[i_layer])]
            cur += depths[i_layer]
            self.stages.append(nn.Sequential(down_sampler, *blocks))
        # classification head
        self.norm = norm_layer(self.in_channel)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.head = nn.Linear(self.in_channel, num_classes)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # BUGFIX: the original tested `isinstance(m, nn.Linear or nn.Conv2d)`,
        # which evaluates to `isinstance(m, nn.Linear)` — Conv2d weights were
        # never truncated-normal initialized (and the norm branch likewise
        # only ever matched LayerNorm). isinstance takes a tuple of types.
        if isinstance(m, (nn.Linear, nn.Conv2d)):
            # torch's built-in equivalent of timm's trunc_normal_.
            nn.init.trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        x = self.stem(x)
        for stage in self.stages:
            x = stage(x)
        # Normalize, global-average-pool to (b, c), then classify.
        x = torch.flatten(self.avgpool(self.norm(x)), 1)
        return self.head(x)


def global_s1():
    """GlobalNet-S1 variant: base_dim=24, depths (2, 2, 8, 3)."""
    return GlobalNet(24, [2, 2, 8, 3])


def global_s2():
    """GlobalNet-S2 variant: base_dim=32, depths (1, 2, 6, 2)."""
    return GlobalNet(32, [1, 2, 6, 2])


def global_s3():
    """GlobalNet-S3 variant: base_dim=40, depths (2, 2, 8, 4)."""
    return GlobalNet(40, [2, 2, 8, 4])


def global_s4():
    """GlobalNet-S4 variant: base_dim=48, depths (2, 2, 12, 5)."""
    return GlobalNet(48, [2, 2, 12, 5])


def global_s050():
    """GlobalNet-S0.50 (smallest) variant: base_dim=16, depths (1, 1, 3, 1)."""
    return GlobalNet(16, [1, 1, 3, 1])


def global_s100():
    """GlobalNet-S1.00 variant: base_dim=20, depths (1, 2, 4, 1)."""
    return GlobalNet(20, [1, 2, 4, 1])


def global_s150():
    """GlobalNet-S1.50 variant: base_dim=24, depths (1, 2, 4, 2)."""
    return GlobalNet(24, [1, 2, 4, 2])


if __name__ == '__main__':
    # Ad-hoc profiling harness: prints FLOPs and parameter count for one
    # model variant using thop. Requires thop/timm/torchvision installed.
    from thop import profile, clever_format
    import torchvision
    import timm
    from timm import create_model

    # List timm's mobile-family models for comparison purposes.
    print(timm.list_models("*mobile*"))

    # Dummy input matching GlobalNet's default img_size of (256, 1600).
    x = torch.randn((1, 3, 256, 1600))
    # model = torchvision.models.(pretrained=False,num_classes=10)
    model = global_s4()
    flops, params = profile(model, inputs=(x,))
    flops, params = clever_format([flops, params], '%.3f')
    # Message is in Chinese: "FLOPs: {flops}, params: {params}".
    print(f"运算量：{flops}, 参数量：{params}")
