import torch

from .config import CLS_CHANNEL, CLS_LAYERS


class ClassifyHeader(torch.nn.Module):
    """3D classification head.

    Pipeline: random whole-channel dropout -> channel reduction ->
    deep conv stack -> layer attention -> z-axis downsampling ->
    spatial dropout -> global max pooling -> 4-way sigmoid output.
    """

    def __init__(self):
        super().__init__()
        # Grouped 1x1x1 conv reducing the backbone's 5120 channels to CLS_CHANNEL.
        self.simplify = torch.nn.Conv3d(in_channels=5120, out_channels=CLS_CHANNEL, kernel_size=(1, 1, 1), padding=(0, 0, 0), groups=4)
        self.deep = torch.nn.Sequential(*[Block() for _ in range(CLS_LAYERS)])
        self.attn = Attn()
        self.mid = create_mid()
        self.dropout = torch.nn.Dropout3d(p=0.3)
        # Maps the pooled features to the 4 output classes.
        self.mapper = torch.nn.Conv3d(in_channels=CLS_CHANNEL, out_channels=4, kernel_size=(1, 1, 1), padding=(0, 0, 0), groups=4)
        self.active = torch.nn.Sigmoid()

    def forward(self, features: torch.Tensor):
        # Run the 3D classification pipeline.
        output = self.process(features)
        return output

    def process(self, feature: torch.Tensor) -> torch.Tensor:
        # Step 1: randomly zero out whole feature channels (custom channel
        # dropout). Like torch.nn.Dropout3d, this is only active in training
        # mode. The original code had three defects here: torch.randint's
        # `high` is exclusive, so `high=channels - 1` could never pick the
        # last channel; randint samples with replacement, so fewer than 40%
        # of channels could end up dropped; and the in-place write mutated
        # the caller's tensor. randperm + clone fixes all three.
        # feature: [1, 4096, z, 16, 16]  (assumed input layout — TODO confirm)
        if self.training:
            channels = feature.shape[1]
            dropout_channels = int(0.4 * channels)
            drop_idx = torch.randperm(channels, device=feature.device)[:dropout_channels]
            feature = feature.clone()
            feature[:, drop_idx, :, :, :] = 0

        # Step 2: channel reduction.
        # feature: [1, CLS_CHANNEL, z, 16, 16]
        feature = self.simplify(feature)

        # Step 3: stacked depthwise conv blocks linking information along z.
        # deep_feature: [1, CLS_CHANNEL, z, 16, 16]
        deep_feature = self.deep(feature)

        # Step 4: attention weights emphasizing the important z layers.
        # weights: [1, CLS_CHANNEL, z, 1, 1]
        weights = self.attn(feature)

        # Step 5: combine the three pieces; weights broadcast over x/y.
        # feature: [1, CLS_CHANNEL, z, 16, 16]
        feature = (deep_feature - feature + feature.mean()) * weights

        # Step 6: downsample along z by a factor of 8 (three stride-2 convs);
        # x/y sizes are preserved.
        # feature: [1, CLS_CHANNEL, z // 8, 16, 16]
        feature = self.mid(feature)

        # Step 7: spatial (channel-wise) dropout.
        # feature: [1, CLS_CHANNEL, z // 8, 16, 16]
        feature = self.dropout(feature)

        # Step 8: global max pooling over z, x and y.
        # pool_feature: [1, CLS_CHANNEL, 1, 1, 1]
        pool_feature = torch.max_pool3d(feature, kernel_size=feature.shape[2:])

        # Step 9: classify and drop the singleton spatial dims.
        # output: [1, 4]
        output = self.mapper(pool_feature)[:, :, 0, 0, 0]

        # Step 10: sigmoid activation (independent per-class probabilities).
        # output: [1, 4]
        output = self.active(output)
        return output


class Attn(torch.nn.Module):
    """Layer-attention module.

    Given a [b, c, z, h, w] feature map, produces per-(channel, z-slice)
    attention weights of shape [b, c, z, 1, 1].
    """

    def __init__(self):
        super().__init__()
        self.block1 = Block()
        self.block2 = Block()

    def forward(self, inputs: torch.Tensor):
        # Refine the features through two residual conv blocks.
        refined = self.block2(self.block1(inputs))
        # Center, then fold the values outward around +/- one std.
        centered = refined - refined.mean()
        std = centered.std()
        spread = torch.abs(centered - std) + torch.abs(centered + std)
        # Modulate by how far the raw inputs deviate from their mean.
        weighted = spread * torch.abs(inputs - inputs.mean())
        # RMS over the spatial (x, y) dims, restored as singleton axes.
        rms = torch.sqrt(torch.square(weighted).mean(dim=(3, 4)))
        return rms[:, :, :, None, None]


class Block(torch.nn.Module):
    """Residual depthwise conv block computing ``x - f(x)``.

    ``f`` is: grouped pointwise conv -> near-range depthwise conv ->
    far-range dilated depthwise conv -> grouped pointwise conv -> SiLU.
    """

    def __init__(self):
        super().__init__()
        layers = [
            # Channel mixing (grouped 1x1x1).
            torch.nn.Conv3d(in_channels=CLS_CHANNEL, out_channels=CLS_CHANNEL, kernel_size=(1, 1, 1), padding=(0, 0, 0), groups=4),
            # Near-range depthwise conv (z receptive field 7).
            torch.nn.Conv3d(in_channels=CLS_CHANNEL, out_channels=CLS_CHANNEL, kernel_size=(7, 3, 3), padding=(3, 1, 1), groups=CLS_CHANNEL),
            # Far-range dilated depthwise conv.
            torch.nn.Conv3d(in_channels=CLS_CHANNEL, out_channels=CLS_CHANNEL, kernel_size=(7, 3, 3), padding=(12, 2, 2), groups=CLS_CHANNEL, dilation=(4, 2, 2)),
            # Channel mixing again (grouped 1x1x1).
            torch.nn.Conv3d(in_channels=CLS_CHANNEL, out_channels=CLS_CHANNEL, kernel_size=(1, 1, 1), padding=(0, 0, 0), groups=4),
            torch.nn.SiLU(),
        ]
        self.sequential = torch.nn.Sequential(*layers)

    def forward(self, x: torch.Tensor):
        # Subtractive residual connection.
        return x - self.sequential(x)


def create_mid() -> torch.nn.Module:
    """Build the z-axis downsampling stage.

    Three stride-2 (along z) depthwise convs, bracketed by grouped
    pointwise convs, with a SiLU at the end: z shrinks by a factor of 8
    while x/y sizes are preserved.
    """
    def _halve_z() -> torch.nn.Conv3d:
        # Depthwise conv that halves the z dimension.
        return torch.nn.Conv3d(in_channels=CLS_CHANNEL, out_channels=CLS_CHANNEL, kernel_size=(7, 3, 3), padding=(3, 1, 1), groups=CLS_CHANNEL, stride=(2, 1, 1))

    return torch.nn.Sequential(
        torch.nn.Conv3d(in_channels=CLS_CHANNEL, out_channels=CLS_CHANNEL, kernel_size=(1, 1, 1), padding=(0, 0, 0), groups=4),
        _halve_z(),
        _halve_z(),
        _halve_z(),
        torch.nn.Conv3d(in_channels=CLS_CHANNEL, out_channels=CLS_CHANNEL, kernel_size=(1, 1, 1), padding=(0, 0, 0), groups=4),
        torch.nn.SiLU(),
    )
