import einops
import torch
from torch import nn
from layers.selective_kernel_layers import SelectiveKernelBlock


class SKNet(nn.Module):
    """Selective Kernel Network (SKNet) backbone with an optional head.

    A 7x7/2 stem convolution followed by stages of SelectiveKernelBlocks
    in a ResNet-style layout ("Selective Kernel Networks", Li et al., 2019).
    With ``out_dim`` set, a classification head is appended and ``forward``
    returns logits; with ``out_dim=None`` it returns pooled features.
    """

    # Expected (C, H, W) of the input images.
    img_input_shape = (3, 224, 224)

    def __init__(
            self,
            out_dim=1000,
            stages_num_config=(3, 4, 6, 3),
            stage_sk_config: tuple = (2, 32, 16),
            stage_io_config: tuple = (
                    (128, 256),
                    (256, 512),
                    (512, 1024),
                    (1024, 2048),
            ),
    ):
        """
        Args:
            out_dim: size of the classification output; ``None`` disables the
                head. Values below 500 get a small two-layer MLP head instead
                of a single linear layer.
            stages_num_config: number of SK blocks in each stage.
            stage_sk_config: ``(head_num, groups, reduce_ratio)`` — either one
                triple shared by all stages, or one triple per stage.
            stage_io_config: per-stage ``(mid_channels, output_channels)`` for
                the SK blocks (block input channels are inferred from the
                previous block's output).
        """
        super().__init__()
        self.stages_num_config = stages_num_config
        self.stages_num = len(stages_num_config)
        # A single (head_num, groups, reduce_ratio) triple is broadcast to
        # every stage.
        if isinstance(stage_sk_config[0], int):
            stage_sk_config = [stage_sk_config for _ in range(self.stages_num)]

        # Raise instead of assert so the checks survive `python -O`.
        if self.stages_num != len(stage_sk_config):
            raise ValueError('wrong stage_sk_config, it shall be same length as stages_num_config')
        if self.stages_num != len(stage_io_config):
            raise ValueError('wrong stage_io_config, it shall be same length as stages_num_config')

        self.stage_sk_config = stage_sk_config
        # Per-stage (mid_channels, output_channels) pairs for the SK blocks.
        self.stage_io_config = stage_io_config

        # 7x7/2 stem; padding chosen so H and W are (approximately) halved.
        kernel_size, stride, dilation = 7, 2, 1
        ks = 1 + (kernel_size - 1) * dilation  # effective (dilated) kernel extent
        padding = (ks - stride) // 2 if (ks - stride) % 2 == 0 else (ks - stride) // 2 + 1

        self.down_sample_conv = nn.Conv2d(3, 64, kernel_size, stride=stride, dilation=dilation, padding=padding)
        self.bn1 = nn.BatchNorm2d(64)
        self.down_sample_pool = nn.MaxPool2d(3, 2, padding=1)

        blocks = []
        features_channels = 64  # channels coming out of the stem
        kernel_sizes = [3, 5, 7, 9, 11, 13, 15]  # candidate SK branch kernels
        for stage, num in enumerate(self.stages_num_config):
            # Every stage except the first starts with a stride-2 block.
            stride = 2 if stage > 0 else 1
            for _ in range(num):
                mid_channels, output_channels = stage_io_config[stage]
                head_num, groups, reduce_ratio = stage_sk_config[stage]
                blocks.append(
                    SelectiveKernelBlock(features_channels, mid_channels, output_channels,
                                         reduce_ratio=reduce_ratio, kernel_sizes=kernel_sizes[:head_num],
                                         strides=stride, groups=groups)
                )
                stride = 1  # only the first block of a stage downsamples
                features_channels = output_channels
        self.stages = nn.Sequential(*blocks)

        # Optional classification head.
        self.fc = None
        if out_dim is not None:
            if out_dim >= 500:
                self.fc = nn.Linear(features_channels, out_dim)
            else:
                # Small output spaces get a narrow two-layer MLP head.
                mid_dim = 256
                self.fc = nn.Sequential(
                    nn.Linear(features_channels, mid_dim),
                    nn.ReLU(),
                    nn.Linear(mid_dim, out_dim)
                )

    def forward(self, x):
        """Run the backbone (and head, if present) on ``x`` of shape (B, 3, H, W)."""
        x = torch.relu(self.bn1(self.down_sample_conv(x)))
        x = self.down_sample_pool(x)
        x = self.stages(x)
        # Global average pooling: (B, C, H, W) -> (B, C). Equivalent to
        # einops.reduce(x, 'b c h w -> b c', 'mean') without the extra dep.
        x = x.mean(dim=(2, 3))
        if self.fc is not None:
            x = self.fc(x)
        return x


def SK_net(input_shape):
    """Factory returning an SKNet with a 10-way classification head.

    Args:
        input_shape: accepted for interface parity with sibling factories;
            currently unused — SKNet assumes ``SKNet.img_input_shape`` inputs.

    Returns:
        An ``SKNet`` instance. Note: ``out_dim=10`` already attaches a small
        MLP classification head (see ``SKNet.__init__``), so this is a full
        classifier, not a bare backbone.
    """
    return SKNet(out_dim=10)


if __name__ == '__main__':
    # Smoke test: push one fake batch through a small SKNet configuration
    # and print the resulting output shape.
    batch_shape = (32, 3, 224, 224)
    fake_batch = torch.randn(batch_shape)
    nets = [
        SKNet(
            10,
            (2, 2, 2, 2),
            (2, 32, 16),
            (
                (64, 128),
                (128, 256),
                (256, 512),
                (512, 1024),
            ),
        ),
    ]
    for net in nets:
        out = net(fake_batch)
        print(type(net), 'output shape:', out.shape)
