import torch
from torch import nn
import torch.nn.functional as F


class MLP_block(nn.Module):
    """Two-layer feed-forward block with a GELU non-linearity.

    Projects the last dimension from ``input_dim`` up to ``dim`` and back
    down to ``input_dim``; used as both the token-mixing and the
    channel-mixing MLP inside a Mixer layer.
    """

    def __init__(self, input_dim, dim):
        super().__init__()
        self.MLP_1 = nn.Linear(input_dim, dim)
        self.MLP_2 = nn.Linear(dim, input_dim)

    def forward(self, x):
        # expand -> GELU -> project back to the original width
        return self.MLP_2(F.gelu(self.MLP_1(x)))


class Mixer_layer(nn.Module):
    """One MLP-Mixer layer: token mixing followed by channel mixing.

    Expects input shaped ``(..., tokens, channels)`` where
    ``shape == (tokens, channels)``. Both halves are pre-norm residual
    blocks; the LayerNorms normalise over the channel dimension
    (``shape[1]``), matching the MLP-Mixer design.
    """

    def __init__(self, shape, ds, dc):
        super().__init__()
        tokens, channels = shape[0], shape[1]
        # Pre-norm over channels before mixing across tokens.
        self.layer_norm_1 = nn.LayerNorm(channels)
        # Token-mixing MLP: applied along the token axis (after a transpose).
        self.mlp_block_1 = MLP_block(tokens, ds)
        # Pre-norm over channels before mixing across channels.
        self.layer_norm_2 = nn.LayerNorm(channels)
        # Channel-mixing MLP: applied along the channel axis directly.
        self.mlp_block_2 = MLP_block(channels, dc)

    def forward(self, x):
        # Token mixing: norm, swap (tokens, channels), MLP over tokens,
        # swap back, residual connection.
        mixed_tokens = self.layer_norm_1(x).transpose(-2, -1)
        mixed_tokens = self.mlp_block_1(mixed_tokens).transpose(-2, -1)
        x = x + mixed_tokens
        # Channel mixing: norm, MLP over channels, residual connection.
        return x + self.mlp_block_2(self.layer_norm_2(x))


class MLP_Mixer(nn.Module):
    """MLP-Mixer image classifier (Tolstikhin et al., 2021).

    Splits the image into non-overlapping patches with a strided
    convolution, runs ``mixer_layer_num`` Mixer layers over the resulting
    (tokens, channels) table, global-average-pools over tokens, and
    classifies the pooled channel vector.

    Args:
        img_shape: (H, W) of the input images.
        patch_shape: (ph, pw) patch size; assumed to evenly divide img_shape.
        hidden_channel_num: per-token embedding width C.
        mixer_layer_num: number of stacked Mixer layers.
        ds: hidden width of the token-mixing MLPs.
        dc: hidden width of the channel-mixing MLPs.
        output_classes: number of output logits.
        input_channel_num: channels of the input image (default 3 = RGB;
            generalizes the previously hard-coded value).
    """

    def __init__(self, img_shape, patch_shape, hidden_channel_num, mixer_layer_num, ds, dc, output_classes,
                 input_channel_num=3):
        super(MLP_Mixer, self).__init__()
        # some records
        self.img_shape = img_shape
        self.patch_shape = patch_shape
        self.hidden_channel_num = hidden_channel_num
        self.mixer_layer_num = mixer_layer_num
        self.ds = ds
        self.dc = dc
        # number of patch tokens S
        self.sequence_length = (img_shape[0] * img_shape[1]) // (patch_shape[0] * patch_shape[1])
        self.output_classes = output_classes

        # Patch embedding: kernel == stride == patch size, so every output
        # spatial position is one patch token.
        self.token_embedding = nn.Conv2d(input_channel_num, hidden_channel_num, patch_shape, patch_shape)
        self.mixer_layers = nn.Sequential(
            *[Mixer_layer((self.sequence_length, hidden_channel_num), ds, dc) for _ in range(mixer_layer_num)]
        )
        # BUG FIX: the head must act on the pooled *channel* vector of width
        # hidden_channel_num, not on sequence_length. The original only ran
        # because the demo config had sequence_length == hidden_channel_num.
        self.classifier = nn.Linear(hidden_channel_num, output_classes)

    def forward(self, x):
        x = self.token_embedding(x)        # (B, C, H/ph, W/pw)
        # BUG FIX: flatten spatial dims into tokens AND move tokens to dim 1
        # so the mixer layers see (B, tokens, channels). The original kept
        # (B, channels, tokens), silently mixing the wrong axes whenever
        # sequence_length != hidden_channel_num.
        x = x.flatten(2).transpose(1, 2)   # (B, S, C)
        x = self.mixer_layers(x)
        x = x.mean(dim=1)                  # global average pool over tokens -> (B, C)
        return self.classifier(x)




# Demo configuration: 224x224 RGB images cut into 32x32 patches
# -> (224*224) / (32*32) = 49 patch tokens.
BATCH_SIZE = 32  # fixed typo: was BTACH_SIZE (constant is not referenced elsewhere)
IMG_CHANNEL_NUM = 3
IMG_SHAPE = (224, 224)
PATCH_SHAPE = (32, 32)
HIDDEN_CHANNEL_NUM = 49
MIXER_LAYER_NUM = 8
DS = 256
DC = 2048
OUTPUT_CLASS_NUM = 10

mlp_mixer = MLP_Mixer(IMG_SHAPE, PATCH_SHAPE, HIDDEN_CHANNEL_NUM, MIXER_LAYER_NUM, DS, DC, OUTPUT_CLASS_NUM)
