import torch
import torch.nn as nn
from modules.position_embedding import PositionEmbeddingLearned


class Mixerblock(nn.Module):
    """Axis-mixing block for (N, C, H, W) feature maps.

    Adds a learned positional embedding (residual), then mixes information
    along the width axis and then the height axis, each with four parallel
    convolutions of kernel size 1/3/5/7 whose outputs are concatenated back
    to the original axis length, and finishes with BatchNorm2d + SiLU.

    Args:
        in_ch: channel count C of the input (width of the final BatchNorm).
        in_height: spatial height H the block is built for; must be a
            multiple of 4.
        in_width: spatial width W the block is built for; must be a
            multiple of 4.

    Raises:
        ValueError: if ``in_height`` or ``in_width`` is not divisible by 4.
    """

    def __init__(self, in_ch, in_height, in_width):
        super().__init__()

        # Each mixer group splits its axis into 4 equal channel chunks and
        # concatenates them, so the axis length is only restored when it is
        # a multiple of 4.  Fail fast instead of silently emitting a tensor
        # whose H/W shrank to 4 * (dim // 4).
        if in_width % 4 != 0 or in_height % 4 != 0:
            raise ValueError(
                f"in_height ({in_height}) and in_width ({in_width}) "
                "must both be multiples of 4"
            )

        divi_width = in_width // 4
        divi_height = in_height // 4

        # Column mixers: after permuting W into the channel slot, these mix
        # along the width axis at four receptive-field sizes (1/3/5/7, with
        # "same" padding so spatial dims are preserved).
        self.col_mixer1 = nn.Conv2d(in_width, divi_width, 1, 1, 0)
        self.col_mixer2 = nn.Conv2d(in_width, divi_width, 3, 1, 1)
        self.col_mixer3 = nn.Conv2d(in_width, divi_width, 5, 1, 2)
        self.col_mixer4 = nn.Conv2d(in_width, divi_width, 7, 1, 3)

        # Row mixers: same idea with H in the channel slot.
        self.row_mixer1 = nn.Conv2d(in_height, divi_height, 1, 1, 0)
        self.row_mixer2 = nn.Conv2d(in_height, divi_height, 3, 1, 1)
        self.row_mixer3 = nn.Conv2d(in_height, divi_height, 5, 1, 2)
        self.row_mixer4 = nn.Conv2d(in_height, divi_height, 7, 1, 3)

        # NOTE(review): the embedding gets in_ch // 2 — presumably it emits
        # in_ch channels total (half per spatial axis) so the residual add
        # in forward() lines up; confirm against modules.position_embedding.
        self.embeddings = PositionEmbeddingLearned(in_width, in_ch // 2)
        self.batch_norm2d = nn.BatchNorm2d(in_ch)
        self.siLu = nn.SiLU()

    def forward(self, x):
        """Mix ``x`` of shape (N, C, H, W); returns the same shape.

        Args:
            x: input feature map, (N, in_ch, in_height, in_width).

        Returns:
            Tensor of the same shape as ``x``.
        """
        # Residual add of the learned positional embedding.
        x = self.embeddings(x) + x
        # (N, C, H, W) -> (N, W, H, C): width becomes the conv channel dim.
        x = x.permute(0, 3, 2, 1)
        x = torch.cat(
            (self.col_mixer1(x), self.col_mixer2(x),
             self.col_mixer3(x), self.col_mixer4(x)), 1)
        # (N, W, H, C) -> (N, H, W, C): height becomes the conv channel dim.
        x = x.permute(0, 2, 1, 3)
        x = torch.cat(
            (self.row_mixer1(x), self.row_mixer2(x),
             self.row_mixer3(x), self.row_mixer4(x)), 1)
        # (N, H, W, C) -> (N, C, H, W) in one step; the original chained
        # .permute(0, 2, 1, 3).permute(0, 3, 2, 1), which composes to this.
        x = x.permute(0, 3, 1, 2)
        return self.siLu(self.batch_norm2d(x))