import torch
import pytorch_lightning as pl
from torch import nn
import torch.nn.functional as F
import torchvision.models as basemodels
import ivtmetrics
import pathlib
import json


tool_weight = [0.93487068, 0.94234964, 0.93487068, 1.18448115, 1.02368339, 0.97974447]
verb_weight = [
    0.60002400, 0.60002400, 0.60002400, 0.61682467,
    0.67082683, 0.80163207, 0.70562823, 2.11208448,
    2.69230769, 0.60062402
]
target_weight = [
    0.49752894, 0.52041527, 0.49752894, 0.51394739,
    2.71899565, 1.75577963, 0.58509403, 1.25228034,
    0.49752894, 2.42993134, 0.49802647, 0.87266576,
    1.36074165, 0.50150917, 0.49802647
]

# Base learning rates (for the tool, verb&target, and triplet groups).
learning_rates = [0.01, 0.01, 0.01]
# Milestones of the dynamic LR policy: linear change first, then exponential (for tool, verb&target, triplet respectively).
warmups = [9, 18, 58]
# Starting multiplier for the linear LR phase.
lr_start_power = 0.1
# Decay rate (gamma) for the exponential LR phase.
lr_decay_rate = 0.99
# Weight-decay coefficient.
weight_decay = 1e-5
# Initial learning rates (base rate divided by the warm-up start factor).
initial_learning_rates = [lr/lr_start_power for lr in learning_rates]

class Rendezvous(pl.LightningModule):
    """LightningModule for surgical action-triplet recognition (Rendezvous).

    Predicts instrument (i), verb (v), target (t) and the combined triplet
    (ivt) logits for each frame, and tracks ivtmetrics Recognition mAP
    during validation and testing.

    NOTE(review): ``configure_optimizers`` returns three optimizers;
    Lightning 2.x supports multiple optimizers only under manual
    optimization — confirm the pinned Lightning version handles this.
    """

    def __init__(
        self,
        basename="resnet18",
        num_tool=6,
        num_verb=10,
        num_target=15,
        num_triplet=100,
        layer_size=8,
        num_heads=4,
        d_model=128,
        hr_output=False,
        use_ln=False,
    ):
        super().__init__()

        self.encoder = Encoder(basename, num_tool, num_verb, num_target, num_triplet, hr_output=hr_output)
        self.decoder = Decoder(layer_size, d_model, num_heads, num_triplet, use_ln=use_ln)

        # Metrics are created here (not in on_train_start) because Lightning's
        # validation sanity check runs the validation hooks *before*
        # on_train_start, which would hit an undefined self.mAP.
        self.mAP = ivtmetrics.Recognition(100)
        self.mAP_i = ivtmetrics.Recognition(6)
        self.mAP_v = ivtmetrics.Recognition(10)
        self.mAP_t = ivtmetrics.Recognition(15)

    def on_train_start(self):
        # Build the losses lazily so the pos_weight tensors land on the same
        # device as the model. The original hard-coded .cuda(), which crashed
        # on CPU-only runs.
        device = self.device
        self.loss_fn_i = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(tool_weight, device=device))
        self.loss_fn_v = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(verb_weight, device=device))
        self.loss_fn_t = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(target_weight, device=device))
        self.loss_fn_ivt = nn.BCEWithLogitsLoss()
        self.mAP.reset_global()
        self.mAP_i.reset_global()
        self.mAP_v.reset_global()
        self.mAP_t.reset_global()

    def _optimizer_with_schedule(self, params, lr, warmup):
        """Build one SGD optimizer whose LR ramps up linearly for ``warmup``
        steps of the scheduler, then decays exponentially."""
        optimizer = torch.optim.SGD(params, lr=lr, weight_decay=weight_decay)
        linear = torch.optim.lr_scheduler.LinearLR(
            optimizer, start_factor=lr_start_power, total_iters=warmup
        )
        exponential = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay_rate)
        scheduler = torch.optim.lr_scheduler.SequentialLR(
            optimizer, schedulers=[linear, exponential], milestones=[warmup + 1]
        )
        return {"optimizer": optimizer, "lr_scheduler": scheduler}

    def configure_optimizers(self):
        """Three parameter groups with independent schedules: the tool branch,
        the verb/target branch (CAGAM), and the triplet head
        (bottleneck + decoder)."""
        module_i = list(
            set(self.parameters())
            - set(self.encoder.cagam.parameters())
            - set(self.encoder.bottleneck.parameters())
            - set(self.decoder.parameters())
        )
        module_ivt = list(set(self.encoder.bottleneck.parameters()).union(set(self.decoder.parameters())))
        module_vt = self.encoder.cagam.parameters()

        return [
            self._optimizer_with_schedule(module_i, initial_learning_rates[0], warmups[0]),
            self._optimizer_with_schedule(module_vt, initial_learning_rates[1], warmups[1]),
            # BUG FIX: the triplet optimizer was previously built over
            # module_vt (the CAGAM parameters), so the bottleneck and decoder
            # were never optimized by this group; it must use module_ivt.
            self._optimizer_with_schedule(module_ivt, initial_learning_rates[2], warmups[2]),
        ]

    def training_step(self, batch, batch_idx):
        # Labels must be one-hot (multi-label) encoded.
        img, label_i, label_v, label_t, label_ivt = batch
        (cam_i, logit_i), (cam_v, logit_v), (cam_t, logit_t), logit_ivt = self.forward(img)
        loss_i = self.loss_fn_i(logit_i, label_i.float())
        loss_v = self.loss_fn_v(logit_v, label_v.float())
        loss_t = self.loss_fn_t(logit_t, label_t.float())
        loss_ivt = self.loss_fn_ivt(logit_ivt, label_ivt.float())
        loss = loss_i + loss_v + loss_t + loss_ivt
        self.log("train_loss", loss, on_step=True, on_epoch=True, logger=True)
        return loss

    def on_validation_start(self):
        self.mAP.reset_global()

    def on_validation_epoch_start(self):
        self.mAP.reset()

    def validation_step(self, batch, batch_idx):
        img, label_i, label_v, label_t, label_ivt = batch
        (cam_i, logit_i), (cam_v, logit_v), (cam_t, logit_t), logit_ivt = self.forward(img)
        # torch.sigmoid replaces the deprecated F.sigmoid alias.
        self.mAP.update(label_ivt.float().detach().cpu(), torch.sigmoid(logit_ivt).detach().cpu())

    def on_validation_epoch_end(self):
        self.mAP.video_end()
        self.log("mAP", self.mAP.compute_video_AP()['mAP'], on_epoch=True, on_step=False, logger=True)

    def on_test_start(self):
        self.mAP.reset_global()

    def on_test_epoch_start(self):
        self.mAP.reset()
        self.mAP_v.reset()
        self.mAP_i.reset()
        self.mAP_t.reset()

    def test_step(self, batch, batch_idx):
        img, label_i, label_v, label_t, label_ivt = batch
        (cam_i, logit_i), (cam_v, logit_v), (cam_t, logit_t), logit_ivt = self.forward(img)
        self.mAP_i.update(label_i.float().detach().cpu(), torch.sigmoid(logit_i).detach().cpu())
        self.mAP_v.update(label_v.float().detach().cpu(), torch.sigmoid(logit_v).detach().cpu())
        self.mAP_t.update(label_t.float().detach().cpu(), torch.sigmoid(logit_t).detach().cpu())
        self.mAP.update(label_ivt.float().detach().cpu(), torch.sigmoid(logit_ivt).detach().cpu())

    def on_test_epoch_end(self):
        self.mAP.video_end()
        self.mAP_v.video_end()
        self.mAP_t.video_end()
        self.mAP_i.video_end()

    def on_test_end(self):
        """Dump per-class APs and component mAPs to a JSON file and log them.

        NOTE(review): some Lightning versions disallow self.log() outside of
        step/epoch hooks — verify this works on the pinned version.
        """
        mAP_i = self.mAP_i.compute_video_AP()
        mAP_v = self.mAP_v.compute_video_AP()
        mAP_t = self.mAP_t.compute_video_AP()

        mAP_iv = self.mAP.compute_video_AP('iv')
        mAP_it = self.mAP.compute_video_AP('it')
        mAP_ivt = self.mAP.compute_video_AP('ivt')

        log_path = pathlib.Path(self.trainer.log_dir) / "myjsonlog.json"
        test_log = {
            "AP_i": list(mAP_i['AP']),
            "AP_v": list(mAP_v['AP']),
            "AP_t": list(mAP_t['AP']),
            "AP_iv": list(mAP_iv['AP']),
            "AP_it": list(mAP_it['AP']),
            "AP_ivt": list(mAP_ivt['AP']),
            "mAP_i": mAP_i['mAP'],
            "mAP_v": mAP_v['mAP'],
            "mAP_t": mAP_t['mAP'],
            "mAP_iv": mAP_iv['mAP'],
            "mAP_it": mAP_it['mAP'],
            "mAP_ivt": mAP_ivt['mAP']
        }
        with open(log_path, "w", encoding="utf-8") as log_file:
            json.dump(test_log, log_file)

        self.log("mAP_i", mAP_i['mAP'], logger=True)
        self.log("mAP_v", mAP_v['mAP'], logger=True)
        self.log("mAP_t", mAP_t['mAP'], logger=True)
        self.log("mAP_iv", mAP_iv['mAP'], logger=True)
        self.log("mAP_it", mAP_it['mAP'], logger=True)
        self.log("mAP_ivt", mAP_ivt['mAP'], logger=True)

    def forward(self, inputs):
        """Run encoder then decoder; returns ((cam_i, logit_i), (cam_v, logit_v),
        (cam_t, logit_t), logit_ivt)."""
        enc_i, enc_v, enc_t, enc_ivt = self.encoder(inputs)
        dec_ivt = self.decoder(enc_i, enc_v, enc_t, enc_ivt)
        return enc_i, enc_v, enc_t, dec_ivt


# %% Triplet Components Feature Encoder
class Encoder(nn.Module):
    """Triplet-component feature encoder.

    A ResNet backbone feeds three heads: the WSL tool branch (high-level
    feature), the CAGAM verb/target branch (high-level feature guided by the
    tool CAM), and an unfiltered bottleneck (low-level feature) for triplets.
    """

    def __init__(
        self, basename="resnet18", num_tool=6, num_verb=10, num_target=15, num_triplet=100, hr_output=False
    ):
        super(Encoder, self).__init__()
        feature_depth = 64 if basename == "resnet18" else 128
        self.basemodel = BaseModel(basename, hr_output)
        self.wsl = WSL(num_tool, feature_depth)
        self.cagam = CAGAM(num_tool, num_verb, num_target)
        self.bottleneck = Bottleneck(num_triplet)

    def forward(self, x):
        high_level, low_level = self.basemodel(x)
        tool = self.wsl(high_level)
        # The tool CAM (tool[0]) guides both attention branches.
        verb, target = self.cagam(high_level, tool[0])
        triplet = self.bottleneck(low_level)
        return tool, verb, target, triplet


# %% MultiHead Attention Decoder
class Decoder(nn.Module):
    """Multi-head attention decoder: ``layer_size`` rounds of projection,
    multi-head mixed attention (MHMA) and feed-forward refinement, followed
    by a final classifier over the triplet maps."""

    def __init__(self, layer_size, d_model, num_heads, num_class=100, use_ln=False):
        super(Decoder, self).__init__()
        self.projection = nn.ModuleList(
            [Projection(num_triplet=num_class, out_depth=d_model) for _ in range(layer_size)]
        )
        self.mhma = nn.ModuleList(
            [
                MHMA(num_class=num_class, depth=d_model, num_heads=num_heads, use_ln=use_ln)
                for _ in range(layer_size)
            ]
        )
        self.ffnet = nn.ModuleList(
            [FFN(k=layer_size - i - 1, num_class=num_class, use_ln=use_ln) for i in range(layer_size)]
        )
        # BUG FIX: Classifier's first positional parameter is ``layer_size``;
        # the original ``Classifier(num_class)`` bound num_class to that slot
        # and silently left the classifier at its default of 100 classes.
        self.classifier = Classifier(layer_size, num_class=num_class)

    def forward(self, enc_i, enc_v, enc_t, enc_ivt):
        """Refine the triplet maps using the three component CAMs and classify.

        Loop variables renamed: the original used ``F``, shadowing
        torch.nn.functional (imported as F at module level).
        """
        x = enc_ivt.clone()
        for project, attend, feed_forward in zip(self.projection, self.mhma, self.ffnet):
            x = project(enc_i[0], enc_v[0], enc_t[0], x)
            x = attend(x)
            x = feed_forward(x)
        return self.classifier(x)


# %% Feature extraction backbone
class BaseModel(nn.Module):
    """ResNet feature-extraction backbone.

    Forward hooks capture two intermediate activations into
    ``self.output_feature``:
    - "low_level_feature": output of the last bn2 in layer1.
    - "high_level_feature": output of the last bn2 in layer4.

    forward() returns (high_level_feature, low_level_feature).
    """

    def __init__(self, basename="resnet18", hr_output=False, *args):
        super(BaseModel, self).__init__(*args)
        # Filled by the forward hooks on each call to forward().
        self.output_feature = {}
        if basename == "resnet18":
            # NOTE(review): torchvision's `pretrained=` flag is deprecated in
            # favour of `weights=` — confirm against the pinned torchvision.
            self.basemodel = basemodels.resnet18(pretrained=True)
            if hr_output:
                self.increase_resolution()
            self.basemodel.layer1[1].bn2.register_forward_hook(self.get_activation("low_level_feature"))
            self.basemodel.layer4[1].bn2.register_forward_hook(self.get_activation("high_level_feature"))
        if basename == "resnet50":
            self.basemodel = basemodels.resnet50(pretrained=True)
            self.basemodel.layer1[2].bn2.register_forward_hook(self.get_activation("low_level_feature"))
            self.basemodel.layer4[2].bn2.register_forward_hook(self.get_activation("high_level_feature"))

    def increase_resolution(self):
        # Disable the stride-2 downsampling in layer3/layer4 so the output
        # feature map is 4x larger in each spatial dimension.
        # NOTE(review): OUT_HEIGHT/OUT_WIDTH are not defined anywhere in this
        # file; this raises NameError unless they are set elsewhere — confirm.
        global OUT_HEIGHT, OUT_WIDTH
        self.basemodel.layer3[0].conv1.stride = (1, 1)
        self.basemodel.layer3[0].downsample[0].stride = (1, 1)
        self.basemodel.layer4[0].conv1.stride = (1, 1)
        self.basemodel.layer4[0].downsample[0].stride = (1, 1)
        OUT_HEIGHT *= 4
        OUT_WIDTH *= 4
        print("using high resolution output ({}x{})".format(OUT_HEIGHT, OUT_WIDTH))

    def get_activation(self, layer_name):
        # Returns a hook that stashes the layer's output under layer_name.
        def hook(module, input, output):
            self.output_feature[layer_name] = output

        return hook

    def forward(self, x):
        # The classification output is discarded; only the hooked features
        # captured during this pass are returned.
        _ = self.basemodel(x)
        return self.output_feature["high_level_feature"], self.output_feature["low_level_feature"]


# %% Weakly-Supervised localization
class WSL(nn.Module):
    """Weakly-supervised localization head for tools.

    Produces per-class activation maps and logits obtained by global max
    pooling over those maps.
    """

    def __init__(self, num_class, depth=64):
        super(WSL, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=512, out_channels=depth, kernel_size=3, padding=1)
        self.cam = nn.Conv2d(in_channels=depth, out_channels=num_class, kernel_size=1)
        self.elu = nn.ELU()
        self.bn = nn.BatchNorm2d(depth)
        self.gmp = nn.AdaptiveMaxPool2d((1, 1))

    def forward(self, x):
        # conv -> BN -> ELU, then a 1x1 conv to per-class activation maps.
        hidden = self.elu(self.bn(self.conv1(x)))
        class_maps = self.cam(hidden)
        # Global max pooling turns each class map into a single logit.
        scores = self.gmp(class_maps).squeeze(-1).squeeze(-1)
        return class_maps, scores


# %% Unfiltered Bottleneck layer
class Bottleneck(nn.Module):
    """Unfiltered bottleneck: maps the 64-channel low-level backbone feature
    to ``num_class`` raw triplet activation maps, downsampling once."""

    def __init__(self, num_class):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=64, out_channels=256, stride=(2, 2), kernel_size=3)
        self.conv2 = nn.Conv2d(in_channels=256, out_channels=num_class, kernel_size=1)
        self.elu = nn.ELU()
        self.bn1 = nn.BatchNorm2d(256)
        self.bn2 = nn.BatchNorm2d(num_class)

    def forward(self, x):
        # Two conv -> BN -> ELU stages; the first conv halves the spatial size.
        hidden = self.elu(self.bn1(self.conv1(x)))
        return self.elu(self.bn2(self.conv2(hidden)))


# %% Class Activation Guided Attention Mechanism
class CAGAM(nn.Module):
    """Class Activation Guided Attention Mechanism.

    Derives verb and target class-activation maps (and logits) from the
    high-level backbone feature, with the tool CAM guiding both attention
    branches: a channel-attention branch for verbs and a position-attention
    branch for targets.
    """

    def __init__(self, num_tool, num_verb, num_target, in_depth=512):
        super(CAGAM, self).__init__()
        # Both branches work at a channel depth equal to the tool-class count
        # so the tool CAM can be used directly as a query/key source.
        out_depth = num_tool
        self.verb_context = nn.Conv2d(in_channels=in_depth, out_channels=out_depth, kernel_size=3, padding=1)
        self.verb_query = nn.Conv2d(in_channels=out_depth, out_channels=out_depth, kernel_size=1)
        self.verb_tool_query = nn.Conv2d(in_channels=out_depth, out_channels=out_depth, kernel_size=1)
        self.verb_key = nn.Conv2d(in_channels=out_depth, out_channels=out_depth, kernel_size=1)
        self.verb_tool_key = nn.Conv2d(in_channels=out_depth, out_channels=out_depth, kernel_size=1)
        self.verb_cmap = nn.Conv2d(in_channels=out_depth, out_channels=num_verb, kernel_size=1)
        self.target_context = nn.Conv2d(
            in_channels=in_depth, out_channels=out_depth, kernel_size=3, padding=1
        )
        self.target_query = nn.Conv2d(in_channels=out_depth, out_channels=out_depth, kernel_size=1)
        self.target_tool_query = nn.Conv2d(in_channels=out_depth, out_channels=out_depth, kernel_size=1)
        self.target_key = nn.Conv2d(in_channels=out_depth, out_channels=out_depth, kernel_size=1)
        self.target_tool_key = nn.Conv2d(in_channels=out_depth, out_channels=out_depth, kernel_size=1)
        self.target_cmap = nn.Conv2d(in_channels=out_depth, out_channels=num_target, kernel_size=1)
        self.gmp = nn.AdaptiveMaxPool2d((1, 1))
        self.elu = nn.ELU()
        self.soft = nn.Softmax(dim=1)
        # Flattens (B, C, H, W) -> (B, C, H*W) for matmul-based attention.
        self.flat = nn.Flatten(2, 3)
        self.bn1 = nn.BatchNorm2d(out_depth)
        self.bn2 = nn.BatchNorm2d(out_depth)
        self.bn3 = nn.BatchNorm2d(out_depth)
        self.bn4 = nn.BatchNorm2d(out_depth)
        self.bn5 = nn.BatchNorm2d(out_depth)
        self.bn6 = nn.BatchNorm2d(out_depth)
        self.bn7 = nn.BatchNorm2d(out_depth)
        self.bn8 = nn.BatchNorm2d(out_depth)
        self.bn9 = nn.BatchNorm2d(out_depth)
        self.bn10 = nn.BatchNorm2d(out_depth)
        self.bn11 = nn.BatchNorm2d(out_depth)
        self.bn12 = nn.BatchNorm2d(out_depth)
        # Learnable scalars weighting each branch's attention residual.
        self.encoder_cagam_verb_beta = torch.nn.Parameter(torch.randn(1))
        self.encoder_cagam_target_beta = torch.nn.Parameter(torch.randn(1))

    def get_verb(self, raw, cam):
        """Channel-attention branch. Returns (verb class map, verb logits)."""
        x = self.elu(self.bn1(self.verb_context(raw)))
        z = x.clone()
        sh = list(z.shape)
        sh[0] = -1  # keep the batch dimension flexible for the final reshape
        q1 = self.elu(self.bn2(self.verb_query(x)))
        k1 = self.elu(self.bn3(self.verb_key(x)))
        # (B, C, HW) @ (B, HW, C) -> (B, C, C): channel-to-channel affinity.
        w1 = self.flat(k1).matmul(self.flat(q1).transpose(-1, -2))
        q2 = self.elu(self.bn4(self.verb_tool_query(cam)))
        k2 = self.elu(self.bn5(self.verb_tool_key(cam)))
        # Same affinity computed from the tool CAM; used as a gate on w1.
        w2 = self.flat(k2).matmul(self.flat(q2).transpose(-1, -2))
        # Scaled by sqrt of the feature-map width (sh[-1] == W).
        attention = (w1 * w2) / torch.sqrt(torch.tensor(sh[-1], dtype=torch.float32))
        attention = self.soft(attention)
        v = self.flat(z)
        e = (attention.matmul(v) * self.encoder_cagam_verb_beta).reshape(sh)
        e = self.bn6(e + z)  # residual connection back to the context feature
        cmap = self.verb_cmap(e)
        # Global max pooling turns each class map into a single logit.
        y = self.gmp(cmap).squeeze(-1).squeeze(-1)
        return cmap, y

    def get_target(self, raw, cam):
        """Position-attention branch. Returns (target class map, target logits)."""
        x = self.elu(self.bn7(self.target_context(raw)))
        z = x.clone()
        sh = list(z.shape)
        sh[0] = -1  # keep the batch dimension flexible for the final reshape
        q1 = self.elu(self.bn8(self.target_query(x)))
        k1 = self.elu(self.bn9(self.target_key(x)))
        # (B, HW, C) @ (B, C, HW) -> (B, HW, HW): position-to-position affinity.
        w1 = self.flat(k1).transpose(-1, -2).matmul(self.flat(q1))
        q2 = self.elu(self.bn10(self.target_tool_query(cam)))
        k2 = self.elu(self.bn11(self.target_tool_key(cam)))
        # Same affinity computed from the tool CAM; used as a gate on w1.
        w2 = self.flat(k2).transpose(-1, -2).matmul(self.flat(q2))
        # Scaled by sqrt of the feature-map width (sh[-1] == W).
        attention = (w1 * w2) / torch.sqrt(torch.tensor(sh[-1], dtype=torch.float32))
        attention = self.soft(attention)
        v = self.flat(z)
        # Value applied on the left: (B, C, HW) @ (B, HW, HW) -> (B, C, HW).
        e = (v.matmul(attention) * self.encoder_cagam_target_beta).reshape(sh)
        e = self.bn12(e + z)  # residual connection back to the context feature
        cmap = self.target_cmap(e)
        y = self.gmp(cmap).squeeze(-1).squeeze(-1)
        return cmap, y

    def forward(self, x, cam):
        """x: high-level backbone feature; cam: tool class-activation map."""
        cam_v, logit_v = self.get_verb(x, cam)
        cam_t, logit_t = self.get_target(x, cam)
        return (cam_v, logit_v), (cam_t, logit_t)


# %% Projection function
class Projection(nn.Module):
    """Projects the triplet maps and the three component CAMs into a common
    ``out_depth`` space, producing the (query, key, value) sets consumed by
    the MHMA attention block."""

    def __init__(self, num_tool=6, num_verb=10, num_target=15, num_triplet=100, out_depth=128):
        super(Projection, self).__init__()
        self.ivt_value = nn.Conv2d(in_channels=num_triplet, out_channels=out_depth, kernel_size=1)
        self.i_value = nn.Conv2d(in_channels=num_tool, out_channels=out_depth, kernel_size=1)
        self.v_value = nn.Conv2d(in_channels=num_verb, out_channels=out_depth, kernel_size=1)
        self.t_value = nn.Conv2d(in_channels=num_target, out_channels=out_depth, kernel_size=1)
        self.ivt_query = nn.Linear(in_features=num_triplet, out_features=out_depth)
        self.dropout = nn.Dropout(p=0.3)
        self.ivt_key = nn.Linear(in_features=num_triplet, out_features=out_depth)
        self.i_key = nn.Linear(in_features=num_tool, out_features=out_depth)
        self.v_key = nn.Linear(in_features=num_verb, out_features=out_depth)
        self.t_key = nn.Linear(in_features=num_target, out_features=out_depth)
        self.gap = nn.AdaptiveAvgPool2d((1, 1))
        self.elu = nn.ELU()
        self.bn1 = nn.BatchNorm1d(out_depth)
        self.bn2 = nn.BatchNorm1d(out_depth)
        self.bn3 = nn.BatchNorm2d(out_depth)
        self.bn4 = nn.BatchNorm1d(out_depth)
        self.bn5 = nn.BatchNorm2d(out_depth)
        self.bn6 = nn.BatchNorm1d(out_depth)
        self.bn7 = nn.BatchNorm2d(out_depth)
        self.bn8 = nn.BatchNorm1d(out_depth)
        self.bn9 = nn.BatchNorm2d(out_depth)

    def _pool(self, t):
        # Global-average-pool a (B, C, H, W) map down to a (B, C) vector.
        return self.gap(t).squeeze(-1).squeeze(-1)

    def forward(self, cam_i, cam_v, cam_t, X):
        q = self.elu(self.bn1(self.ivt_query(self.dropout(self._pool(X)))))
        k = self.elu(self.bn2(self.ivt_key(self._pool(X))))
        v = self.bn3(self.ivt_value(X))
        k1 = self.elu(self.bn4(self.i_key(self._pool(cam_i))))
        v1 = self.elu(self.bn5(self.i_value(cam_i)))
        k2 = self.elu(self.bn6(self.v_key(self._pool(cam_v))))
        v2 = self.elu(self.bn7(self.v_value(cam_v)))
        k3 = self.elu(self.bn8(self.t_key(self._pool(cam_t))))
        v3 = self.elu(self.bn9(self.t_value(cam_t)))
        # Resize the triplet value map and X to the CAMs' spatial size.
        target_hw = tuple(v1.shape[2:])
        v = self.elu(F.interpolate(v, target_hw))
        X = self.elu(F.interpolate(X, target_hw))
        return (X, (k1, v1), (k2, v2), (k3, v3), (q, k, v))


# %% Multi-head of self and cross attention
class MHMA(nn.Module):
    """Multi-head mixed attention: one self-attention head (triplet q/k/v)
    plus three cross-attention heads keyed by the tool/verb/target CAMs,
    fused by a 3x3 conv with a residual connection.

    NOTE(review): with ``use_ln=True`` this references module-level
    OUT_HEIGHT/OUT_WIDTH, which are not defined in this file — confirm they
    are set elsewhere.
    """

    def __init__(self, depth, num_class=100, num_heads=4, use_ln=False):
        super(MHMA, self).__init__()
        self.concat = nn.Conv2d(
            in_channels=depth * num_heads, out_channels=num_class, kernel_size=3, padding=1
        )
        self.bn = nn.BatchNorm2d(num_class)
        self.ln = nn.LayerNorm([num_class, OUT_HEIGHT, OUT_WIDTH]) if use_ln else nn.BatchNorm2d(num_class)
        self.elu = nn.ELU()
        self.soft = nn.Softmax(dim=1)
        self.heads = num_heads

    def scale_dot_product(self, key, value, query):
        # Scaled dot-product attention.
        # NOTE(review): softmax runs over dim=1 (the head axis), matching the
        # original implementation — verify this axis is intended.
        scale = torch.sqrt(torch.tensor(key.shape[-2], dtype=torch.float32))
        weights = self.soft(key.matmul(query.transpose(-1, -2)) / scale)
        return weights.matmul(value)

    def forward(self, inputs):
        X, (k1, v1), (k2, v2), (k3, v3), (q, k, v) = inputs
        # The single query vector is shared across all heads.
        query = torch.stack([q] * self.heads, dim=1).unsqueeze(dim=-1)  # [B,Head,D,1]
        key = torch.stack([k, k1, k2, k3], dim=1).unsqueeze(dim=-1)     # [B,Head,D,1]
        value = torch.stack([v, v1, v2, v3], dim=1)                     # [B,Head,D,H,W]
        dims = list(value.shape)
        value = value.reshape([-1, dims[1], dims[2], dims[3] * dims[4]])  # [B,Head,D,HW]
        attended = self.scale_dot_product(key, value, query)              # [B,Head,D,HW]
        attended = attended.reshape([-1, dims[1] * dims[2], dims[3], dims[4]])  # [B,Head*D,H,W]
        fused = self.elu(self.bn(self.concat(attended)))
        return self.ln(fused + X.clone())


# %% Feed-forward layer
class FFN(nn.Module):
    """Feed-forward refinement block with a residual connection.

    ``k`` counts the remaining decoder layers; on the last layer (k == 0)
    the second activation is skipped.

    NOTE(review): with ``use_ln=True`` this references module-level
    OUT_HEIGHT/OUT_WIDTH, which are not defined in this file — confirm they
    are set elsewhere.
    """

    def __init__(self, k, num_class=100, use_ln=False):
        super(FFN, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=num_class, out_channels=num_class, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(in_channels=num_class, out_channels=num_class, kernel_size=1)
        self.elu1 = nn.ELU()
        # Identity replaces the original local pass-through function on the
        # final layer; ELU otherwise. Behavior is unchanged.
        self.elu2 = nn.ELU() if k > 0 else nn.Identity()
        self.bn1 = nn.BatchNorm2d(num_class)
        self.bn2 = nn.BatchNorm2d(num_class)
        self.ln = nn.LayerNorm([num_class, OUT_HEIGHT, OUT_WIDTH]) if use_ln else nn.BatchNorm2d(num_class)

    def forward(self, inputs):
        residual = inputs.clone()
        out = self.elu1(self.bn1(self.conv1(inputs)))
        out = self.elu2(self.bn2(self.conv2(out)))
        return self.ln(out + residual)


# %% Classification layer
class Classifier(nn.Module):
    """Final classification layer: global max pooling over the class maps
    followed by a linear projection.

    NOTE(review): ``layer_size`` is accepted but never used; the visible
    caller passes ``num_class`` into this positional slot — confirm intent.
    """

    def __init__(self, layer_size, num_class=100):
        super(Classifier, self).__init__()
        self.gmp = nn.AdaptiveMaxPool2d((1, 1))
        self.mlp = nn.Linear(in_features=num_class, out_features=num_class)

    def forward(self, inputs):
        pooled = self.gmp(inputs).squeeze(-1).squeeze(-1)
        return self.mlp(pooled)
