import torch
import torch.nn as nn
from model.attention import AttentionModel
from model.mixture_of_experts import HeirarchicalMoE, MoE
from model.classifier import Classifier, AttClassifier
from loguru import logger

from model.config_layer import config_fuse


class FuseLayer(nn.Module):
    """Fuse multi-modal features with one of several strategies.

    Supported ``fuse_type`` values:
        - ``"concat"``:    dropout only (the fusion itself is just passing
          through the already-concatenated features)
        - ``"attention"``: Transformer-style attention over the inputs
        - ``"moe"``:       top-2 gated Mixture-of-Experts
        - ``"hmoe"``:      two-level hierarchical Mixture-of-Experts

    The "moe"/"hmoe" paths additionally return an auxiliary
    (load-balancing) loss from ``forward``.

    Dimensions are read from ``config_fuse`` (keys ``"input_dim"`` and
    ``"hidden_dim"``).
    """

    # Closed set of supported fusion strategies.
    FUSE_TYPES = ("concat", "attention", "moe", "hmoe")

    def __init__(
        self,
        data,
        fuse_type="concat",
    ):
        """
        Args:
            data: unused here; kept for interface compatibility with callers.
            fuse_type: fusion strategy, one of ``FUSE_TYPES``.

        Raises:
            ValueError: if ``fuse_type`` is not supported. (An explicit
                check instead of ``assert``, which is stripped under ``-O``.)
        """
        super().__init__()

        if fuse_type not in self.FUSE_TYPES:
            raise ValueError(
                "fuse_type must be one of {}, got {!r}".format(
                    self.FUSE_TYPES, fuse_type
                )
            )

        self.fuse_type = fuse_type
        self.input_dim = config_fuse["input_dim"]
        self.hidden_dim = config_fuse["hidden_dim"]

        logger.info("Build FuseLayer...")
        logger.info("   with {}".format(self.fuse_type))

        if self.fuse_type == "concat":
            # No learned fusion: regularize the concatenated features only.
            self.fuse_model = nn.Sequential(nn.Dropout())

        ## attention model
        elif self.fuse_type == "attention":
            self.fuse_model = AttentionModel(
                d_input=self.input_dim,
                d_model=self.hidden_dim,
                d_ff=2 * self.hidden_dim,
                head=4,
                num_layer=1,
                dropout=0.5,
            )
            # Xavier init for all weight matrices (skip 1-D bias vectors).
            for p in self.fuse_model.parameters():
                if p.dim() > 1:
                    nn.init.xavier_uniform_(p)

        elif self.fuse_type == "moe":
            # NOTE(review): the MoE paths are built with hidden_dim while
            # forward() checks the output against input_dim — these
            # presumably coincide in config_fuse; verify.
            self.fuse_model = MoE(
                dim=self.hidden_dim,
                num_experts=4,  # increase the experts (# parameters) of your model without increasing computation
                hidden_dim=self.hidden_dim
                * 4,  # size of hidden dimension in each expert, defaults to 4 * dimension
                activation=nn.LeakyReLU,  # use your preferred activation, will default to GELU
                second_policy_train="random",  # in top_2 gating, policy for whether to use a second-place expert
                second_policy_eval="random",  # all (always) | none (never) | threshold (if gate value > the given threshold) | random (if gate value > threshold * random_uniform(0, 1))
                second_threshold_train=0.2,
                second_threshold_eval=0.2,
                capacity_factor_train=1.25,  # experts have fixed capacity per batch. we need some extra capacity in case gating is not perfectly balanced.
                capacity_factor_eval=2.0,  # capacity_factor_* should be set to a value >=1
                loss_coef=1e-2,
            )

        elif self.fuse_type == "hmoe":
            self.fuse_model = HeirarchicalMoE(
                dim=self.hidden_dim,
                num_experts=(4, 4),
            )

    def forward(self, input, mask=None):
        """Run the selected fusion model over ``input``.

        Args:
            input: feature tensor whose last dimension must equal
                ``input_dim``. (Exact expected rank/shape depends on the
                underlying model — presumably (batch, seq, dim) for the
                attention path; TODO confirm against AttentionModel.)
            mask: optional attention mask, used only by the "attention" path.

        Returns:
            The fused feature tensor; for "moe"/"hmoe" a two-tuple of
            (features, auxiliary load-balancing loss).
        """
        if self.fuse_type == "concat":
            feature_out_d = self.fuse_model(input)
        elif self.fuse_type == "attention":
            feature_out_d = self.fuse_model(input, mask)
        else:
            # MoE variants also emit an auxiliary load-balancing loss.
            feature_out_d, aux_loss = self.fuse_model(input)

        # Internal invariant: fusion must preserve the feature dimension.
        assert feature_out_d.size()[-1] == self.input_dim

        if self.fuse_type in ("moe", "hmoe"):
            return feature_out_d, aux_loss
        return feature_out_d
