# %%
import torch
import torch.nn as nn
from MOE_learn_SparseMoE import SparseMOE, MOEConfig, BasicExpert
# %%
class ShareMOE(nn.Module):
    """Mixture-of-experts layer that combines always-on shared experts
    with a token-routed sparse MoE.

    Every token passes through all shared experts (their outputs are
    summed), and additionally through the routed `SparseMOE`; the two
    contributions are added together.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # Shared experts: applied to every token, no routing involved.
        self.share_expert = nn.ModuleList(
            BasicExpert(config.hidden_dim, config.hidden_dim)
            for _ in range(config.shared_experts_number)
        )
        # NOTE(review): "rounter" is a typo for "router", but renaming the
        # attribute would change the module's state_dict keys, so it is
        # kept as-is for checkpoint compatibility.
        self.rounter_expert = SparseMOE(config)

    def forward(self, x):
        """Return (shared + routed hidden states, router logits).

        `x` is expected to match what `SparseMOE` accepts — presumably
        (batch, seq, hidden_dim); verify against SparseMOE's forward.
        """
        routed_out, router_logits = self.rounter_expert(x)

        # Sum the outputs of all shared experts for the same input.
        shared_sum = torch.stack(
            [expert(x) for expert in self.share_expert], dim=0
        ).sum(dim=0)

        return shared_sum + routed_out, router_logits
# %%
def test_share_expert_moe():
    """Smoke-test ShareMOE: build a tiny model, run a forward pass,
    and print the output/logits shapes plus the raw router logits."""
    inputs = torch.rand(2, 4, 16)
    # MOEConfig positional args — presumably (hidden_dim, expert_number,
    # top_k, shared_experts_number); confirm against MOEConfig's definition.
    cfg = MOEConfig(16, 3, 2, 2)
    model = ShareMOE(cfg)
    hidden, logits = model(inputs)
    print(hidden.shape, logits.shape)
    print(logits)
# %%
# Run the smoke test when this cell / script is executed.
test_share_expert_moe()
# %%
