class MoEConfig:
    """Hyperparameter container for a sparse Mixture-of-Experts layer.

    Args:
        num_experts: total number of routed experts.
        hidden_dim: hidden size fed to the router gate and each expert.
        top_k: experts selected per token; must not exceed num_experts.
        num_shared_experts: experts applied to every token (held here for
            callers; not used by the routed path in this file).

    Raises:
        ValueError: if top_k exceeds num_experts (torch.topk would fail
            much later with a far less obvious error).
    """

    def __init__(self, num_experts, hidden_dim, top_k, num_shared_experts):
        # Validate eagerly: a bad top_k would otherwise only surface inside
        # the router's torch.topk call at forward time.
        if top_k > num_experts:
            raise ValueError(
                f"top_k ({top_k}) cannot exceed num_experts ({num_experts})"
            )
        self.num_experts = num_experts
        self.hidden_dim = hidden_dim
        self.top_k = top_k
        self.num_shared_experts = num_shared_experts

import torch
import torch.nn as nn
import torch.nn.functional as F
import baseMoE

class MoERouter(nn.Module):
    """Top-k softmax gate over experts.

    Scores every expert for every token, keeps the k highest-probability
    experts per token, and renormalizes their probabilities to sum to one.

    forward(x) with x of shape (tokens, hidden_dim) returns:
        logits        — raw gate scores, (tokens, num_experts)
        topk_weights  — renormalized routing weights, (tokens, top_k)
        topk_indices  — chosen expert ids, (tokens, top_k)
        mask          — one-hot selection, permuted to
                        (num_experts, top_k, tokens) for per-expert lookup
    """

    def __init__(self, config):
        super().__init__()
        self.gate = nn.Linear(config.hidden_dim, config.num_experts)
        self.experts_num = config.num_experts
        self.top_k = config.top_k

    def forward(self, x):
        logits = self.gate(x)
        # Softmax in float32 for numerical stability regardless of x's dtype.
        probs = F.softmax(logits, dim=-1, dtype=torch.float32)
        topk_weights, topk_indices = probs.topk(self.top_k, dim=-1)
        # Only k probabilities survive; rescale them so they sum to one.
        topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
        topk_weights = topk_weights.to(x.dtype)
        # (tokens, top_k, num_experts) -> (num_experts, top_k, tokens)
        mask = F.one_hot(topk_indices, num_classes=self.experts_num).permute(2, 1, 0)
        return logits, topk_weights, topk_indices, mask

class SparseMoE(nn.Module):
    """Sparse Mixture-of-Experts layer.

    Each token is routed to its top-k experts; the expert outputs are
    combined as a weighted sum using the router's renormalized weights.

    forward(x) with x of shape (batch, seq, hidden_dim) returns:
        hidden_states — same shape as x
        router_logits — (batch * seq, num_experts), for the auxiliary
                        load-balancing loss computed by the caller
    """

    def __init__(self, config):
        super().__init__()
        # Fixed typo: attribute was previously misspelled `self.confg`.
        self.config = config
        self.experts = nn.ModuleList(
            [baseMoE.BaseExpert(config.hidden_dim, config.hidden_dim)
             for _ in range(config.num_experts)]
        )
        self.router = MoERouter(config)

    def forward(self, x):
        batch_size, seq_len, hidden_dim = x.size()
        # Flatten (batch, seq, dim) -> (tokens, dim): routing is per token.
        x = x.view(-1, hidden_dim)
        router_logits, router_weights, _, expert_masks = self.router(x)
        final_hidden_states = torch.zeros(
            (batch_size * seq_len, hidden_dim), dtype=x.dtype, device=x.device
        )
        # Iterate over experts (not tokens): each expert runs one batched
        # forward over just the tokens routed to it.
        for expert_idx in range(self.config.num_experts):
            expert_layer = self.experts[expert_idx]
            # expert_masks[expert_idx] is (top_k, tokens); where() yields the
            # top-k slot index and the flat token index for each routed token.
            slot_idx, token_idx = torch.where(expert_masks[expert_idx])
            if token_idx.numel() == 0:
                # No token picked this expert in this batch; skip the call.
                continue
            # Advanced indexing replaces the original
            # x.unsqueeze(0)[:, token_idx, :].reshape(-1, hidden_dim) — same result.
            expert_out = expert_layer(x[token_idx])
            # Scale each expert output by that token's routing weight for it.
            weight = router_weights[token_idx, slot_idx].unsqueeze(-1)
            # Scatter-add back: a token touched by several experts accumulates.
            final_hidden_states.index_add_(0, token_idx, expert_out * weight)
        return final_hidden_states.view(batch_size, seq_len, hidden_dim), router_logits






