# %%
# * Code source: https://bruceyuan.com/llms-zero-to-hero/the-way-of-moe-model-evolution.html
import torch.nn as nn
import torch
import torch.nn.functional as F
# %%
class MOERouter(nn.Module):
    """Top-k gating network: scores each token, selects its top-k experts,
    and builds the per-expert dispatch mask."""

    def __init__(self, hidden_dim, expert_number, top_k) -> None:
        super().__init__()
        # Linear gate: hidden_dim -> one logit per expert.
        self.gate = nn.Linear(hidden_dim, expert_number)
        self.expert_number = expert_number
        self.top_k = top_k

    def forward(self, x):
        """Route a batch of flattened tokens.

        Args:
            x: (num_tokens, hidden_dim) token hidden states.

        Returns:
            router_logits: (num_tokens, expert_number) raw gate scores,
                returned so callers can add an auxiliary balancing loss.
            router_weights: (num_tokens, top_k) combination weights,
                renormalized so each token's k weights sum to 1, cast
                back to x.dtype.
            selected_experts: (num_tokens, top_k) chosen expert indices.
            expert_mask: (expert_number, top_k, num_tokens) one-hot
                dispatch mask, expert-first so expert_mask[e] directly
                locates the tokens routed to expert e.
        """
        router_logits = self.gate(x)
        # Softmax in float32 for numerical stability. dim=-1 (instead of
        # the original dim=1) is the expert axis for 2-D input and also
        # generalizes to inputs carrying extra leading batch dims.
        routing_probs = F.softmax(router_logits, dim=-1, dtype=torch.float)
        # Keep only the k largest probabilities per token.
        router_weights, selected_experts = torch.topk(
            routing_probs, self.top_k, dim=-1
        )
        # Renormalize the surviving top-k weights so they sum to 1.
        router_weights = router_weights / router_weights.sum(dim=-1, keepdim=True)
        router_weights = router_weights.to(x.dtype)

        # One-hot over experts: (num_tokens, top_k, expert_number), then
        # permute to (expert_number, top_k, num_tokens) for cheap
        # per-expert token lookup in the MoE forward pass.
        expert_mask = F.one_hot(
            selected_experts,
            self.expert_number
        )

        expert_mask = expert_mask.permute(2, 1, 0)

        return router_logits, router_weights, selected_experts, expert_mask
# %%
class MOEConfig:
    """Hyper-parameter container for the MoE layers.

    Attributes:
        hidden_dim: width of the token representations.
        expert_number: total number of routed experts.
        top_k: number of experts activated per token.
        shared_experts_number: count of always-on shared experts
            (presumably consumed by a shared-expert MoE variant outside
            this view — the classes shown here do not read it).
    """

    def __init__(self, hidden_dim, expert_number, top_k, shared_experts_number=2) -> None:
        self.hidden_dim = hidden_dim
        self.expert_number = expert_number
        self.top_k = top_k
        self.shared_experts_number = shared_experts_number

    def __repr__(self) -> str:
        # Debug-friendly representation; added for convenience, does not
        # change the existing interface.
        return (
            f"{type(self).__name__}(hidden_dim={self.hidden_dim}, "
            f"expert_number={self.expert_number}, top_k={self.top_k}, "
            f"shared_experts_number={self.shared_experts_number})"
        )
# %%
class BasicExpert(nn.Module):
    """Minimal expert network: a single affine projection."""

    def __init__(self, feature_in, feature_out):
        super().__init__()
        # One learnable linear map from feature_in to feature_out.
        self.linear = nn.Linear(feature_in, feature_out)

    def forward(self, x):
        """Apply the expert's linear projection to `x`."""
        return self.linear(x)
# %%
class SparseMOE(nn.Module):
    """Token-level sparse mixture-of-experts layer.

    Each token is routed to its top-k experts; every expert processes only
    the tokens assigned to it, and expert outputs are combined with the
    renormalized routing weights.
    """

    def __init__(self, config):
        super().__init__()
        self.hidden_dim = config.hidden_dim
        self.expert_number = config.expert_number
        self.top_k = config.top_k

        # One independent feed-forward expert per slot (hidden -> hidden).
        self.experts = nn.ModuleList(
            [
                BasicExpert(self.hidden_dim, self.hidden_dim)
                for _ in range(self.expert_number)
            ]
        )

        self.router = MOERouter(self.hidden_dim, self.expert_number, self.top_k)

    def forward(self, x):
        """Run the MoE layer.

        Args:
            x: (batch_size, seq_len, hidden_dim) input states.

        Returns:
            final_hidden_states: (batch_size, seq_len, hidden_dim).
            router_logits: (batch_size * seq_len, expert_number), exposed
                so a caller can compute an auxiliary load-balancing loss.
        """
        batch_size, seq_len, hidden_dim = x.size()

        # Routing decisions are made per token, so collapse batch and
        # sequence dims: each row is one token, wherever it came from.
        hidden_states = x.view(-1, hidden_dim)  # (num_tokens, hidden_dim)

        router_logits, router_weights, selected_experts_indices, expert_mask = \
            self.router(hidden_states)
        # expert_mask: (expert_number, top_k, num_tokens)

        # Accumulator for the weighted expert outputs, one row per token.
        final_hidden_states = torch.zeros(
            (batch_size * seq_len, hidden_dim),
            dtype=hidden_states.dtype,
            device=hidden_states.device,
        )

        for expert_idx in range(self.expert_number):
            # torch.where on the (top_k, num_tokens) slice gives, for this
            # expert, the top-k slot (idx) and token index (top_x) of each
            # assignment.
            idx, top_x = torch.where(expert_mask[expert_idx])
            if top_x.numel() == 0:
                # No tokens routed to this expert in this step; skip it
                # instead of running the layer on an empty batch.
                continue

            expert_layer = self.experts[expert_idx]
            # Gather only this expert's tokens: (n_assigned, hidden_dim).
            current_state = hidden_states[top_x]
            # Run the expert, then scale each output row by its routing
            # weight (unsqueeze broadcasts over the hidden dimension).
            current_hidden_states = expert_layer(current_state) \
                * router_weights[top_x, idx].unsqueeze(-1)

            # Scatter-add back into the per-token accumulator; a token
            # served by several experts sums their weighted outputs.
            final_hidden_states.index_add_(
                0, top_x, current_hidden_states.to(hidden_states.dtype)
            )

        final_hidden_states = final_hidden_states.reshape(batch_size, seq_len, hidden_dim)
        return final_hidden_states, router_logits
# %%
def test_token_level_moe():
    """Smoke-test the sparse MoE: the output must keep the input shape and
    the router must emit one logit per token/expert pair."""
    x = torch.rand(2, 4, 16)
    config = MOEConfig(16, 3, 2)
    token_level_moe = SparseMOE(config)
    out = token_level_moe(x)
    # Previously this only printed; assert the shapes so the test can fail.
    assert out[0].shape == x.shape
    assert out[1].shape == (2 * 4, 3)
    print(out[0].shape, out[1].shape)
    print(out[0])
# %%
# Run the smoke test when this cell/script is executed.
test_token_level_moe()
# %%