import torch
from torch import nn

# Naive (reference) mixture-of-experts implementation
# Expert network
class ExpertNetwork(nn.Module):
    """A single expert: a two-layer feed-forward network (Linear -> ReLU -> Linear).

    A dense FFN's intermediate size (e.g. 16384) split across 8 experts
    gives each expert an intermediate_size of e.g. 2048.
    """

    def __init__(self, hidden_size, intermediate_size):
        super().__init__()
        self.hidden_size = hidden_size  # e.g. 4096
        self.intermediate_size = intermediate_size  # e.g. 2048

        # nn.Linear stores weight as [out_features, in_features], so
        # linear1.weight.shape == [intermediate_size, hidden_size].
        self.linear1 = nn.Linear(hidden_size, intermediate_size)
        self.linear2 = nn.Linear(intermediate_size, hidden_size)

    def forward(self, x):
        # x: [..., hidden_size] -> [..., intermediate_size] -> [..., hidden_size]
        # (note y = x @ weight.T + bias, so linear1 maps the last dim
        # from hidden_size to intermediate_size)
        # Debug print() calls removed: they fired on every forward pass,
        # i.e. once per token per selected expert inside MOELayer's loop.
        x = self.linear1(x)
        x = nn.functional.relu(x)
        output = self.linear2(x)
        return output


# Routing network
class Router(nn.Module):
    """Scores every token against every expert and keeps the top-k.

    Returns, per token, the renormalized probabilities and the indices of
    the k best-scoring experts (both shaped [token_num, top_k]).
    """

    def __init__(self, hidden_size, expert_num, top_k):
        super().__init__()
        # Linear gate projecting each token onto per-expert logits.
        self.router = nn.Linear(hidden_size, expert_num)
        self.top_k = top_k
        self.hidden_size = hidden_size

    def forward(self, x):
        # Collapse [batch_size, seq_len, hidden_size] -> [token_num, hidden_size].
        tokens = x.view(-1, self.hidden_size)
        logits = self.router(tokens)
        # Softmax over the expert dimension gives one probability per expert.
        probs = nn.functional.softmax(logits, dim=-1)
        topk_score, topk_idx = torch.topk(probs, k=self.top_k, dim=-1, sorted=False)
        # Renormalize so each token's kept scores sum to 1.
        topk_score = topk_score / topk_score.sum(dim=-1, keepdim=True)
        return topk_score, topk_idx

# MoE layer
class MOELayer(nn.Module):
    """Mixture-of-experts layer: routes each token to its top-k experts.

    A token's output is the score-weighted sum of the outputs of the k
    experts the router picked for it (naive per-token dispatch, one
    expert call per (token, selected-expert) pair).
    """

    def __init__(self, hidden_size, intermediate_size, expert_num, top_k):
        super().__init__()
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.expert_num = expert_num
        self.top_k = top_k
        # One independent feed-forward expert per slot.
        self.experts = nn.ModuleList(
            [ExpertNetwork(hidden_size, intermediate_size) for _ in range(expert_num)]
        )
        self.router = Router(hidden_size, expert_num, top_k)

    def forward(self, x):
        # x: [batch_size, seq_len, hidden_size]
        batch_size, seq_len, _ = x.size()
        flat = x.view(batch_size * seq_len, self.hidden_size)
        # Per-token expert scores and indices, each [token_num, top_k].
        scores, indices = self.router(x)
        result = torch.zeros_like(flat)
        # Naive dispatch: run each token through each of its selected
        # experts and accumulate the score-weighted expert outputs.
        for t, (expert_ids, expert_weights) in enumerate(zip(indices, scores)):
            for e_id, weight in zip(expert_ids, expert_weights):
                result[t] += weight * self.experts[e_id](flat[t])
        return result.view(batch_size, seq_len, self.hidden_size)

HIDDEN_SIZE = 4096
INTERMEDIATE_SIZE = 2048
EXPERT_NUM = 8
TOP_K = 2


def main():
    """Smoke test: push one random batch through the MoE layer."""
    # Use HIDDEN_SIZE rather than repeating the 4096 literal.
    inputs = torch.randn(2, 11, HIDDEN_SIZE)
    moe_layer = MOELayer(HIDDEN_SIZE, INTERMEDIATE_SIZE, EXPERT_NUM, TOP_K)
    outputs = moe_layer(inputs)
    print(outputs.size())


# Guard the demo so importing this module does not run it.
if __name__ == "__main__":
    main()

