import torch
import torch.nn as nn
import torch.nn.functional as F
from sparse_moe import SparseMOE, BasicExpert, MOEConfig
class SharedExpertMOE(nn.Module):
    """MoE layer that combines always-active shared experts with a routed SparseMOE.

    forward(x) returns ``sum_i shared_expert_i(x) + sparse_moe(x)`` along with
    the router logits from the sparse branch (needed by the load-balancing loss).
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # Routed (sparsely activated) branch.
        self.routed_experts_moe = SparseMOE(config)
        self.expert_number = config.expert_number
        self.hidden_dim = config.hidden_dim
        # BUG FIX: the shared pool must be sized by shared_experts_number;
        # the original used expert_number (the routed expert count), creating
        # the wrong number of always-active experts.
        self.shared_experts = nn.ModuleList(
            [
                BasicExpert(self.hidden_dim, self.hidden_dim)
                for _ in range(config.shared_experts_number)
            ]
        )

    def forward(self, x):
        """Args: x of shape (batch, seq_len, hidden_dim).

        Returns:
            output: (batch, seq_len, hidden_dim) — shared + routed outputs.
            router_logits: routing scores from the sparse branch.
        """
        # Every shared expert processes every token; their outputs are summed.
        shared_outputs = [expert(x) for expert in self.shared_experts]
        # (num_shared, b, s, h) -> (b, s, h)
        shared_expert_out = torch.stack(shared_outputs, dim=0).sum(dim=0)

        # Routed branch (top-k dispatch happens inside SparseMOE).
        sparse_moe_out, router_logits = self.routed_experts_moe(x)

        output = shared_expert_out + sparse_moe_out
        return output, router_logits


def switch_load_balancing_loss(
    router_logits: torch.Tensor,
    num_experts: int,
    top_k: int = 2,
) -> torch.Tensor:
    """Switch-Transformers-style load-balancing loss.

    Encourages the router to spread tokens evenly across experts by
    penalizing correlation between each expert's actual load and its mean
    routing probability, plus a small z-loss on the raw logits.

    Args:
        router_logits: shape (num_tokens, num_experts) raw router scores,
            where num_tokens is typically batch_size * sequence_length.
        num_experts: number of routed experts.
        top_k: experts selected per token (default 2, matching the router).

    Returns:
        Scalar tensor: aux_loss + 0.001 * z_loss.
    """
    # Routing probabilities per token.
    router_probs = torch.softmax(router_logits, dim=-1)  # (tokens, num_experts)

    # Experts actually chosen for each token.
    _, selected_experts = torch.topk(router_probs, k=top_k, dim=-1)  # (tokens, top_k)

    # One-hot selection mask: (tokens, top_k, num_experts).
    mask = F.one_hot(selected_experts, num_experts).float()

    # Per-slot fraction of tokens routed to each expert; shape (top_k, num_experts).
    # Summing over the slot axis (implicitly, inside the broadcasted sum below)
    # yields each expert's total load. Ideal load is uniform (1 / num_experts).
    actual_load = mask.mean(dim=0)

    # Auxiliary loss: num_experts * sum_e(load_e * mean_prob_e).
    # Minimized when both load and mean probability are uniform.
    aux_loss = torch.sum(actual_load * router_probs.mean(dim=0)) * num_experts

    # z-loss penalizes large logit magnitudes, keeping the router numerically tame.
    z_loss = torch.mean(torch.square(router_logits))
    z_loss_weight = 0.001  # tunable hyperparameter

    return aux_loss + z_loss * z_loss_weight


def test_moe_training():
    """Smoke-test: train SharedExpertMOE on random regression data for 100 steps."""
    # Synthetic problem dimensions.
    batch_size, seq_len, hidden_dim = 32, 16, 32
    num_batches = 100

    # Model and optimizer setup.
    config = MOEConfig(
        hidden_dim=hidden_dim,
        expert_number=4,
        top_k=2,
        shared_experts_number=2,
    )
    model = SharedExpertMOE(config)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    model.train()
    for batch in range(num_batches):
        # Random inputs/targets — this only checks that training runs end to end.
        inputs = torch.randn(batch_size, seq_len, hidden_dim)
        targets = torch.randn(batch_size, seq_len, hidden_dim)

        predictions, router_logits = model(inputs)

        # Prediction loss plus a small load-balancing penalty.
        mse_loss = F.mse_loss(predictions, targets)
        aux_loss = switch_load_balancing_loss(router_logits, config.expert_number)
        total_loss = mse_loss + 0.01 * aux_loss

        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

        # Periodic progress report.
        if batch % 10 == 0:
            print(f"Batch {batch}, Loss: {total_loss.item():.4f} "
                  f"(MSE: {mse_loss.item():.4f}, Aux: {aux_loss.item():.4f})")


# Run the training smoke test only when executed as a script — the original
# unconditional call ran a full training loop on every import of this module.
if __name__ == "__main__":
    test_moe_training()