import torch
import torch.nn as nn
import torch.nn.functional as F

class SiLUActivation(nn.Module):
    """Custom SiLU activation module (Sigmoid Linear Unit).

    SiLU(x) = x * sigmoid(x) = x * (1 / (1 + exp(-x)))

    References:
    - Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415)
    - Sigmoid-Weighted Linear Units for Neural Network Function
      Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118)
    - Swish: a Self-Gated Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1)
    """

    def __init__(self):
        super().__init__()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # Deliberately spelled out with torch.sigmoid (instead of F.silu)
        # so the definition SiLU(x) = x * sigmoid(x) stays explicit.
        gate = torch.sigmoid(input)
        return input * gate


def silu_function(x):
    """Functional SiLU, written out explicitly rather than via torch's built-in.

    SiLU(x) = x * sigmoid(x) = x * (1 / (1 + exp(-x)))
    """
    sigmoid_x = 1.0 / (1.0 + torch.exp(-x))
    return x * sigmoid_x


class SwiGLU(nn.Module):
    """Swish-Gated Linear Unit (SwiGLU) feed-forward block.

    SwiGLU(x, gate) = SiLU(gate) ⊙ x, where ⊙ is element-wise product.

    As used in a transformer FFN:
        FFN_SwiGLU(x) = (SiLU(xW₁ + b₁) ⊙ (xW₃ + b₃))W₂ + b₂

    References:
    - GLU Variants Improve Transformer (Shazeer, 2020, https://arxiv.org/abs/2002.05202)
    - PaLM: Scaling Language Modeling with Pathways (Chowdhery et al., 2022)
    """

    def __init__(self, dim, hidden_dim=None, bias=True):
        super().__init__()
        # 8/3 of dim is the conventional ratio that keeps the total
        # parameter count close to a standard FFN.
        hidden_dim = int(dim * 8 / 3) if hidden_dim is None else hidden_dim

        self.gate_proj = nn.Linear(dim, hidden_dim, bias=bias)
        self.up_proj = nn.Linear(dim, hidden_dim, bias=bias)
        self.down_proj = nn.Linear(hidden_dim, dim, bias=bias)

    def forward(self, x):
        # Gate and up projections share the input; both are [batch, seq, hidden_dim].
        gate = self.gate_proj(x)
        up = self.up_proj(x)

        # SiLU(gate) ⊙ up, with SiLU written out as gate * sigmoid(gate).
        activated = gate * (1.0 / (1.0 + torch.exp(-gate)))

        # Project back down to the model dimension.
        return self.down_proj(activated * up)


def swiglu_function(x, gate):
    """Functional SwiGLU.

    Args:
        x: input tensor.
        gate: gating tensor with the same shape as x.

    Returns:
        SwiGLU(x, gate) = SiLU(gate) ⊙ x
    """
    # SiLU written out as gate * sigmoid(gate).
    silu_gate = gate * (1.0 / (1.0 + torch.exp(-gate)))
    return silu_gate * x


if __name__ == "__main__":
    print("=== SiLU和SwiGLU激活函数测试 ===")

    # Small random batch exercises every implementation below.
    batch_size, seq_len, dim = 2, 4, 512
    x = torch.randn(batch_size, seq_len, dim, requires_grad=True)
    print(f"输入张量: {x.shape}")

    # ----- SiLU: module vs PyTorch reference -----
    print("\n=== SiLU测试 ===")
    silu_output = SiLUActivation()(x)
    reference = F.silu(x)
    print(f"SiLU与PyTorch一致: {torch.allclose(silu_output, reference, atol=1e-6)}")
    print(f"SiLU输出shape: {silu_output.shape}")

    # ----- SwiGLU module -----
    print("\n=== SwiGLU测试 ===")
    swiglu_layer = SwiGLU(dim=dim, hidden_dim=1024)
    swiglu_output = swiglu_layer(x)

    print(f"SwiGLU输出shape: {swiglu_output.shape}")  # should match the input shape
    print(f"输入输出shape一致: {swiglu_output.shape == x.shape}")

    # Report the projection sizes inside the module.
    print("SwiGLU内部结构:")
    print(f"  gate_proj: {dim} → {swiglu_layer.gate_proj.out_features}")
    print(f"  up_proj: {dim} → {swiglu_layer.up_proj.out_features}")
    print(f"  down_proj: {swiglu_layer.down_proj.in_features} → {dim}")

    # ----- Functional SwiGLU -----
    print("\n=== 函数式SwiGLU测试 ===")
    gate_tensor = torch.randn_like(x)
    print(f"函数式SwiGLU输出shape: {swiglu_function(x, gate_tensor).shape}")

    # ----- Hand-picked values -----
    print("\n=== 特殊值测试 ===")
    test_vals = torch.tensor([0.0, 1.0, -1.0, 2.0, -2.0])
    print(f"输入: {test_vals.tolist()}")
    print(f"SiLU输出: {silu_function(test_vals).tolist()}")

    gate_vals = torch.tensor([1.0, 0.0, -1.0, 2.0, -2.0])
    print(f"SwiGLU(x,gate): {swiglu_function(test_vals, gate_vals).tolist()}")

    # ----- Gradients flow end to end -----
    print("\n=== 梯度测试 ===")
    swiglu_output.sum().backward()
    print(f"梯度计算成功: {x.grad is not None}")
    print(f"gate_proj权重梯度: {swiglu_layer.gate_proj.weight.grad is not None}")
    print(f"up_proj权重梯度: {swiglu_layer.up_proj.weight.grad is not None}")
    print(f"down_proj权重梯度: {swiglu_layer.down_proj.weight.grad is not None}")

    # ----- Parameter counts -----
    print("\n=== 参数量分析 ===")
    swiglu_params = sum(p.numel() for p in swiglu_layer.parameters())
    print(f"SwiGLU参数量: {swiglu_params:,}")

    # Two-matrix FFN with the same hidden width, biases ignored.
    standard_ffn_params = dim * 1024 * 2
    print(f"标准FFN参数量: {standard_ffn_params:,}")
    print(f"SwiGLU vs 标准FFN: {swiglu_params/standard_ffn_params:.2f}x")

    print("\n=== 测试完成 ===")
    print("SwiGLU特点:")
    print("- 门控机制提供更强的表达能力")
    print("- 使用SiLU作为门控激活函数")
    print("- 广泛用于现代大型语言模型(如PaLM、LLaMA等)")