import torch
import torch.nn as nn
import matplotlib.pyplot as plt



# Hyperparameters of the 124M-parameter GPT-2 model configuration.
GPT_CONFIG_124M = {
    "vocab_size": 50257,  # vocabulary size (BPE token count used by GPT-2)
    "context_length": 1024,  # maximum context (sequence) length
    "emb_dim": 768,  # embedding dimension
    "n_heads": 12,  # number of attention heads
    "n_layer": 12,  # number of transformer layers
    "drop_rate": 0.1,  # dropout rate
    "qkv_bias": False,  # whether the query/key/value projections use a bias term
}



class GeLU(nn.Module):
    """GELU activation using the tanh approximation (as used in GPT-2).

    gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Bug fix: tanh must wrap the WHOLE inner expression
        # sqrt(2/pi) * (x + 0.044715 * x^3); the original closed the
        # tanh(...) call right after the sqrt constant, multiplying the
        # cubic term outside the tanh. The constant is now a plain
        # Python float, avoiding a tensor allocation on every call.
        inner = ((2.0 / torch.pi) ** 0.5) * (x + 0.044715 * torch.pow(x, 3))
        return 0.5 * x * (1.0 + torch.tanh(inner))


class FeedForward(nn.Module):
    """Position-wise feed-forward block: expand emb_dim -> 4*emb_dim,
    apply GELU, then project back down to emb_dim."""

    def __init__(self, cfg):
        super().__init__()
        dim = cfg["emb_dim"]
        hidden = 4 * dim
        self.layers = nn.Sequential(
            nn.Linear(dim, hidden),
            GeLU(),
            nn.Linear(hidden, dim),
        )

    def forward(self, x):
        # Input and output share the shape (..., emb_dim).
        return self.layers(x)


def plot_gelu():
    """Plot GELU and ReLU side by side over [-3, 3] for visual comparison."""
    xs = torch.linspace(-3, 3, 100)
    activations = [(GeLU(), "GELU"), (nn.ReLU(), "RELU")]

    plt.figure(figsize=(8, 3))
    for idx, (act_fn, label) in enumerate(activations, start=1):
        plt.subplot(1, 2, idx)
        plt.plot(xs, act_fn(xs))
        plt.title(f"{label} activation function")
        plt.xlabel("x")
        plt.ylabel(f"{label}(x)")
        plt.grid(True)
    plt.tight_layout()
    plt.show()


if __name__ == "__main__":
    # Smoke test: push a random (batch=2, seq=3, emb_dim=768) tensor
    # through the feed-forward block and show the result.
    model = FeedForward(GPT_CONFIG_124M)
    sample = torch.rand(2, 3, 768)
    result = model(sample)
    print("out.shape: ", result.shape)
    print(result)