import math
import torch
from torch import nn


def generate_data(learn_size):
    """Build sliding-window training pairs from a fixed repeating 0/1 pattern.

    Each sample is a window of ``learn_size`` consecutive tokens; its target is
    the token that immediately follows the window.

    Returns (windows, targets, sample_count).
    """
    sequence = [1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0]
    sample_count = len(sequence) - learn_size
    windows = [sequence[start:start + learn_size] for start in range(sample_count)]
    targets = [sequence[start + learn_size] for start in range(sample_count)]
    return windows, targets, sample_count


class Attention(nn.Module):
    """One pre-norm transformer block: masked multi-head self-attention and an
    MLP, both added to the input via residual connections (parallel layout, as
    in the original code).

    Fix/generalization: the original read the module-level globals
    ``embed_size``, ``head_num``, ``batch_size`` and ``learn_size`` at call
    time, which forced callers to mutate ``batch_size`` globally before
    inference.  The sizes are now captured at construction (defaults match the
    script's globals, so ``Attention()`` behaves identically) and the batch and
    sequence dimensions are derived from the input tensor itself.
    """

    def __init__(self, embed_size=16, head_num=4):
        """embed_size must be divisible by head_num."""
        super().__init__()
        self.embed_size = embed_size
        self.head_num = head_num
        # Feed-forward branch: LayerNorm -> expand x4 -> GELU -> project back.
        self.mlp = nn.Sequential(nn.LayerNorm(embed_size))
        self.mlp.append(nn.Linear(embed_size, 4 * embed_size, bias=False))
        self.mlp.append(nn.GELU())
        self.mlp.append(nn.Linear(4 * embed_size, embed_size, bias=False))
        # Attention branch: LayerNorm, then a fused q/k/v projection.
        self.attention = nn.Sequential(nn.LayerNorm(embed_size))
        self.attention.append(nn.Linear(embed_size, 3 * embed_size, bias=False))
        self.projection = nn.Linear(embed_size, embed_size, bias=False)

    def get_embed(self, embed):
        """Apply the block to ``embed`` of shape (batch, seq, embed_size).

        Returns a tensor of the same shape.
        """
        batch, seq, channels = embed.size()  # channels == self.embed_size
        heads = self.head_num
        # Feed-forward features (computed from the block input, in parallel
        # with attention — the original's layout, preserved here).
        forward_embed = self.mlp(embed)
        # Fused projection, then split into query / key / value.
        q, k, v = self.attention(embed).split(channels, dim=2)
        # Multi-head split: each head attends to its own slice of the channels.
        q = q.view(batch, seq, heads, channels // heads).transpose(1, 2)
        k = k.view(batch, seq, heads, channels // heads).transpose(1, 2)
        v = v.view(batch, seq, heads, channels // heads).transpose(1, 2)
        # Scaled dot-product scores.
        scores = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        # Causal mask: zero out (-inf before softmax) everything above the
        # diagonal so a position cannot attend to the future.  Built on the
        # input's device so the block works off-CPU too.
        mask = torch.tril(torch.ones(seq, seq, device=embed.device)).view(1, 1, seq, seq)
        scores = scores.masked_fill(mask == 0, float('-inf'))
        context = nn.functional.softmax(scores, dim=-1) @ v
        # Merge heads back into a single channel dimension.
        context = context.transpose(1, 2).contiguous().view(batch, seq, channels)
        # Residual connections around both branches.
        return embed + self.projection(context) + forward_embed


class GPT(nn.Module):
    """Minimal GPT: token + position embeddings, a stack of attention blocks,
    and a weight-tied linear output head, with a built-in AdamW optimizer."""

    def __init__(self):
        super().__init__()
        self.build_network()
        self.optimizer = torch.optim.AdamW(self.parameters(), lr=1e-3, weight_decay=1e-1)

    def build_network(self):
        # Consistency fix: the original hardcoded 2 and 16 here, duplicating
        # the module-level vocab_num / embed_size constants — use them instead.
        self.token_embed = nn.Embedding(vocab_num, embed_size)
        self.position_embed = nn.Embedding(learn_size, embed_size)
        # Stack of 4 pre-norm attention blocks.
        self.attention = nn.ModuleList([Attention() for _ in range(4)])
        self.norm = nn.LayerNorm(embed_size)
        self.output = nn.Linear(embed_size, vocab_num, bias=False)
        # Weight tying: input embedding and output projection share one matrix.
        self.token_embed.weight = self.output.weight

    def get_logits(self, input):
        """Return logits of shape (batch, vocab_num) for the LAST position of
        ``input`` (LongTensor of token ids, shape (batch, seq) or (seq,))."""
        # Derive the sequence length from the input rather than the global
        # learn_size (identical here, since callers always pass learn_size
        # tokens; seq > learn_size would overflow the position table anyway).
        seq = input.size(-1)
        position = torch.arange(0, seq, dtype=torch.long).unsqueeze(0)
        token_embed = self.token_embed(input)
        position_embed = self.position_embed(position)  # broadcasts over batch
        embed = token_embed + position_embed
        for attention in self.attention:
            embed = attention.get_embed(embed)
        embed = self.norm(embed)
        # Only the final position's logits are needed for next-token prediction.
        logits = self.output(embed[:, -1, :])
        return logits

    def update(self, input, output):
        """Run one optimizer step on a batch; returns the scalar loss."""
        input = torch.tensor(input, dtype=torch.long)
        output = torch.tensor(output, dtype=torch.long)
        logits = self.get_logits(input)
        loss = nn.functional.cross_entropy(logits, output)
        loss.backward()
        self.optimizer.step()
        self.optimizer.zero_grad()
        return loss.detach().item()


# Hyperparameters (the comments show the corresponding GPT-2 scale values).
vocab_num = 2  # 50304
learn_size = 3  # 1024
head_num = 4  # 12
embed_size = 16  # 768

X, Y, batch_size = generate_data(learn_size)

# Train on the full (tiny) dataset as one batch for 1000 steps.
gpt = GPT()
for step in range(1000):
    loss = gpt.update(X, Y)
    # Idiom fix: plain if-statement instead of a side-effecting ternary.
    if step % 100 == 0:
        print(f"{loss:.4f}")

# Autoregressive sampling: repeatedly feed the last learn_size tokens back in.
start = [1, 0, 1]
batch_size = 1  # Attention reads this module-level value for its reshapes
full = start.copy()
for _ in range(20):
    # Inference only — no gradients needed.
    with torch.no_grad():
        context = torch.as_tensor(start)  # renamed from `input` (shadowed builtin)
        logits = gpt.get_logits(context)
        probs = nn.functional.softmax(logits, dim=-1)
    next_char = torch.multinomial(probs[0], num_samples=1).item()
    start = start[1:] + [next_char]  # slide the context window forward
    full.append(next_char)

print("".join(map(str, full)))