import torch
from torch import nn
import torch.nn.functional as F


# First stage: embedding layer (token + position + segment embeddings)
class EmbddingDiy(nn.Module):
    """BERT-style input embedding: token + position + segment, then LayerNorm.

    Args:
        victorsize: vocabulary size (index 0 is the padding token).
        hiddensize: embedding/hidden dimension. NOTE: the position table is
            sized ``hiddensize`` rows, so sequences longer than ``hiddensize``
            positions cannot be embedded — TODO confirm intended max length.
    """

    def __init__(self, victorsize, hiddensize=512):
        super().__init__()
        self.emb = nn.Embedding(victorsize, hiddensize, padding_idx=0)
        # Learned absolute position embeddings (one row per position).
        self.position_emb = nn.Embedding(hiddensize, hiddensize)
        # Segment ("token type") embeddings; only segment 0 is used here.
        self.token_emb = nn.Embedding(2, hiddensize)
        self.layer_norm = nn.LayerNorm(hiddensize)

    def forward(self, input):
        """Embed a batch of token ids.

        Args:
            input: LongTensor of shape (batch, seq_len) holding token ids.

        Returns:
            FloatTensor of shape (batch, seq_len, hiddensize).
        """
        # BUG FIX: the original indexed positions with len(input), which is the
        # BATCH size, not the sequence length — broadcasting then fails (or
        # silently misaligns) whenever batch != seq_len. Index over dim 1.
        seq_len = input.size(1)
        emb = self.emb(input)  # (batch, seq, hidden)
        positions = torch.arange(seq_len, device=input.device)
        position_emb = self.position_emb(positions)  # (seq, hidden), broadcasts over batch
        segment_ids = torch.zeros(seq_len, dtype=torch.long, device=input.device)
        token_emb = self.token_emb(segment_ids)  # (seq, hidden), broadcasts over batch
        return self.layer_norm(emb + position_emb + token_emb)


class SingleTransformaer(nn.Module):
    """One pre-LayerNorm transformer encoder block.

    Computes multi-head self-attention followed by a GELU feed-forward
    network, each wrapped in a residual connection with LayerNorm applied
    to the sub-layer input ("pre-LN" arrangement).

    Args:
        victorsize: accepted for signature compatibility; not used here.
        hiddensize: model width; must equal heads * head_size at call time.
    """

    def __init__(self, victorsize, hiddensize=512):
        super().__init__()
        # Submodule creation order matters for seeded reproducibility.
        self.ql = nn.Linear(hiddensize, hiddensize)
        self.kl = nn.Linear(hiddensize, hiddensize)
        self.vl = nn.Linear(hiddensize, hiddensize)
        self.feedforward = nn.Linear(hiddensize, hiddensize * 4)
        self.feedforward1 = nn.Linear(hiddensize * 4, hiddensize)
        self.layer_norm = nn.LayerNorm(hiddensize)
        self.layer_norm1 = nn.LayerNorm(hiddensize)

    def transpose_for_scores(self, x, attention_head_size, num_attention_heads):
        """Split (batch, seq, hidden) into (batch, heads, seq, head_size)."""
        batch, seq_len, _ = x.shape
        per_head = x.view(batch, seq_len, num_attention_heads, attention_head_size)
        return per_head.permute(0, 2, 1, 3)

    def forward(self, x, attention_head_size, num_attention_heads):
        """Apply attention and feed-forward sub-layers to x (batch, seq, hidden)."""
        # --- multi-head self-attention sub-layer ---
        shortcut = x
        normed = self.layer_norm(x)  # shared input norm for Q, K, V
        queries = self.transpose_for_scores(self.ql(normed), attention_head_size, num_attention_heads)
        keys = self.transpose_for_scores(self.kl(normed), attention_head_size, num_attention_heads)
        values = self.transpose_for_scores(self.vl(normed), attention_head_size, num_attention_heads)

        # Scaled dot-product attention over each head.
        scale = torch.sqrt(torch.tensor(attention_head_size))
        scores = queries @ keys.transpose(-1, -2) / scale
        weights = F.softmax(scores, dim=-1)
        # Merge heads back to (batch, seq, hidden).
        context = (weights @ values).transpose(1, 2).reshape_as(x)
        x = shortcut + context

        # --- position-wise feed-forward sub-layer ---
        shortcut = x
        expanded = F.gelu(self.feedforward(self.layer_norm1(x)))
        x = shortcut + self.feedforward1(expanded)
        return x


class BertDiy(nn.Module):
    """Minimal BERT-style encoder with a 2-way classification head.

    Stacks an embedding layer and ``num_layers`` transformer blocks, then
    classifies from the first position's final hidden state (CLS-style).

    Args:
        victorsize: vocabulary size, forwarded to the embedding layer
            (also passed to each transformer block for signature parity).
        hiddensize: model width.
        num_layers: number of stacked transformer blocks.
        num_heads: attention heads per block.

    Raises:
        ValueError: if ``hiddensize`` is not divisible by ``num_heads``.
    """

    def __init__(self, victorsize, hiddensize=512, num_layers=6, num_heads=8):
        super().__init__()
        # Fail fast: with a non-divisible config, the original silently
        # truncated head_size and only blew up later inside attention reshape.
        if hiddensize % num_heads != 0:
            raise ValueError(
                f"hiddensize ({hiddensize}) must be divisible by num_heads ({num_heads})"
            )
        self.emb = EmbddingDiy(victorsize, hiddensize)
        self.layers = nn.ModuleList(
            [SingleTransformaer(victorsize, hiddensize) for _ in range(num_layers)]
        )
        self.head_size = hiddensize // num_heads
        self.num_heads = num_heads
        self.cls = nn.Linear(hiddensize, 2)

    def forward(self, x):
        """Return (batch, 2) logits for token-id input x of shape (batch, seq)."""
        x = self.emb(x)
        for layer in self.layers:
            x = layer(x, self.head_size, self.num_heads)
        # Classify from the first token's representation ([CLS] convention).
        return self.cls(x[:, 0])