import torch
from torch import nn
from utils.PositionEncoding import PositionEncoding


class GPTSimpleModel(nn.Module):
    """A minimal GPT-style (decoder-only) language model.

    Despite the "decoder" attribute names, the stack is built from
    ``nn.TransformerEncoderLayer``: PyTorch's decoder layer requires
    encoder-decoder cross-attention inputs, while a GPT block is simply
    a self-attention encoder layer restricted by a causal mask.
    """

    def __init__(self, vocab_size, embedding_dims, nhead, num_layers):
        """
        Args:
            vocab_size: number of tokens in the vocabulary (output classes).
            embedding_dims: embedding width / model dimension (d_model).
            nhead: number of attention heads per layer.
            num_layers: number of stacked transformer layers.
        """
        super().__init__()
        self.embedding_dims = embedding_dims
        self.vocab_size = vocab_size

        self.embedding = nn.Embedding(vocab_size, embedding_dims)
        # max_len=2000, dropout=0.2 — fixed design constants of this model.
        self.position_encode = PositionEncoding(embedding_dims, 2000, 0.2)
        # Encoder layers + causal mask stand in for a GPT decoder block
        # (see class docstring for why TransformerDecoderLayer is unsuitable).
        self.decoder_layer = nn.TransformerEncoderLayer(d_model=embedding_dims, nhead=nhead, batch_first=True)
        self.transformer_decoder = nn.TransformerEncoder(self.decoder_layer, num_layers=num_layers)

        # Final projection to vocabulary logits. Note: despite the name,
        # this is a Linear head, not a LayerNorm (name kept for callers).
        self.ln = nn.Linear(embedding_dims, vocab_size)

    def forward(self, x):
        """Map token ids of shape (batch, seq) to logits (batch, seq, vocab_size).

        Args:
            x: LongTensor of token indices, shape (batch, seq).

        Returns:
            FloatTensor of unnormalized logits, shape (batch, seq, vocab_size).
        """
        seq_len = x.shape[1]
        # Causal (subsequent-position) mask — the decoder-specific piece.
        # Build it on the input's device: the original created it on CPU,
        # which fails with a device-mismatch error when the model runs on CUDA.
        attn_mask = nn.Transformer.generate_square_subsequent_mask(seq_len, device=x.device)
        x = self.embedding(x)
        x = self.position_encode(x)
        x = self.transformer_decoder(x, mask=attn_mask, is_causal=True)
        # nn.Linear operates on the last dimension of any-rank input, so the
        # original reshape(-1, d) / reshape(batch, -1, vocab) round-trip is
        # unnecessary; output shape is identical.
        return self.ln(x)


if __name__ == '__main__':
    # Smoke test: run a batch of random token sequences through the model
    # and confirm the output logits have shape (batch, seq, vocab_size).
    demo_batch = torch.randint(0, 10, (10, 9))  # 10 sequences of length 9
    vocab_size = 10       # number of word classes
    embedding_size = 32   # embedding width (d_model)
    nhead = 4             # attention heads
    model = GPTSimpleModel(vocab_size, embedding_size, nhead, 3)
    logits = model(demo_batch)
    print(logits.shape)  # torch.Size([10, 9, 10])
