import math

import torch
import torch.nn as nn
import torch.nn.functional as F

class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal position information to token embeddings.

    Implements the encoding from "Attention Is All You Need":
        PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
        PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))
    """

    def __init__(self, d_model, max_len=512):
        super().__init__()
        # Positions as a (max_len, 1) column so it broadcasts against the
        # (d_model/2,) frequency row below.
        positions = torch.arange(max_len, dtype=torch.float).unsqueeze(1)
        # Geometric progression of inverse wavelengths, one per even index.
        inv_freq = torch.exp(
            torch.arange(0, d_model, 2, dtype=torch.float)
            * (-math.log(10000.0) / d_model)
        )
        angles = positions * inv_freq  # (max_len, d_model/2)
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(angles)  # even dims get sine
        table[:, 1::2] = torch.cos(angles)  # odd dims get cosine
        # Buffer (not a Parameter): follows .to(device)/state_dict but is
        # excluded from gradient updates.
        self.register_buffer('pe', table.unsqueeze(0))

    def forward(self, x):
        """Return `x` plus positional encodings.

        Args:
            x: tensor of shape (batch, seq_len, d_model), seq_len <= max_len.
        """
        return x + self.pe[:, :x.size(1)]

class GPT(nn.Module):
    """Minimal decoder-only transformer language model.

    Token embedding + sinusoidal positions -> stack of transformer decoder
    layers (self-attending with a causal mask) -> linear projection to
    vocabulary logits.
    """

    def __init__(self, vocab_size, d_model=256, nhead=8, num_decoder_layers=6):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.pos_encoder = PositionalEncoding(d_model)
        # batch_first=True: inputs/outputs are (batch, seq, d_model), matching
        # what nn.Embedding produces. Without it the layer would silently
        # treat the batch dimension as the sequence dimension.
        decoder_layer = nn.TransformerDecoderLayer(d_model, nhead, batch_first=True)
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_decoder_layers)
        self.fc = nn.Linear(d_model, vocab_size)

    def forward(self, x, mask=None):
        """Compute next-token logits.

        Args:
            x: LongTensor of token ids, shape (batch, seq_len).
            mask: optional BoolTensor of shape (batch, seq_len); True (or 1)
                marks a real token, False (or 0) marks padding. If None, all
                positions are treated as real tokens.

        Returns:
            FloatTensor of logits, shape (batch, seq_len, vocab_size).
        """
        seq_len = x.size(1)
        h = self.pos_encoder(self.embedding(x))
        # Causal (upper-triangular -inf) mask so position i can only attend
        # to positions <= i — required for autoregressive training.
        causal = torch.triu(
            torch.full((seq_len, seq_len), float('-inf'), device=x.device),
            diagonal=1,
        )
        # PyTorch's key_padding_mask uses True = "ignore this key", which is
        # the inverse of our True = "real token" convention.
        pad = ~mask if mask is not None else None
        # Decoder-only model: there is no encoder, so the layer self-attends
        # by using the target sequence as its own memory. (The original code
        # passed `mask` as the required `memory` argument, which is invalid.)
        out = self.transformer_decoder(
            h, h,
            tgt_mask=causal,
            memory_mask=causal,
            tgt_key_padding_mask=pad,
            memory_key_padding_mask=pad,
        )
        return self.fc(out)

# Usage example: build a small model and run one forward pass.
vocab_size = 10000  # assumed vocabulary size of 10000
model = GPT(vocab_size)

# Example input token ids and mask (1 = real token, 0 = padding).
# NOTE(review): confirm the mask shape/semantics expected by GPT.forward —
# a (batch, seq_len) bool tensor is a padding mask, not an attention mask.
input_seq = torch.LongTensor([[1, 2, 3, 4], [5, 6, 7, 8]])  # example input sequences
mask = torch.BoolTensor([[1, 1, 1, 0], [1, 1, 0, 0]])  # example mask

output = model(input_seq, mask)
print(output.shape)  # shape of the output logits




if __name__ == '__main__':
    # Completion marker printed only when the module is executed directly.
    banner = "==========over=========="
    print(banner)
