import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np


# Dataset wrapper for next-token prediction.
class TextDataset(torch.utils.data.Dataset):
    """Wraps a 2-D integer array of token-id sequences.

    Each item is an ``(input, target)`` pair: the sequence without its last
    token, and the same sequence shifted left by one (the classic
    language-modeling shift).
    """

    def __init__(self, data):
        # Convert once to int64 tensors: nn.Embedding requires torch.long,
        # and np.random.randint may produce int32 on some platforms, which
        # the default collate function would otherwise propagate.
        self.data = torch.as_tensor(data, dtype=torch.long)

    def __len__(self):
        # Number of sequences (rows).
        return len(self.data)

    def __getitem__(self, idx):
        # (sequence[:-1], sequence[1:]) — input and its one-step-ahead target.
        return self.data[idx, :-1], self.data[idx, 1:]


# Sinusoidal positional encoding.
class PositionalEncoding(nn.Module):
    """Adds fixed sinusoidal position information (Vaswani et al., 2017).

    The precomputed table has shape ``(max_len, 1, d_model)`` — a seq-first
    layout with a broadcastable batch axis — so inputs are expected as
    ``(seq, batch, d_model)``.
    """

    def __init__(self, d_model, max_len=5000):
        super(PositionalEncoding, self).__init__()
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency ladder: wavelengths from 2*pi up to 10000*2*pi.
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-torch.log(torch.tensor(10000.0)) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)  # even dims: sine
        table[:, 1::2] = torch.cos(positions * freqs)  # odd dims: cosine
        # Buffer (not a parameter): moves with .to(device), excluded from grads.
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        """Return ``x`` plus the first ``x.size(0)`` rows of the table."""
        return x + self.pe[:x.size(0), :]


# Minimal GPT-style model built on nn.TransformerDecoder.
class SimpleGPT(nn.Module):
    """Small decoder language model.

    Args:
        vocab_size: number of distinct token ids.
        d_model: embedding / model width.
        nhead: attention heads per decoder layer.
        num_decoder_layers: number of stacked decoder layers.

    ``forward(src, tgt)`` takes two ``(batch, seq)`` LongTensors and returns
    ``(batch, seq, vocab_size)`` logits over the next token at each position
    of ``tgt``.
    """

    def __init__(self, vocab_size, d_model=256, nhead=8, num_decoder_layers=3):
        super(SimpleGPT, self).__init__()
        self.d_model = d_model
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.positional_encoding = PositionalEncoding(d_model)
        decoder_layer = nn.TransformerDecoderLayer(d_model, nhead)
        self.transformer = nn.TransformerDecoder(decoder_layer, num_decoder_layers)
        self.fc = nn.Linear(d_model, vocab_size)

    def forward(self, src, tgt):
        # Embed, scale by sqrt(d_model) (Vaswani et al.) — previously the
        # scale was hard-coded as sqrt(256) regardless of d_model — and
        # transpose to the seq-first layout (seq, batch, d_model) that both
        # PositionalEncoding and the non-batch_first decoder expect.
        src = self.positional_encoding(self.embedding(src).transpose(0, 1) * np.sqrt(self.d_model))
        tgt = self.positional_encoding(self.embedding(tgt).transpose(0, 1) * np.sqrt(self.d_model))

        # Causal mask: position i in tgt may not attend to positions > i.
        seq_len = tgt.size(0)
        tgt_mask = torch.triu(
            torch.full((seq_len, seq_len), float('-inf'), device=tgt.device),
            diagonal=1,
        )

        # nn.TransformerDecoder.forward is (tgt, memory, ...): tgt is the
        # sequence being generated and src serves as the memory. The original
        # call passed (src, tgt), silently swapping the two roles, and used
        # no mask at all.
        decoded = self.transformer(tgt, src, tgt_mask=tgt_mask)

        # Back to batch-first before projecting to vocabulary logits.
        return self.fc(decoded.transpose(0, 1))


# ---- Synthetic corpus -------------------------------------------------------
# A fake vocabulary plus 1000 random token sequences of length 10 stand in
# for real text so the whole pipeline can be exercised end to end.
vocab_size = 10000
data = np.random.randint(0, vocab_size, size=(1000, 10))

# 80/20 train/test split.
train_data = data[:800]
test_data = data[800:]

train_dataset = TextDataset(train_data)
test_dataset = TextDataset(test_data)

# Shuffle only the training batches; keep evaluation order deterministic.
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False)

# Model, loss, and optimizer.
model = SimpleGPT(vocab_size)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training: next-token prediction with cross-entropy.
num_epochs = 10
for epoch in range(num_epochs):
    model.train()
    total_loss = 0
    for input_seq, target_seq in train_loader:
        optimizer.zero_grad()
        logits = model(input_seq, target_seq)
        # Flatten (batch, seq, vocab) logits and (batch, seq) targets so
        # CrossEntropyLoss scores every position independently.
        loss = criterion(logits.view(-1, vocab_size), target_seq.view(-1))
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(f"Epoch {epoch + 1}, Average Loss: {total_loss / len(train_loader)}")

# Evaluation on the held-out split — no gradient tracking needed.
model.eval()
test_loss = 0
with torch.no_grad():
    for input_seq, target_seq in test_loader:
        logits = model(input_seq, target_seq)
        test_loss += criterion(logits.view(-1, vocab_size), target_seq.view(-1)).item()
    print(f"Test Loss: {test_loss / len(test_loader)}")

if __name__ == '__main__':
    # NOTE(review): all data loading, training, and evaluation above run at
    # module import time; this guard only prints a completion marker.
    print("over")