import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import jieba
import re

# Pick the compute device: GPU (CUDA) when available, otherwise fall back to CPU.
from torch import Tensor

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")


def batchify(data: Tensor, bsz: int) -> Tensor:
    """Trim ``data`` to a multiple of ``bsz`` and reshape it to (bsz, seq_len).

    Each row is a contiguous slice of the original token stream; the result
    is moved to the module-level ``device``.
    """
    n_cols = data.size(0) // bsz
    trimmed = data.narrow(0, 0, bsz * n_cols)
    return trimmed.view(bsz, n_cols).to(device)


def split_chinese(sentence):
    """Tokenize ``sentence`` with jieba and drop punctuation-only tokens.

    NOTE(review): the pattern keeps any token containing a word or whitespace
    character, and ``\\w`` also matches ASCII letters/digits/underscore — so
    this filters out punctuation rather than strictly keeping Chinese text;
    confirm that matches the intent of the original comment.
    """
    keep = re.compile(r'[\w\s]')
    return [tok for tok in jieba.lcut(sentence) if keep.search(tok)]


# Dataset wrapper used by the DataLoader below.
class TextDataset(torch.utils.data.Dataset):
    """Dataset over a 2-D matrix of token ids.

    Item ``i`` is the pair ``(row[:-1], row[1:])`` — i.e. the input sequence
    and its next-token prediction targets, shifted by one position.
    """

    def __init__(self, data):
        # data: 2-D indexable array (rows = sequences of token ids).
        self.data = data

    def __len__(self):
        # One sample per row.
        return len(self.data)

    def __getitem__(self, idx):
        row = self.data[idx]
        return row[:-1], row[1:]


# Positional encoding layer.
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Transformer-style).

    Registers a ``(max_len, 1, d_model)`` buffer; ``forward`` adds the first
    ``x.size(0)`` positions, so the input is expected sequence-first:
    ``(seq_len, batch, d_model)``.
    """

    def __init__(self, d_model, max_len=5000):
        super(PositionalEncoding, self).__init__()
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency progression: 1/10000^(2i/d_model).
        freqs = torch.exp(
            torch.arange(0, d_model, 2).float()
            * (-torch.log(torch.tensor(10000.0)) / d_model)
        )
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)  # even dims: sine
        table[:, 1::2] = torch.cos(positions * freqs)  # odd dims: cosine
        # Shape (max_len, 1, d_model) so it broadcasts over the batch axis.
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        # Add the encodings for the first x.size(0) positions.
        return x + self.pe[:x.size(0), :]


# Minimal GPT-style model: embedding + positional encoding + transformer decoder.
class SimpleGPT(nn.Module):
    """Token embedding -> positional encoding -> TransformerDecoder -> vocab logits.

    NOTE(review): ``nn.TransformerDecoder.forward`` takes ``(tgt, memory)``;
    here ``src`` is passed in the tgt slot and ``tgt`` as memory, and no
    causal mask is applied — confirm this matches the intended training
    setup before relying on this as an autoregressive GPT.
    """

    def __init__(self, vocab_size, d_model=256, nhead=8, num_decoder_layers=3):
        super(SimpleGPT, self).__init__()
        # Keep the model width so forward() can scale by the *actual* d_model
        # (the original hard-coded sqrt(256), wrong for any other width).
        self.d_model = d_model
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.positional_encoding = PositionalEncoding(d_model)
        decoder_layer = nn.TransformerDecoderLayer(d_model, nhead)
        self.transformer = nn.TransformerDecoder(decoder_layer, num_decoder_layers)
        self.fc = nn.Linear(d_model, vocab_size)

    def forward(self, src, tgt):
        # Scale embeddings by sqrt(d_model), the standard Transformer convention.
        scale = np.sqrt(self.d_model)
        src = self.positional_encoding(self.embedding(src) * scale)
        tgt = self.positional_encoding(self.embedding(tgt) * scale)
        memory = self.transformer(src, tgt)
        # Project back to vocabulary logits.
        return self.fc(memory)


def digitToSentence(output_sequence, vocab=None):
    """Map a 1-D tensor of token ids back to a string.

    Ids absent from the vocabulary are silently skipped, as before.

    :param output_sequence: 1-D CPU torch tensor of integer token ids.
    :param vocab: optional id -> word mapping; defaults to the module-level
        ``genVocab`` so existing callers are unchanged.
    :return: the concatenated words as a single string.
    """
    if vocab is None:
        vocab = genVocab
    generated_words = [vocab[word] for word in output_sequence.numpy() if word in vocab]
    return ''.join(generated_words)


# ---- Training configuration and data preparation (script body) ----
num_epochs = 500
batch_size = 10
# Training corpus: a Chinese prose passage.
news_article = "我听见回声，来自山谷和心间；以寂寞的镰刀收割空旷的灵魂；不断地重复决绝，又重复幸福；终有绿洲摇曳在沙漠；我相信自己，生来如同璀璨的夏日之花，不凋不败，妖冶如火，承受心跳的负荷和呼吸的累赘，乐此不疲！我听见音乐，来自月光和胴体；辅极端的诱饵捕获飘渺的唯美；一生充盈着激烈，又充盈着纯然；总有回忆贯穿于世间；我相信自己，死时如同静美的秋日落叶，不盛不乱，姿态如烟，即便枯萎也保留丰肌清骨的傲然，玄之又玄！"
word_list = split_chinese(news_article)
# Build both lookups from ONE unique-word list: the original iterated
# set(word_list) twice, relying on two separately built sets enumerating in
# the same order for id<->word consistency.
unique_words = list(set(word_list))
genVocab = {i: word for i, word in enumerate(unique_words)}
vocab = {word: i for i, word in enumerate(unique_words)}
vocab_size = len(vocab)

# Encode the corpus as token ids (drops the final word, matching the
# original range(len - 1); confirm the last token is meant to be excluded).
data = [vocab[w] for w in word_list[:-1]]
train_source = torch.tensor(data)
train_data = batchify(train_source, batch_size)
# batchify may have moved the tensor to CUDA; bring it back to host memory
# first — np.array() on a CUDA tensor raises TypeError.
np_train_data = np.array(train_data.cpu())
train_dataset = TextDataset(np_train_data)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

model = SimpleGPT(vocab_size)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# ---- Training loop ----
for epoch in range(num_epochs):
    model.train()
    total_loss = 0
    # (The original also decoded input/target back to sentences every batch
    # via digitToSentence and never used the results — dead work, removed.)
    for input_seq, target_seq in train_loader:
        optimizer.zero_grad()
        output = model(input_seq, target_seq)
        # Debug output: the word predicted at the last position of sample 0.
        predicted_index = torch.argmax(output, dim=-1)[0, -1].item()
        predicted_word = genVocab[predicted_index]
        print(predicted_word)

        # Flatten to (batch*seq, vocab) vs (batch*seq,) for token-level CE loss.
        first = output.view(-1, vocab_size)
        second = target_seq.view(-1)
        loss = criterion(first, second)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(f"Epoch {epoch + 1}, Average Loss: {total_loss / len(train_loader)}")

if __name__ == "__main__":
    # Training already ran at import time above; emit a completion marker.
    print("==========over==========")
