import math

import jieba
import re
import torch
from torch import Tensor
import torch.nn as nn
import torch.optim as optim
import numpy as np
from typing import Tuple

# Use the GPU when a CUDA device is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def batchify(data: Tensor, bsz: int) -> Tensor:
    """Reshape a flat token stream into a (seq_len, bsz) batch matrix.

    Tokens that do not fill a complete column are dropped, then the
    remainder is laid out column-per-batch-element and moved to `device`.
    """
    n_rows = data.size(0) // bsz
    trimmed = data.narrow(0, 0, n_rows * bsz)
    columns = trimmed.view(bsz, n_rows).t().contiguous()
    return columns.to(device)


# 获取批次数据
def get_batch(source: Tensor, i: int, seq_len: int = None) -> Tuple[Tensor, Tensor]:
    """Slice one training batch out of a batchified (seq, bsz) tensor.

    Args:
        source: batchified data of shape (total_seq_len, bsz).
        i: starting row of the slice.
        seq_len: maximum slice length; defaults to the module-level ``bptt``
            (parameterized so the function no longer depends only on a global).

    Returns:
        (data, target) where ``data`` is rows [i, i+len) of shape
        (len, bsz) and ``target`` is the next-token rows [i+1, i+1+len)
        flattened to a 1-D tensor of length len*bsz.
    """
    if seq_len is None:
        seq_len = bptt  # preserve original behavior for existing callers
    # Clamp so neither slice runs past the end of `source`.
    seq_len = min(seq_len, len(source) - 1 - i)

    data = source[i:i + seq_len]

    # Targets are the inputs shifted by one row, flattened for CrossEntropyLoss.
    target = source[i + 1:i + 1 + seq_len].reshape(-1)

    return data, target


def split_chinese(sentence):
    """Segment a Chinese sentence into word tokens via jieba.

    Tokens made up purely of punctuation are discarded.
    NOTE(review): the filter keeps any token containing a word or whitespace
    character (``[\\w\\s]``), not strictly Chinese characters — confirm intent.
    """
    keep = re.compile(r'[\w\s]')
    return [tok for tok in jieba.lcut(sentence) if keep.search(tok)]


class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal position information to token embeddings.

    Even feature indices carry sine terms and odd indices carry cosine
    terms, following "Attention Is All You Need".
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency ladder: 10000^(-2k/d_model) for k = 0..d_model/2-1.
        freqs = torch.exp(
            torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)
        )
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        # Stored as (max_len, 1, d_model) so it broadcasts over the batch dim.
        self.register_buffer('pe', table.unsqueeze(1))

    def forward(self, x):
        """x: [seq_len, batch_size, d_model] -> same shape, position-enriched."""
        return self.dropout(x + self.pe[:x.size(0), :])


# 定义数据加载器
class TextDataset(torch.utils.data.Dataset):
    """Minimal Dataset wrapper exposing an in-memory sequence by index."""

    def __init__(self, data):
        self.data = data  # any indexable sequence of training samples

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]


# 修改 GPT 模型以适应新的训练数据和问答系统
class SimpleGPT(nn.Module):
    def __init__(self, vocab_size, d_model=256, nhead=8, num_decoder_layers=3):
        super(SimpleGPT, self).__init__()
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.positional_encoding = PositionalEncoding(d_model)
        decoder_layer = nn.TransformerDecoderLayer(d_model, nhead)
        self.transformer = nn.TransformerDecoder(decoder_layer, num_decoder_layers)
        # d_model 是输入特征的维度，vocab_size 是输出特征的维度
        self.fc = nn.Linear(d_model, vocab_size)

    def forward(self, src, tgt):
        # 这里的src, tgt不能传一维数组，不能经过embedding和positional_encoding就会变成3维数量不等
        self.d_model = 256
        src = self.embedding(src) * np.sqrt(self.d_model)
        src = self.positional_encoding(src)

        tgt = self.embedding(tgt) * np.sqrt(self.d_model)
        tgt = self.positional_encoding(tgt)

        memory = self.transformer(src, tgt)  # 使用tgt作为目标序列
        output = self.fc(memory)
        return output


# 测试问答系统
def generate_answer(question, model, vocab):
    # 将问题转换为模型可接受的输入格式
    question_words = split_chinese(question)
    input_data = [vocab[word] for word in question_words if word in vocab]
    input_tensor = torch.tensor(input_data).unsqueeze(0)  # 添加 batch 维度

    # 在这里假设我们的模型可以生成固定长度的输出
    target_length = 10  # 假设生成包含10个单词的回答

    model.eval()
    with torch.no_grad():
        output_sequence = []  # 存储模型生成的输出序列
        for i in range(target_length):
            output = model(input_tensor, input_tensor)  # 这里将输入作为目标序列传入
            predicted_index = torch.argmax(output, dim=-1)[0, -1].item()
            output_sequence.append(predicted_index)
            input_tensor = torch.cat([input_tensor, torch.tensor([[predicted_index]])], dim=-1)  # 将预测的单词拼接到输入序列的末尾

    # 将模型生成的单词序列转换回文本
    generated_words = [genVocab[word] for word in output_sequence if word in genVocab]
    generated_answer = ''.join(generated_words)
    return generated_answer


batch_size = 10
bptt = 2
# Training corpus: a short Chinese passage about the "five minute rule".
news_article = "心理学中有个说法被称之为“五分钟法则”，也叫五分钟定律。当然了，还有人称其为“五分钟起步法”。可以这么理解：当你不愿意做某件事情的时候，可以给自己5分钟尝试准备的时间，不要管成与不成，先开始5分钟然后再决定要不要做下去"
word_list = split_chinese(news_article)
unique_words = set(word_list)
genVocab = dict(enumerate(unique_words))              # id -> word (decoding)
vocab = {word: i for i, word in enumerate(unique_words)}  # word -> id (encoding)
vocab_size = len(vocab)

# Encode every token except the last (it has no successor to predict).
data = [vocab[word] for word in word_list[:-1]]
train_source = torch.tensor(data)
train_data = batchify(train_source, batch_size)
print(train_data)
# =====数据构造====
# num_batches = len(train_data) // bptt  # 计算总的batch数量
#     for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)):
#
#         data, targets = get_batch(train_data, i)  # 获取当前batch的数据和目标
#         output = model(data)  # 前向传播
#         # ntokens词汇表的大小： (35,20,28782) ---》（700，28782），target：（700）
#         output_flat = output.view(-1, ntokens)
#         loss = criterion(output_flat, targets)  # 计算损失
# ========over=======

# # 构造训练数据
# data = []
#
# for i in range(len(word_list) - 1):
#     input_word = word_list[i]
#     target_word = word_list[i + 1]
#     data.append((vocab[input_word], vocab[target_word]))
# # data的长度是55，DataLoader是每批次最大32切割，所以一轮训练是第1批次32，第2批次23
# train_dataset = TextDataset(data)
# train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)

# 初始化模型
# Initialize the model.
model = SimpleGPT(vocab_size)

# Training setup: next-token classification with Adam.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

num_epochs = 10
for epoch in range(num_epochs):
    for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)):
        # data: (seq, batch) inputs; targets: flat (seq*batch,) next tokens.
        data, targets = get_batch(train_data, i)
        # FIX: clear gradients each step — without zero_grad() they
        # accumulate across batches and corrupt every update after the first.
        optimizer.zero_grad()
        # FIX: the model needs a 2-D (seq, batch) target sequence; the
        # flattened 1-D `targets` broadcasts wrongly inside the positional
        # encoding and crashes the decoder's cross-attention.
        output = model(data, targets.view_as(data))
        # (seq, batch, vocab) -> (seq*batch, vocab) to match flat targets.
        output_flat = output.view(-1, vocab_size)
        loss = criterion(output_flat, targets)
        loss.backward()
        optimizer.step()
    print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')


def main():
    """Build a fresh (untrained) model and answer one demo question."""
    gpt_model = SimpleGPT(vocab_size)
    question = "什么是5分钟法则?"
    answer = generate_answer(question, gpt_model, vocab)
    print("Question:", question)
    print("Answer:", answer)


if __name__ == "__main__":
    main()
