import torch
import torch.nn as nn
from torch.nn import Transformer
import torch.optim as optim
import torch.utils.data as data

# For simplicity we train on a tiny hard-coded chat dataset;
# swap in a larger corpus as needed.

# Toy chat dataset: (prompt, reply) pairs used as the src/tgt corpus for
# the seq2seq Transformer below.
# NOTE(review): the sentences contain no spaces, so the whitespace-based
# tokenizer below treats each whole sentence as a single token — fine for
# this demo, but confirm before scaling to real data.
data_train = [
    ("你好", "你好啊"),
    ("最近过得怎么样？", "还不错，谢谢"),
    ("你想聊些什么呢？", "随便聊点什么吧"),
    ("最近有什么新鲜事吗？", "最近我在学习新的技术"),
    ("你喜欢哪种类型的电影？", "我喜欢喜剧片"),
    ("明天准备去哪儿玩？", "我还没有计划呢"),
]

# Split (src, tgt) pairs into two parallel, lowercased string lists.
def preprocess_data(data):
    """Return ``(src_list, tgt_list)`` with every string lowercased.

    ``data`` is an iterable of ``(source, target)`` string pairs; the
    result preserves their order.
    """
    lowered = [(src.lower(), tgt.lower()) for src, tgt in data]
    if not lowered:
        return [], []
    src_data, tgt_data = zip(*lowered)
    return list(src_data), list(tgt_data)



# Map a whitespace-tokenized string to a 1-D LongTensor of vocab indices.
def text_to_indices(text, word2idx):
    """Convert ``text`` into a ``torch.long`` tensor of vocabulary indices.

    Tokens are produced by ``str.split()``; a token missing from
    ``word2idx`` raises ``KeyError``.
    """
    return torch.tensor([word2idx[token] for token in text.split()],
                        dtype=torch.long)

# Dataset of parallel chat pairs; items are (src_indices, tgt_indices).
class ChatDataset(data.Dataset):
    """Wrap parallel src/tgt string lists and a shared vocabulary.

    ``__getitem__`` returns a pair of 1-D LongTensors produced by
    ``text_to_indices``.
    """

    def __init__(self, src_data, tgt_data, word2idx):
        self.src_data = src_data
        self.tgt_data = tgt_data
        self.word2idx = word2idx

    def __len__(self):
        return len(self.src_data)

    def __getitem__(self, index):
        texts = (self.src_data[index], self.tgt_data[index])
        return tuple(text_to_indices(t, self.word2idx) for t in texts)



# A minimal encoder-decoder Transformer for seq2seq chat generation.
class TransformerModel(nn.Module):
    """Embedding -> nn.Transformer -> linear vocabulary projection.

    Args:
        vocab_size: size of the shared src/tgt vocabulary.
        embedding_dim: model width (d_model); must be divisible by nhead=2.
        hidden_dim: feed-forward width inside each Transformer layer.
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim):
        super(TransformerModel, self).__init__()
        self.embed = nn.Embedding(vocab_size, embedding_dim)
        self.transformer = Transformer(embedding_dim, nhead=2, num_encoder_layers=2,
                                       num_decoder_layers=2, dim_feedforward=hidden_dim)
        self.fc = nn.Linear(embedding_dim, vocab_size)

    def forward(self, src, tgt):
        """Teacher-forced forward pass.

        Args:
            src: LongTensor of shape (batch, src_len).
            tgt: LongTensor of shape (batch, tgt_len).
        Returns:
            Logits of shape (tgt_len, batch, vocab_size) — sequence-first,
            matching nn.Transformer's default layout.
        """
        # (batch, seq, embed) -> (seq, batch, embed): nn.Transformer
        # expects sequence-first tensors by default.
        src_emb = self.embed(src).permute(1, 0, 2)
        tgt_emb = self.embed(tgt).permute(1, 0, 2)

        # Bug fix: without a causal mask the decoder attends to future
        # target positions, so teacher-forced training degenerates into
        # copying the target instead of learning to predict it.
        tgt_mask = self.transformer.generate_square_subsequent_mask(
            tgt_emb.size(0)).to(tgt_emb.device)

        output = self.transformer(src_emb, tgt_emb, tgt_mask=tgt_mask)
        return self.fc(output)

def train():
    """Build the vocabulary, train the seq2seq chat model, and run a
    greedy single-step generation smoke test.

    Side effects: prints per-epoch loss and the test prompt/reply.
    """
    src_data, tgt_data = preprocess_data(data_train)

    # Build the vocabulary (sorted for a deterministic mapping).
    # Index 0 is reserved for padding so variable-length batches can be
    # padded and the padded positions ignored by the loss.
    pad_idx = 0
    vocab = sorted({tok for text in src_data + tgt_data for tok in text.split()})
    word2idx = {word: i + 1 for i, word in enumerate(vocab)}
    idx2word = {i: word for word, i in word2idx.items()}

    # Create the dataset instance.
    dataset = ChatDataset(src_data, tgt_data, word2idx)

    # Instantiate the model (+1 vocab slot for the padding index).
    model = TransformerModel(len(vocab) + 1, 64, 128)

    # Training hyper-parameters.
    num_epochs = 10
    batch_size = 2
    lr = 0.001

    # Optimizer and loss; padding positions contribute nothing to the loss.
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss(ignore_index=pad_idx)

    # Bug fix: the default collate_fn crashes on batches whose tensors
    # have different lengths — pad each batch to its longest sequence.
    def collate(batch):
        srcs, tgts = zip(*batch)
        srcs = nn.utils.rnn.pad_sequence(srcs, batch_first=True,
                                         padding_value=pad_idx)
        tgts = nn.utils.rnn.pad_sequence(tgts, batch_first=True,
                                         padding_value=pad_idx)
        return srcs, tgts

    train_loader = data.DataLoader(dataset, batch_size=batch_size,
                                   shuffle=True, collate_fn=collate)

    # Training loop.
    for epoch in range(num_epochs):
        epoch_loss = 0.0
        for src_batch, tgt_batch in train_loader:
            optimizer.zero_grad()
            # output: (tgt_len, batch, vocab) — sequence-first.
            output = model(src_batch, tgt_batch)
            # Bug fix: permute to (batch, tgt_len, vocab) before flattening
            # so logits line up position-for-position with the flattened
            # targets; the original flattened the two tensors in different
            # (seq-major vs batch-major) orders.
            # NOTE(review): decoder input and loss target are the same
            # tensor (no BOS/shift). With multi-token targets this should
            # use tgt[:, :-1] as input and tgt[:, 1:] as target; left as-is
            # because this demo's tokenizer yields single-token sentences.
            logits = output.permute(1, 0, 2).reshape(-1, output.size(-1))
            loss = criterion(logits, tgt_batch.reshape(-1))
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        print("Epoch [{}/{}], Loss: {:.4f}".format(
            epoch + 1, num_epochs, epoch_loss / len(train_loader)))

    # Smoke test: greedy single-step "generation".
    # Bug fix: the original moved the input tensor to CUDA while the model
    # stayed on CPU; keep model and input on the same device.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)
    model.eval()
    test_input = "你好"
    with torch.no_grad():
        src = text_to_indices(test_input, word2idx).unsqueeze(0).to(device)
        # Seed the decoder with the source token and take the argmax of
        # the final position's logits as the predicted reply token.
        logits = model(src, src)  # (tgt_len, batch, vocab)
        pred = logits[-1, 0].argmax().item()
    print("输入: {}".format(test_input))
    print("输出: {}".format(idx2word.get(pred, "<unk>")))

# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Entry point: train the toy chat model and print per-epoch loss.
    train()