import pandas as pd
import torch
from matplotlib import pyplot as plt
from torch import optim, nn
import warnings
from Generator import Generator
import numpy as np
from transformers import BertTokenizer, BertModel

warnings.filterwarnings("ignore")



# Batch generator: split a flat encoded sequence into (input, target)
# mini-batches for next-token prediction (matches the usage in the
# training loop below, which iterates get_batches(trainset, ...)).
def get_batches(data, batch_size, seq_len):
    """Yield (x, y) mini-batches from a flat token sequence.

    Args:
        data: 1-D sequence of token ids (list or ndarray).
        batch_size: number of parallel sequences per batch.
        seq_len: number of tokens per time window.

    Yields:
        (x, y) ndarray pairs, each of shape (batch_size, seq_len),
        where y is x shifted one position left (next-token targets).
        Yields nothing when data is too short for a single full batch.
    """
    data = np.asarray(data)
    tokens_per_batch = batch_size * seq_len
    n_batches = len(data) // tokens_per_batch
    if n_batches == 0:
        return  # not enough tokens for even one full batch
    # Trim the ragged tail so the array reshapes evenly, then lay the
    # stream out as batch_size parallel rows.
    data = data[:n_batches * tokens_per_batch].reshape(batch_size, -1)
    for start in range(0, data.shape[1], seq_len):
        x = data[:, start:start + seq_len]
        y = np.zeros_like(x)
        # Targets are the inputs shifted left by one token.
        y[:, :-1] = x[:, 1:]
        if start + seq_len < data.shape[1]:
            y[:, -1] = data[:, start + seq_len]
        else:
            # Last window: wrap the final target around to column 0.
            y[:, -1] = data[:, 0]
        yield x, y


if __name__ == '__main__':
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Local checkpoint directory of the pretrained Chinese BERT.
    model_name = '../bert-base-chinese'
    model_path = '../bert-base-chinese'
    tokenizer = BertTokenizer.from_pretrained(model_name)
    bert = BertModel.from_pretrained(model_path)
    poems_name = "poetry1"

    # Read the whole poem corpus as a single string.
    with open("../datasets/" + poems_name + ".txt", encoding="utf8") as data:
        text = data.read()

    # Train / validation split: hold out the last 20% of characters.
    val_len = int(np.floor(0.2 * len(text)))

    # encode() returns a plain Python *list* of token ids
    # (with [CLS]/[SEP] added, truncated to 512 tokens).
    trainset = tokenizer.encode(text[:-val_len],
                                add_special_tokens=True,
                                truncation=True,
                                max_length=512
                                )

    print(trainset)
    validset = np.array(list(text[-val_len:]))

    # BUG FIX: trainset is a list and has no .shape attribute; len()
    # works for both the list and the ndarray.
    print("trainset长度:" + str(len(trainset)))
    print("validset长度:" + str(len(validset)))

    # Hyperparameters
    batch_size = 32
    seq_len = 5
    epochs = 5
    lr = 0.001

    # Build the generator model and move it to the target device.
    model = Generator(bert, tokenizer)
    model = model.to(device)
    # Optimizer and loss function (cross entropy over the vocabulary).
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()
    print("trainset:", trainset)
    print("validset:", validset)

    # BUG FIX: tokenize() expects raw text, but trainset holds token
    # ids; convert_ids_to_tokens() maps the ids back to token strings.
    trainset_2 = tokenizer.convert_ids_to_tokens(trainset)
    print(trainset_2)

    # # Accumulate per-epoch losses.
    # train_loss = []
    # val_loss = []
    #
    # print(model.vocab.shape[0])  # 2569
    #
    # # Training / validation loop
    # for epoch in range(epochs):
    #     model.train()
    #     hs = None  # hidden state carried across batches
    #     train_ls = 0.0
    #     val_ls = 0.0
    #     for x_sent_id, x_att_mask, y_sent_id, y_att_mask in get_batches(trainset, batch_size, seq_len):
    #         # Each iteration yields one batch of data + targets.
    #         optimizer.zero_grad()  # reset gradients
    #         x = torch.tensor(x).float().to(device)  # type conversion
    #         # Forward pass; output shape: (batch_size, sequence_length, hidden_size)
    #         out, hs = model(x, hs)
    #         hs = ([h.data for h in hs])  # detach hidden state from the graph
    #         # Reverse the one-hot encoding of the targets.
    #         y = y.reshape(-1, len(model.vocab))
    #         y = model.onehot_decode(y)
    #         # Label-encode y.
    #         y = model.label_encode(y.squeeze())
    #         # Type conversion.
    #         y = torch.from_numpy(y).long().to(device)
    #         # Compute the loss.
    #         loss = criterion(out, y.squeeze())
    #         # Backpropagate.
    #         loss.backward()
    #         # Update parameters.
    #         optimizer.step()
    #         # Accumulate the training loss.
    #         train_ls += loss.item()
    #
    #     if validset is not None:
    #         # Validation pass.
    #         model.eval()
    #         hs = None
    #         with torch.no_grad():
    #             for x, y in get_batches(validset, batch_size, seq_len):
    #                 x = torch.tensor(x).float().to(device)
    #                 out, hs = model(x, hs)  # predicted output
    #                 hs = ([h.data for h in hs])
    #
    #                 y = y.reshape(-1, len(model.vocab))
    #                 y = model.onehot_decode(y)
    #                 y = model.label_encode(y.squeeze())
    #
    #                 y = torch.from_numpy(y).long().to(device)
    #
    #                 loss = criterion(out, y.squeeze())
    #                 val_ls += loss.item()
    #
    #             val_loss.append(np.mean(val_ls))  # mean loss for this epoch
    #
    #         train_loss.append(np.mean(train_ls))  # mean loss for this epoch
    #
    #     print(f'--------------Epochs{epochs} | {epoch}---------------')
    #     print(f'Train Loss : {train_loss[-1]}')  # -1 = loss appended last, i.e. this epoch's
    #     if val_loss:
    #         print(f'Val Loss : {val_loss[-1]}')
    #
    # # Plot the loss curves.
    # plt.plot(train_loss, label='Train Loss')
    # plt.plot(val_loss, label='Val Loss')
    # plt.title('Loss vs Epochs')
    # plt.legend()
    # plt.show()
    #
    # # Save the model.
    # torch.save(model, 'models/'+poems_name+'.pth')