import torch
from torch import nn, optim
import torch.nn.functional as F
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from lstm_model import lstm_model

# Module-level compute device: GPU when available, else CPU.
# NOTE(review): train() below defines its own local `device`, so this
# module-level value is not used inside train — confirm other importers need it.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Batch generator: slices the encoded character stream into training windows.
def get_batches(data, batch_size, seq_len):
    """
    Yield successive (input, target) batches from the encoded text.

    Parameters
    ----------
    data : np.ndarray
        Source data of shape (num_samples, num_features) — one row per
        character (one-hot encoded by the caller).
    batch_size : int
        Number of parallel sequences per batch.
    seq_len : int
        Length (time steps) of each sequence.

    Yields
    ------
    (x, y) : tuple of np.ndarray
        Each of shape (batch_size, seq_len, num_features).  `y` is `x`
        shifted forward by one character; the very first character wraps
        around to become the target of the last one.
    """
    num_features = data.shape[1]
    # Characters consumed by one batch.
    chars_per_batch = batch_size * seq_len
    # Number of complete batches the data can supply (integer division
    # replaces the original int(np.floor(...))).
    num_batches = len(data) // chars_per_batch
    # Total characters actually used; the tail that cannot fill a whole
    # batch is dropped.
    need_chars = num_batches * chars_per_batch

    # Targets are the rows shifted forward by one position, e.g. for the
    # sequence "abcdef" the inputs are "abcdef" and the targets "bcdefa".
    targets = np.append(data[1:], data[0]).reshape(data.shape)

    # Trim both streams to a whole number of batches.
    inputs = data[:need_chars]
    targets = targets[:need_chars]

    # Lay the stream out as batch_size parallel sub-streams:
    # (batch_size, num_batches * seq_len, num_features).
    inputs = inputs.reshape(batch_size, -1, num_features)
    targets = targets.reshape(batch_size, -1, num_features)

    # Walk the time axis one seq_len window at a time.
    for start in range(0, inputs.shape[1], seq_len):
        x = inputs[:, start: start + seq_len]
        y = targets[:, start: start + seq_len]
        yield x, y


# Training loop for the character-level LSTM network.
def train(model, data, batch_size, seq_len, epochs, lr=0.01, valid=None):
    '''
    Train the character-level network, optionally validating each epoch.

    Parameters
    ----------
    model : character-level network; must provide onehot_encode,
        onehot_decode, label_encode and a `vocab` attribute
    data : training text as an array of characters
    batch_size : number of sequences per batch
    seq_len : sequence length (time steps)
    epochs : number of training epochs
    lr : learning rate for the Adam optimizer
    valid : optional validation text; when given, a validation pass
        runs after every training epoch
    '''
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    # Cross-entropy expects raw scores plus integer class labels.
    criterion = nn.CrossEntropyLoss()

    # One-hot encode the character streams: (num_chars,) -> (num_chars, vocab_size).
    data = model.onehot_encode(data.reshape(-1, 1))
    if valid is not None:
        valid = model.onehot_encode(valid.reshape(-1, 1))

    # Per-epoch loss histories for the plot below.
    train_loss = []
    val_loss = []

    print(model.vocab.shape[0])  # vocabulary size (e.g. 2569)

    for epoch in range(epochs):
        model.train()
        hs = None  # hidden state; re-initialized at the start of each epoch
        train_ls = 0.0
        n_train_batches = 0
        for x, y in get_batches(data, batch_size, seq_len):
            optimizer.zero_grad()
            x = torch.tensor(x).float().to(device)
            # Model output shape: (batch_size * seq_len, vocab_size) scores
            # — presumably flattened inside lstm_model; confirm there.
            out, hs = model(x, hs)
            # Detach the hidden state so gradients do not flow across batches.
            hs = [h.data for h in hs]
            # Targets arrive one-hot; convert back to integer class labels.
            y = y.reshape(-1, len(model.vocab))
            y = model.onehot_decode(y)
            y = model.label_encode(y.squeeze())
            y = torch.from_numpy(y).long().to(device)
            loss = criterion(out, y.squeeze())
            loss.backward()
            optimizer.step()
            train_ls += loss.item()
            n_train_batches += 1

        # BUGFIX: always record the training loss.  The original appended
        # inside `if valid is not None:`, so `train_loss[-1]` below raised
        # IndexError for runs without validation data.  Also average over
        # batches — the original np.mean() on a scalar sum was a no-op.
        train_loss.append(train_ls / max(n_train_batches, 1))

        if valid is not None:
            # Validation pass: no gradient tracking, model in eval mode.
            model.eval()
            hs = None
            val_ls = 0.0
            n_val_batches = 0
            with torch.no_grad():
                for x, y in get_batches(valid, batch_size, seq_len):
                    x = torch.tensor(x).float().to(device)
                    out, hs = model(x, hs)
                    hs = [h.data for h in hs]

                    y = y.reshape(-1, len(model.vocab))
                    y = model.onehot_decode(y)
                    y = model.label_encode(y.squeeze())
                    y = torch.from_numpy(y).long().to(device)

                    loss = criterion(out, y.squeeze())
                    val_ls += loss.item()
                    n_val_batches += 1

            # Mean validation loss for this epoch.
            val_loss.append(val_ls / max(n_val_batches, 1))

        print(f'--------------Epochs{epochs} | {epoch}---------------')
        print(f'Train Loss : {train_loss[-1]}')
        if val_loss:
            print(f'Val Loss : {val_loss[-1]}')

    # Plot both loss curves after training finishes.
    plt.plot(train_loss, label='Train Loss')
    plt.plot(val_loss, label='Val Loss')
    plt.title('Loss vs Epochs')
    plt.legend()
    plt.show()


if __name__ == '__main__':
    poems_name = "poetry1"

    # Read the whole poem corpus into a single string.
    with open("datasets/" + poems_name + ".txt", encoding="utf8") as fh:
        corpus = fh.read()

    # The vocabulary is the sorted set of distinct characters
    # (set() removes duplicates; a list would not).
    vocab = np.array(sorted(set(corpus)))
    vocab_size = len(vocab)  # number of distinct characters (informational)

    # Hold out the final 20% of the text for validation.
    val_len = int(np.floor(0.2 * len(corpus)))
    trainset = np.array(list(corpus[:-val_len]))
    print(trainset)
    validset = np.array(list(corpus[-val_len:]))

    print(f"trainset长度:{trainset.shape[0]}")
    print(f"validset长度:{validset.shape[0]}")

    # Hyper-parameters.
    hidden_size = 512
    num_layers = 2
    batch_size = 256
    seq_len = 5
    epochs = 20
    lr = 0.01

    # Build the model, train it, then persist the full model object.
    model = lstm_model(vocab, hidden_size, num_layers)
    train(model, trainset, batch_size, seq_len, epochs, lr=lr, valid=validset)
    torch.save(model, 'models/' + poems_name + '.pth')







