import os
import math
import time
import numpy as np

import torch
from torch import nn
import torch.optim as optim

from RNNModel import RNNModel
from load_dataset import load_data_text
from utils import *


def predict_rnn(prefix, num_preds, model, vocab, device):
    # 设为评估模式
    model.eval()
    """在prefix后面生成新字符, 生成数量为num_preds"""
    state = model.init_state(batch_size=1, device=device)
    outputs = [vocab[prefix[0]]]  # 得到prefix[0]的下标
    get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1))  # 将最新预测的词作为输入
    # 预热期，比如"some guy saying it was "这几个词，要提前保存到state中，方便之后预测
    for y in prefix[1:]:  # 预热期
        _, state = model.forward(get_input(), state)
        outputs.append(vocab[y])
    for _ in range(num_preds):  # 预测num_preds步
        y, state = model.forward(get_input(), state)
        outputs.append(int(y.argmax(dim=1).reshape(1)))
    predict_seq = ''.join([vocab.idx_to_token[i] for i in outputs])
    return predict_seq


def train_epoch(model, optimizer, data_loader, device, use_random_iter):
    """Train the model for one epoch.

    Args:
        model: RNN model exposing forward/loss (project API).
        optimizer: torch optimizer over model.parameters().
        data_loader: yields (X, Y) batches of token indices.
        device: torch device to move batches to.
        use_random_iter: passed to init_state; controls whether the hidden
            state is carried over between consecutive batches.

    Returns:
        (mean batch loss, perplexity, tokens/sec, elapsed seconds, optimizer)
    """
    model.train()  # training mode
    state = None
    epoch_loss = 0.0
    num_batches = 0  # renamed from `iter`, which shadowed the builtin
    start = time.time()
    # metric[0]: token-weighted loss sum; metric[1]: total token count
    # (both needed to compute perplexity at the end)
    metric = [0.0, 0]
    for num_batches, (X, Y) in enumerate(data_loader, start=1):
        batch_size = X.size(0)
        # Re-initialize (or detach) the hidden state per the iteration scheme.
        state = init_state(model, state, batch_size, device, use_random_iter)
        y = Y.T.reshape(-1)  # flatten targets to match y_hat's layout
        X, y = X.to(device), y.to(device)
        y_hat, state = model.forward(X, state)
        # Backpropagate the loss.
        loss = model.loss(y_hat, y.long())
        optimizer.zero_grad()
        loss.backward()
        # Clip gradients before stepping to guard against exploding gradients.
        grad_clipping(model, max_norm=1)
        optimizer.step()
        # .item() already synchronizes to host; compute it once per batch
        # (the original called .cpu().detach().item() twice).
        loss_val = loss.detach().item()
        epoch_loss += loss_val
        # y.numel(): number of target tokens in this batch.
        metric[0] += y.numel() * loss_val
        metric[1] += y.numel()

    epoch_loss /= num_batches
    epoch_time = time.time() - start
    perplexity = math.exp(metric[0] / metric[1])
    speed = metric[1] / epoch_time  # tokens processed per second
    return epoch_loss, perplexity, speed, epoch_time, optimizer


class Config:
    """Hyper-parameter and environment settings for training."""

    def __init__(self):
        # --- data ---
        self.file_path = "../DataSet/Xue_lyric.txt"
        self.batch_size = 32
        # Length of each subsequence sampled from the corpus.
        self.num_steps = 30

        # --- optimization ---
        self.num_epochs = 100
        self.learning_rate = 0.001
        self.device = gpu_setup(True, 0)

        # --- model architecture ---
        # self.input_size = 100
        self.hidden_size = 256
        self.num_layers = 1
        self.bidirectional = False
        self.is_gru = True


def main():
    """Load the lyric corpus, build and train the RNN, and sample text."""
    config = Config()
    data_loader, vocab = load_data_text(config.file_path, config.batch_size, config.num_steps)

    # Input size equals the vocabulary size (one-hot / embedding lookup).
    model = RNNModel(input_size=len(vocab),
                     hidden_size=config.hidden_size,
                     num_layers=config.num_layers,
                     bidirectional=config.bidirectional,
                     is_gru=config.is_gru).to(config.device)
    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)

    # Prefixes used to sample text and eyeball training progress.
    predict_len = 30
    demo_prefixes = [['我们'], ['爱'], ['自己'], ['爱', '过']]

    def show_samples():
        # Print one generated continuation per demo prefix.
        for prefix in demo_prefixes:
            print(predict_rnn(prefix, predict_len, model, vocab, config.device))

    for epoch in range(config.num_epochs):
        epoch_loss, perplexity, speed, epoch_time, optimizer = train_epoch(
            model, optimizer, data_loader, config.device, use_random_iter=False)
        print("epoch:{:d} | loss:{:.3f} | perplexity:{:.3f} | speed:{:.3f} | time:{:.3f}s".format(
            epoch + 1, epoch_loss, perplexity, speed, epoch_time))
        # Sample every 10 epochs to monitor generation quality.
        if (epoch + 1) % 10 == 0:
            show_samples()

    print("最终预测效果：")
    show_samples()


# Script entry point: run training only when executed directly.
if __name__ == '__main__':
    main()
