import torch
from itcast_transformer_utils import greedy_decode
from torch.autograd import Variable

from itcast_transformer_utils import Batch
# 优化器工具，该工具用于获得标准的针对Transformer模型的优化器
# 基于Adam优化器，使其对seq2seq的任务更为有效
from itcast_transformer_utils import get_std_opt
# 该工具用于标签平滑，就是小幅度地改变原有标签的值域，防止过拟合
from itcast_transformer_utils import LabelSmoothing
# 和标签平滑配合使用，类似于交叉熵损失函数
from itcast_transformer_utils import SimpleLossCompute
from itcast_transformer_utils import run_epoch
import matplotlib.pyplot as plt
import numpy as np
from transformer01 import make_model


def data_generator(V, batch_size, num_batch):
    """
    Generate random batches for the copy task (target is a copy of source).

    :param V: exclusive upper bound for random token ids; tokens are drawn
        from [1, V) so that id 0 stays free for padding
    :param batch_size: number of sequences per batch
    :param num_batch: number of batches to yield
    :return: generator yielding ``Batch(source, target)`` objects
    """
    for i in range(num_batch):
        # np.random.randint(low, high, ...) samples integers in [low, high).
        # int64 is the dtype PyTorch embedding layers expect for indices.
        data = torch.from_numpy(np.random.randint(1, V, size=(batch_size, 10), dtype=np.int64))

        # Force the first token of every sequence to 1 (the start symbol).
        data[:, 0] = 1
        # Integer tensors never track gradients, so the original
        # requires_grad_(False) calls were no-ops and have been dropped.
        # Clone the target so source and target do not alias the same
        # storage — mutating one must not silently change the other.
        source = data
        target = data.clone()
        yield Batch(source, target)


def run(model, loss, epochs=20, V=11):
    """
    Train the model on the copy task, then greedy-decode one random sample.

    :param model: Transformer model exposing the encode/decode/generator API
    :param loss: loss-compute callable (e.g. SimpleLossCompute)
    :param epochs: number of train/eval epoch pairs
    :param V: vocabulary size used for data generation; default 11 matches
        the value previously read from the module-level global, so existing
        ``run(model, loss)`` callers behave identically
    """
    for epoch in range(epochs):
        # One training pass over 50 freshly generated batches.
        model.train()
        run_epoch(data_generator(V, 8, 50), model, loss)

        # One quick evaluation pass (dropout/batch-norm in eval mode).
        model.eval()
        run_epoch(data_generator(V, 8, 5), model, loss)

    # Final qualitative check: can the model copy an unseen sequence?
    model.eval()
    # Match the training distribution: tokens in [1, V) with the start
    # symbol 1 in the first position. (The original sampled from [0, 10),
    # which could emit the padding id 0 and never set the start symbol.)
    sample = np.random.randint(1, V, size=(1, 10))
    sample[0, 0] = 1
    print(sample)
    # torch.autograd.Variable is deprecated (no-op since PyTorch 0.4);
    # plain tensors carry autograd state themselves.
    source = torch.LongTensor(sample)
    # No padding in the sample, so the attention mask is all ones.
    source_mask = torch.ones(1, 1, 10)
    result = greedy_decode(model, source, source_mask, max_len=10, start_symbol=1)
    print(result)
    # Element-wise comparison: True wherever the model copied correctly.
    print(source == result)


if __name__ == '__main__':

    # Copy-task configuration: vocabulary size and (currently unused
    # here) batch parameters kept for experimentation.
    V = 11
    batch_size = 20
    num_batch = 30

    print('-'*50)
    # Build a 2-layer encoder/decoder Transformer whose source and
    # target vocabularies are both of size V.
    model = make_model(V, V, N=2)

    # Standard Transformer optimizer (Adam with the warmup/decay schedule).
    model_optimizer = get_std_opt(model)

    # LabelSmoothing parameters:
    #   size        - target vocabulary size (last dim of the model output)
    #   padding_idx - token id treated as padding and zeroed out
    #   smoothing   - probability mass moved off the true label; 0.0 here,
    #                 i.e. plain one-hot targets (no smoothing applied)
    criterion = LabelSmoothing(size=V, padding_idx=0, smoothing=0.0)

    # Combines the generator projection with the criterion and optimizer
    # step, behaving like a loss function during run_epoch.
    loss = SimpleLossCompute(model.generator, criterion, model_optimizer)

    run(model, loss)
