import numpy as np
import torch
import torch.nn as nn
from net import TransformerModel

epochs = 100
lr = 0.001
batch_size = 128
pred_num = 50
device = "cuda"

'''
数据中一共有 2944 条数据, 目标是以50条数据预测下一条数据, 因此共有 2944 - 50 = 2894 条训练数据
以 train_set_nums : test_set_nums = 4 : 1 设置，即 2312 : 582
'''

train_nums = 2312
test_nums = 2894

dataset = torch.tensor(np.load("npys/data.npy", allow_pickle=True)).type(torch.FloatTensor)

def train(model, loss_fn, opt, save=False):
    """Train *model* on sliding windows of the module-level ``dataset``.

    Each sample is a window of ``pred_num`` (50) consecutive records; the
    target is the record immediately following the window.  The whole
    training set is fed as a single batch every epoch.

    Args:
        model:   network to train (already moved to ``device`` by the caller).
        loss_fn: loss criterion, e.g. ``nn.CrossEntropyLoss``.
        opt:     optimizer over ``model.parameters()``.
        save:    when True, checkpoint model/optimizer state to
                 ``models/LotteryModel.pkl`` whenever train accuracy improves.
    """
    max_acc = 0.
    losses = []

    # Build the training tensors ONCE: the windows never change, so the
    # original code rebuilding them inside the epoch loop repeated identical
    # work `epochs` times.
    data = []
    label = []
    for j in range(pred_num, train_nums + 1):
        data.append(dataset[j - pred_num:j])
        # BUG FIX: the window covers indices [j-50, j), so "the next record"
        # is dataset[j]; the original used dataset[j+1], skipping one row.
        label.append(dataset[j])
    data = torch.stack(data).to(device).view(len(data), pred_num, -1)
    label = torch.stack(label).to(device)

    for i in range(epochs):
        # Forward / backward pass over the full batch.
        y_pred = model(data)
        loss = loss_fn(y_pred, label)
        losses.append(loss.item())
        opt.zero_grad()
        loss.backward()
        opt.step()

        # Training accuracy: fraction of samples whose (flattened) argmax
        # matches the target's — vectorized equivalent of the original
        # per-sample Python loop.
        with torch.no_grad():
            pred_idx = y_pred.reshape(y_pred.shape[0], -1).argmax(dim=1)
            true_idx = label.reshape(label.shape[0], -1).argmax(dim=1)
            train_acc = (pred_idx == true_idx).float().mean().item()
        print('epoch:%d loss:%.5f acc:%.5f' % (i, loss.item(), train_acc))

        # Checkpoint only when training accuracy improves.
        if save and train_acc > max_acc:
            max_acc = train_acc
            state = {'model': model.state_dict(), 'opt': opt.state_dict()}
            torch.save(state, 'models/LotteryModel.pkl')

def test(model, loss_fn=None):
    """Evaluate the checkpointed model on the held-out tail of ``dataset``.

    Loads the best checkpoint saved by :func:`train`, then prints the loss
    and argmax accuracy over the test windows.

    Args:
        model:   network instance with the same architecture as the checkpoint.
        loss_fn: criterion to report; defaults to a fresh CrossEntropyLoss so
                 this function no longer depends on a module-level global
                 (the original silently read ``loss_fn`` from ``__main__``).
    """
    if loss_fn is None:
        loss_fn = nn.CrossEntropyLoss().to(device)

    model.load_state_dict(torch.load('models/LotteryModel.pkl')['model'])
    model.eval()  # disable dropout / freeze batch-norm statistics for evaluation

    # BUG FIX: the original iterated j from train_nums - 50 to test_nums with
    # target dataset[j+1], which (a) produced 633 samples instead of the
    # documented 582 and (b) reused labels already seen during training.
    # Taking j in (train_nums, test_nums] with target dataset[j] yields
    # exactly test_nums - train_nums = 582 samples, disjoint from the
    # training labels (input windows may still overlap the training region,
    # which is normal for time-series context).
    data = []
    label = []
    for j in range(train_nums + 1, test_nums + 1):
        data.append(dataset[j - pred_num:j])
        label.append(dataset[j])

    data = torch.stack(data).to(device).view(len(data), pred_num, -1)
    label = torch.stack(label).to(device)

    with torch.no_grad():
        y_pred = model(data)
        loss = loss_fn(y_pred, label)
        # Flattened-argmax accuracy, same metric as in train().
        pred_idx = y_pred.reshape(y_pred.shape[0], -1).argmax(dim=1)
        true_idx = label.reshape(label.shape[0], -1).argmax(dim=1)
        test_acc = (pred_idx == true_idx).float().mean().item()
        print('test: loss:%.5f acc:%.5f' % (loss.item(), test_acc))

if __name__ == "__main__":
    model = TransformerModel().to(device)
    loss_fn = nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    train(model, loss_fn, optimizer, save=True)
    test(model)