import datetime
import math
from pathlib import Path

import numpy as np
import torch

from common import *
from sklearn.utils import shuffle
from torch import nn
from sklearn.metrics import mean_squared_error as mse, mean_absolute_error as mae


def mape(y_true, y_pred):
    """Mean Absolute Percentage Error, in percent.

    Entries whose true value is <= 0 are dropped before the ratio is
    computed, so no division by zero can occur (the old ``np.isinf``
    clean-up was dead code for the same reason and has been removed).

    :param y_true: array-like of ground-truth values.
    :param y_pred: array-like of predictions, same length as ``y_true``.
    :return: mean of ``|y_true - y_pred| / y_true`` over positive targets,
        scaled to percent; 0.0 when no positive targets remain (the old
        code returned NaN with a RuntimeWarning in that case).
    """
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    # NOTE: strictly-positive filter, matching the original behavior
    # (negative targets are dropped too, not just zeros).
    positive = y_true > 0
    y_true, y_pred = y_true[positive], y_pred[positive]
    if y_true.size == 0:
        return 0.0
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100


# Load the dataset and split it into train/test portions.
_path = "data/PEMS04.npz"
train_proportion = 0.7
train_set, test_set = data_set(_path, train_proportion)

# Hyper-parameters.
device = 'cpu'
epochs = 50
lr = 0.0005
batch_size = 64
input_size = 1    # one feature per time step
hidden_size = 32  # GRU hidden width
output_size = 1   # single-step forecast
num_layers = 1

# GRU encoder followed by a linear read-out head applied to the last step.
model = torch.nn.GRU(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers).to(device)
# BUG FIX: head dimensions were hard-coded as (32, 1); tie them to the
# config so changing hidden_size/output_size cannot silently break the head.
# Also move the head to the same device as the GRU.
out_linear = nn.Sequential(nn.Linear(hidden_size, output_size), nn.LeakyReLU()).to(device)
loss_func = nn.MSELoss()
# Optimize the GRU and the read-out head jointly.
optimizer = torch.optim.Adam(list(model.parameters()) + list(out_linear.parameters()), lr=lr)

loss_log = []   # mean training loss per epoch
score_log = []  # [rmse, mae, mape] per evaluation
trained_batches = 0
curr_lr = lr    # kept for the (commented-out) lr-decay schedule in the loop
start_time = datetime.datetime.now()
# BUG FIX: the message labelled train_set.shape as "len(train_loader)";
# the removed h_n_set tensor was never read anywhere in the file.
print(
    f"start training，start time = {start_time}，epochs={epochs}, lr={lr}, batch_size={batch_size}, train_set.shape={train_set.shape}")
for epoch in range(epochs):
    loss_sum, trained_batches = 0.0, 0
    for batch in next_batch(shuffle(train_set), batch_size=batch_size):
        batch = torch.from_numpy(batch).float().to(device)
        # The first 12 values of each short sequence are the history,
        # the final value is the prediction target.
        x, label = batch[:, :12], batch[:, -1]
        # GRU expects (seq_len, batch, input_size); out[-1] is the last step.
        out, h_n = model(x.T.unsqueeze(-1))
        out = out_linear(out[-1, :, :])
        prediction = out.squeeze(-1)  # drop the feature dimension
        loss = loss_func(prediction, label)
        # BUG FIX: accumulate a Python float — accumulating the loss tensor
        # retained every batch's autograd graph for the whole epoch.
        loss_sum += loss.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        trained_batches += 1

        # Evaluate on the test set every 50 training batches.
        if trained_batches % 50 == 0:
            all_prediction = []
            all_label = []
            # BUG FIX: evaluation must not build autograd graphs; also use
            # fresh names so the training-loop x/label are not shadowed.
            with torch.no_grad():
                for batch_test in next_batch(test_set, batch_size=batch_size):
                    batch_test = torch.from_numpy(batch_test).float().to(device)
                    x_test, label_test = batch_test[:, :12], batch_test[:, -1]
                    out_test, _ = model(x_test.T.unsqueeze(-1))
                    out_test = out_linear(out_test[-1, :, :])
                    all_prediction.append(out_test.squeeze(-1).cpu().numpy())
                    # BUG FIX: convert labels to numpy explicitly instead of
                    # handing raw tensors to np.concatenate.
                    all_label.append(label_test.cpu().numpy())
            all_prediction = np.concatenate(all_prediction)
            all_label = np.concatenate(all_label)
            # De-normalization hooks (intentionally disabled):
            # all_prediction = denormalize(x=all_prediction, feat='0')
            # all_label = denormalize(all_label, '0')
            # Test-set metrics.
            rmse_score = math.sqrt(mse(all_label, all_prediction))
            mae_score = mae(all_label, all_prediction)
            mape_score = mape(all_label, all_prediction)
            score_log.append([rmse_score, mae_score, mape_score])
            print('RMSE: %.4f, MAE: %.4f, MAPE: %.4f' % (rmse_score, mae_score, mape_score))
    # Optional learning-rate decay schedule (intentionally disabled):
    # if epoch % 50 == 0:
    #     curr_lr /= 3
    #     update_lr(optimizer, curr_lr)
    # loss_sum is now a float, so loss_log holds plain numbers.
    loss_log.append(loss_sum / trained_batches)
    print(f"epoch:{epoch}\t min(loss_log):{min(loss_log):.10f} \t current_loss:{loss.item():.10f} \t")
# Record completion time, report total wall-clock duration, then plot
# the collected loss/metric curves.
end_time = datetime.datetime.now()
time = end_time - start_time
print("end training，end time = {}, duration time = {}".format(end_time, time))
loss_draw(loss_log, score_log)
