import datetime
from pathlib import Path

import torch
from torch.utils.data import DataLoader

from common import *
from sklearn.utils import shuffle
from torch import nn
from sklearn.metrics import mean_squared_error as mse, mean_absolute_error as mae


class MyRNN(nn.Module):
    """Minimal Elman-style RNN built from raw parameters.

    Recurrence: h_t = tanh(x_t @ W_h + h_{t-1} @ U_h + b_h)
    Readout:    y_t = LeakyReLU(h_t @ W_y + b_y)
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.hidden_size = hidden_size
        # Input-to-hidden and hidden-to-hidden weights, hidden bias.
        self.w_h = nn.Parameter(torch.rand(input_size, hidden_size))
        self.u_h = nn.Parameter(torch.rand(hidden_size, hidden_size))
        self.b_h = nn.Parameter(torch.zeros(hidden_size))

        # Hidden-to-output projection.
        self.w_y = nn.Parameter(torch.rand(hidden_size, output_size))
        self.b_y = nn.Parameter(torch.zeros(output_size))

        self.tanh = nn.Tanh()
        self.leaky_relu = nn.LeakyReLU()

        for param in self.parameters():
            if param.dim() > 1:
                # FIX: nn.init.xavier_uniform (no underscore) is deprecated;
                # the supported API is the in-place xavier_uniform_.
                nn.init.xavier_uniform_(param)

    def forward(self, x):
        """Run the RNN over a sequence.

        Args:
            x: tensor of shape (batch, seq_len, input_size) — assumed 3-D;
               time is stepped along dim 1 (TODO confirm against get_data).

        Returns:
            (last_hidden, outputs): last_hidden is (batch, hidden_size);
            outputs stacks the per-step readouts into
            (batch, seq_len, output_size).
        """
        batch_size = x.size(0)
        # FIX: time steps are indexed along dim 1 (x[:, i, :]), so the
        # sequence length must be read from dim 1, not dim 2 (feature dim).
        # The original only appeared to work because input_size == hidden_size.
        seq_len = x.size(1)

        h = torch.zeros(batch_size, self.hidden_size).to(x.device)
        y_list = []
        for i in range(seq_len):
            h = self.tanh(torch.matmul(x[:, i, :], self.w_h) + torch.matmul(h, self.u_h) + self.b_h)
            y = self.leaky_relu(torch.matmul(h, self.w_y) + self.b_y)
            y_list.append(y)
        return h, torch.stack(y_list, dim=1)


def mape(y_true, y_pred):
    """Return the mean absolute percentage error (in percent).

    Entries whose ground truth is <= 0 are dropped before computing the
    score, both to avoid division by zero and because a percentage error
    against zero/negative flow is meaningless.

    Args:
        y_true: array-like of ground-truth values.
        y_pred: array-like of predictions, same shape as y_true.

    Returns:
        float: mean of |(y_true - y_pred) / y_true| * 100 over the kept entries.
    """
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    positive_mask = y_true > 0
    y_true = y_true[positive_mask]
    y_pred = y_pred[positive_mask]
    # FIX: renamed the accumulator (it shadowed the function name) and
    # removed the dead `isinf -> 0` clamp: every remaining y_true is
    # strictly positive, so the division cannot produce inf for finite
    # predictions — and clamping an infinite error to 0 would silently
    # report a diverged prediction as perfect.
    errors = np.abs((y_true - y_pred) / y_true)
    return np.mean(errors) * 100


if __name__ == '__main__':
    # Hyper-parameters.
    device = 'cpu'
    epochs = 20
    lr = 0.001
    input_size = 6
    hidden_size = 6
    output_size = 1
    batch_size = 64
    # Load data (PEMS04 traffic flow). get_data comes from common.
    _path_npz = "../test4/data/PEMS04.npz"
    _path_csv = "../test4/data/PEMS04.csv"
    train_proportion = 0.7
    train_data, train_loader, test_data, test_loader = get_data(_path_npz, _path_csv, batch_size)
    # Sanity checks on the dataset.
    print(len(train_data))
    print(train_data[0]["flow_x"].size())
    print(train_data[0]["flow_y"].size())
    # Model, loss and optimizer.
    model = MyRNN(input_size=input_size, hidden_size=hidden_size, output_size=output_size).to(device)
    loss_func = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    loss_log = []
    score_log = []
    trained_batches = 0
    curr_lr = lr
    start_time = datetime.datetime.now()
    print(
        f"start training，start time = {start_time}，epochs={epochs}, lr={lr}, batch_size={batch_size},"
        f"len(train_data) = {len(train_data)}, flow_x size={train_data[0]['flow_x'].size()},"
        f" flow_y size={train_data[0]['flow_y'].size()}")
    for epoch in range(epochs):
        for batch in train_loader:
            # FIX: model() returns a (hidden, out) tuple, which has no .to();
            # unpack first, then move the prediction tensor to the CPU where
            # the flow_y labels live.
            hidden, out = model(batch["flow_x"])
            prediction = out.to(torch.device("cpu"))
            loss = loss_func(prediction, batch["flow_y"])
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_log.append(loss.detach().cpu().numpy().tolist())
            trained_batches += 1

            # Every 2000 batches, score the model on the held-out test set.
            if trained_batches % 2000 == 0:
                model.eval()  # evaluation mode; no gradients needed below
                all_prediction = []
                all_label = []
                with torch.no_grad():
                    for batch_test in test_loader:
                        # FIX: the original fed the int `batch_size` into the
                        # model (TypeError) and called .to() on the returned
                        # tuple — feed the test batch inputs and unpack first.
                        hidden, out = model(batch_test["flow_x"])
                        prediction = out.to(torch.device("cpu"))
                        all_prediction.append(prediction.detach().cpu().numpy())
                        all_label.append(batch_test["flow_y"])
                all_prediction = np.concatenate(all_prediction)
                all_label = np.concatenate(all_label)
                # NOTE(review): predictions/labels are compared in normalized
                # space; apply denormalize(x, '0') here if raw-flow metrics are
                # wanted — confirm against common.get_data.
                rmse_score = math.sqrt(mse(all_label, all_prediction))
                mae_score = mae(all_label, all_prediction)
                mape_score = mape(all_label, all_prediction)
                score_log.append([rmse_score, mae_score, mape_score])
                print('RMSE: %.4f, MAE: %.4f, MAPE: %.4f' % (rmse_score, mae_score, mape_score))
                model.train()  # back to training mode
        # Learning-rate decay: divide by 10 after every epoch.
        curr_lr /= 10
        update_lr(optimizer, curr_lr)
        print(f"epoch:{epoch}\t min(loss_log):{min(loss_log):.10f} \t")
    end_time = datetime.datetime.now()
    time = end_time - start_time
    print(f"end training，end time = {end_time}, duration time = {time}")