import os
import time
from datetime import datetime
import sys
import argparse
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from torchsummary import summary
import h5py
import pandas as pd
import warnings
import copy
import math


# --- utils section ---

warnings.filterwarnings("ignore")  # silence library deprecation/user warnings
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"  # avoid HDF5 file-locking issues on shared filesystems


class Dataset(object):
    """Container for the normalized train/val/test splits plus their z-score stats."""

    def __init__(self, data, stats):
        # data: dict with 'train' / 'val' / 'test' arrays (already z-scored)
        self.__data = data
        self.mean = stats['mean']
        self.std = stats['std']

    def get_data(self, type):
        """Return the split named by `type` ('train', 'val' or 'test')."""
        return self.__data[type]

    def get_stats(self):
        """Return the normalization statistics as a dict."""
        return {'mean': self.mean, 'std': self.std}

    def get_len(self, type):
        """Number of samples in the requested split."""
        return len(self.get_data(type))

    def z_inverse(self, type):
        """Undo the z-score normalization for the requested split."""
        return self.__data[type] * self.std + self.mean


def seq_gen(len_seq, data_seq, offset, n_frame, n_route, n_modal, day_slot, C_0=1):
    """Slice `data_seq` into sliding windows of `n_frame` time steps.

    Produces `len_seq * day_slot` samples starting at day `offset`; windows
    that would begin before index 0 are left as all-zeros.
    """
    out = np.zeros((len_seq * day_slot, n_frame, n_route, n_modal, C_0))
    for day in range(len_seq):
        for slot in range(day_slot):
            end = (day + offset) * day_slot + slot + 1
            start = end - n_frame
            if start < 0:
                continue  # incomplete window at the very beginning: keep zeros
            window = data_seq[start:end, :, :]
            out[day * day_slot + slot] = window.reshape(n_frame, n_route, n_modal, C_0)
    return out


def data_gen(file_path, data_config, n_route, n_frame=21, n_modal=4, day_slot=288):
    """Load the raw HDF5 sequence and build z-scored train/val/test splits.

    :param file_path: path of the HDF5 file holding dataset "data".
    :param data_config: (n_train, n_val, n_test) number of days per split.
    :param n_route: number of spatial nodes.
    :param n_frame: window length (history + horizon).
    :param n_modal: number of modalities.
    :param day_slot: time slots per day.
    :return: Dataset wrapping the normalized splits and the train stats.
    :raises FileNotFoundError: if `file_path` does not exist.
    """
    n_train, n_val, n_test = data_config
    try:
        # Open read-only and close deterministically.  The original leaked the
        # file handle and, on a missing file, only printed an error and then
        # crashed with NameError on `data_seq`.
        with h5py.File(file_path, 'r') as h:
            data_seq = h["data"][:]
    except FileNotFoundError:
        print(f'ERROR: input file was not found in {file_path}.')
        raise
    print("DATA SIZE: ", data_seq.shape)
    seq_train = seq_gen(n_train, data_seq, 0, n_frame, n_route, n_modal, day_slot)
    # Drop the first n_frame training samples whose windows were zero-filled
    # because they start before t=0.
    seq_train = seq_train[n_frame:]
    seq_val = seq_gen(n_val, data_seq, n_train, n_frame, n_route, n_modal, day_slot)
    seq_test = seq_gen(n_test, data_seq, n_train + n_val, n_frame, n_route, n_modal, day_slot)
    # x_stats: normalization statistics computed on the training split only.
    x_stats = {'mean': np.mean(seq_train), 'std': np.std(seq_train)}
    x_train = z_score(seq_train, x_stats['mean'], x_stats['std'])
    x_val = z_score(seq_val, x_stats['mean'], x_stats['std'])
    x_test = z_score(seq_test, x_stats['mean'], x_stats['std'])
    x_data = {'train': x_train, 'val': x_val, 'test': x_test}
    return Dataset(x_data, x_stats)


def gen_batch(inputs, batch_size, dynamic_batch=False, shuffle=False, period=None):
    """Yield mini-batches from `inputs` along axis 0.

    :param dynamic_batch: if True, the final short batch is yielded instead of dropped.
    :param shuffle: if True, samples are drawn in a random order.
    :param period: unused; kept for interface compatibility.
    """
    total = len(inputs)
    order = None
    if shuffle:
        # np.random.permutation(n) shuffles an arange, matching arange+shuffle.
        order = np.random.permutation(total)
    for lo in range(0, total, batch_size):
        hi = lo + batch_size
        if hi > total:
            if not dynamic_batch:
                break
            hi = total
        picks = order[lo:hi] if shuffle else slice(lo, hi)
        yield inputs[picks]


def z_score(x, mean, std):
    """Standardize `x` with the given mean and standard deviation."""
    centered = x - mean
    return centered / std


def z_inverse(x, mean, std):
    """Map z-scored values back to the original scale."""
    rescaled = x * std
    return rescaled + mean


def RMSE(v, v_):
    """Root mean squared error, reduced over axes (0, 2, 4)."""
    mse = np.mean(np.square(v_ - v), axis=(0, 2, 4))
    return np.sqrt(mse)


def MAE(v, v_):
    """Mean absolute error, reduced over axes (0, 2, 4)."""
    abs_err = np.abs(v_ - v)
    return abs_err.mean(axis=(0, 2, 4))


def get_metric(y, y_, x_stats):
    """De-normalize targets and predictions, then compute MAE and RMSE.

    Both metrics have shape (horizon, sources+1) after reduction.
    """
    mean, std = x_stats['mean'], x_stats['std']
    y = z_inverse(y, mean, std)
    y_ = z_inverse(y_, mean, std)
    mae = MAE(y, y_)
    rmse = RMSE(y, y_)
    return mae, rmse


# --- layers section ---

warnings.filterwarnings("ignore")  # NOTE(review): duplicates the settings applied in the utils section
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"


########################################
## Multi-modality Data Augmentation (MDA)
########################################
class MDA(nn.Module):
    """Multi-modality Data Augmentation (MDA).

    Masks the least-similar (batch, node, modality) cells of the input and
    fuses the result with a learnable MoST (modality/space/time) embedding.
    """

    def __init__(self, device, channels, n_query, n_his, num_nodes, num_modals):
        super(MDA, self).__init__()
        self.channels = channels
        self.n_query = n_query
        self.c = channels
        self.t = n_his
        self.n = num_nodes
        self.m = num_modals
        self.att = nn.Conv2d(in_channels=self.channels, out_channels=self.n_query, kernel_size=(1, 1))
        self.agg = nn.AvgPool2d(kernel_size=(self.n_query, 1), stride=1)
        # MoST Embedding.  Create the tensors directly on `device`: the original
        # `nn.Parameter(...).to(device)` returns a plain non-leaf tensor on CUDA,
        # so the embeddings were silently excluded from model.parameters() and
        # never trained.
        self.temporal_embedding = nn.Parameter(torch.randn(channels, n_his, device=device))
        nn.init.xavier_normal_(self.temporal_embedding)
        self.spatial_embedding = nn.Parameter(torch.randn(channels, num_nodes, device=device))
        nn.init.xavier_normal_(self.spatial_embedding)
        self.modality_embedding = nn.Parameter(torch.randn(channels, num_modals, device=device))
        nn.init.xavier_normal_(self.modality_embedding)
        # Linear proj.
        self.proj = nn.Sequential(
            nn.Conv3d(in_channels=2 * channels, out_channels=channels, kernel_size=(1, 1, 1)),
            nn.ReLU())

    def get_moste(self):
        """Generate the MoST Embedding E, shape [1, c, m, n, t] via broadcast sum."""
        temporal = self.temporal_embedding.reshape(1, self.c, 1, 1, self.t)
        spatial = self.spatial_embedding.reshape(1, self.c, 1, self.n, 1)
        modality = self.modality_embedding.reshape(1, self.c, self.m, 1, 1)
        return temporal + spatial + modality

    def augmentation(self, x, sim, percent=0.1):
        """Generate the modality-aware augmentation.

        Zeroes out `percent` of the (batch, node, modality) cells, sampling
        cells with probability proportional to (1 - similarity).
        :param x: input representation, [b, c, m, n, t]
        :param sim: similarity scores on the CPU, [b, n, m]
        :raises ValueError: if the masking probabilities contain NaN.
        """
        x = x.permute(0, 3, 2, 1, 4)  # -> [b, n, m, c, t] so a cell indexes the first 3 dims
        b, n, m = sim.shape
        mask_num = int(b * n * m * percent)
        aug_x = x.clone()  # clone() is the idiomatic tensor copy (was copy.deepcopy)

        # Lower similarity -> higher masking probability.
        mask_prob = (1. - sim.reshape(-1)).numpy()
        mask_prob /= mask_prob.sum()
        if np.isnan(mask_prob).any():
            raise ValueError("probabilities contain a value that is not a number")

        bi, ni, mi = np.meshgrid(range(b), range(n), range(m), indexing='ij')
        mask_list = np.random.choice(b * n * m, size=mask_num, p=mask_prob)

        zeros = torch.zeros_like(aug_x[0, 0, 0])
        aug_x[bi.reshape(-1)[mask_list],
              ni.reshape(-1)[mask_list],
              mi.reshape(-1)[mask_list]] = zeros
        return aug_x.permute(0, 3, 2, 1, 4)  # back to [b, c, m, n, t]

    def forward(self, x, rep):
        """
        :param x: initial representation, [b, c, m, n, t]
        :param rep: encoder representation, [b, c, m, n, t_rep]
        :return aug: augmented representation, [b, c, m, n, t]
        """
        b, c, m, n, _ = rep.shape
        rep = rep.permute(0, 1, 3, 4, 2).reshape(b, c, -1, m)
        # Attention matrix A over modalities.
        A = self.att(rep)
        A = torch.softmax(A, dim=-1)
        # Modality similarity (probability per cell).
        A = torch.einsum('bqlm->lbqm', A)
        sim = self.agg(A).squeeze(2).permute(1, 0, 2)
        # Data augmentation on detached copies (no gradients through the mask).
        aug_x = self.augmentation(x.detach(), sim.detach().cpu())
        # Fuse with the MoST embedding and project back to `channels`.
        moste = self.get_moste().repeat(b, 1, 1, 1, 1)
        aug = self.proj(torch.cat((aug_x, moste), dim=1))
        return aug


##########################################################
## Global Self-Supervised Learning (GSSL)
##########################################################
LOG2PI = math.log(2 * math.pi)  # constant log(2*pi) term of the Gaussian log-density


class GSSL(nn.Module):
    """Global Self-Supervised Learning (GSSL).

    Predicts per-sample Gaussian mixture parameters from the augmented
    representation and scores the original representation under that
    density; the loss is a negative log-likelihood.
    """

    def __init__(self, in_features, channels, num_comp):
        super(GSSL, self).__init__()
        self.in_features = in_features
        self.num_comp = num_comp
        # L2-normalize along the channel dimension.
        self.l2norm = lambda x: F.normalize(x, dim=1, p=2)
        # Mixture membership scores (softmax over the num_comp components).
        self.gamma = nn.Sequential(
            nn.Linear(in_features * channels, num_comp, bias=False),
            nn.Softmax(dim=-1)
        )
        self.softmax = nn.Softmax(dim=-1)
        # Component-wise Gaussian parameter heads (1x1 convolutions).
        self.sigma = nn.Conv1d(in_channels=in_features, out_channels=num_comp, kernel_size=1)
        self.mu = nn.Conv1d(in_channels=in_features, out_channels=num_comp, kernel_size=1)

        for m in self.modules():
            self.weights_init(m)

    def weights_init(self, m):
        # Xavier-initialize Linear layers only; conv heads keep their default init.
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)

    def get_GaussianPara(self, rep):
        '''
        :param rep: representation, [b,c,m,n,t]
        :return gammas: membership score, [b,1,k]
        :return mus, sigmas: Gaussian parameters (mean and std), [b,c,k]
        '''
        b, c, m, n, _ = rep.shape
        gammas = self.gamma(rep.reshape(b, -1))
        mus = self.mu(rep.permute(0, 2, 3, 4, 1).reshape(b, -1, c))
        # exp keeps the predicted standard deviations strictly positive.
        sigmas = torch.exp(self.sigma(rep.permute(0, 2, 3, 4, 1).reshape(b, -1, c)))
        return gammas.unsqueeze(1), mus.permute(0, 2, 1), sigmas.permute(0, 2, 1)

    def get_logPdf(self, rep, mus, sigmas):
        '''
        :param rep: representation, [b,c,m*n*t]
        :param mus, sigmas: Gaussian parameters, [b,c,k]
        :return log_component_prob: L2-normalized log PDF, [b, m*n*t, k]
        '''
        h = rep.unsqueeze(-1)
        mus = mus.unsqueeze(2)
        sigmas = sigmas.unsqueeze(2)
        # Per-channel Gaussian log-density.
        log_component_prob = -torch.log(sigmas) - 0.5 * LOG2PI - 0.5 * torch.pow((h - mus) / sigmas, 2)
        # torch.prod(log_component_prob, 1) may cause inf; the l2norm keeps it bounded.
        return self.l2norm(torch.prod(log_component_prob, 1))

    def forward(self, rep, rep_aug):
        """Return the GSSL loss for the original and augmented encodings."""
        b, c, m, n, _ = rep_aug.shape
        rep = self.l2norm(rep)
        rep_aug = self.l2norm(rep_aug)
        gammas_aug, mus_aug, sigmas_aug = self.get_GaussianPara(rep_aug)
        # get log Pdf with the original representation H as a self-supervised signal
        log_component_prob_aug = self.get_logPdf(rep.reshape(b, c, -1), mus_aug, sigmas_aug)
        log_prob_aug = log_component_prob_aug + torch.log(gammas_aug)
        # calculate loss: negative log-likelihood of the mixture
        loss = -torch.mean(torch.log(torch.sum(log_prob_aug.exp(), dim=-1)))
        return loss


##########################################################
## Modality Self-Supervised Learning (MSSL)
##########################################################
class MSSL(nn.Module):
    """Modality Self-Supervised Learning (MSSL).

    Fuses the two views, then contrasts real per-node features against
    modality-shuffled fakes with a bilinear discriminator (BCE loss).
    """

    def __init__(self, channels, num_nodes, num_modals, device):
        super(MSSL, self).__init__()
        self.device = device
        self.flat_hidden = num_nodes * num_modals
        # Learnable fusion weights for the original / augmented views.
        self.W1 = nn.Parameter(torch.FloatTensor(self.flat_hidden, channels))
        self.W2 = nn.Parameter(torch.FloatTensor(self.flat_hidden, channels))
        nn.init.kaiming_uniform_(self.W1, a=math.sqrt(5))
        nn.init.kaiming_uniform_(self.W2, a=math.sqrt(5))
        self.sigmoid = nn.Sigmoid()
        # Bilinear scoring network g(h, c).
        self.net = nn.Bilinear(channels, channels, 1)
        self.logits_loss = nn.BCEWithLogitsLoss()

        for module in self.modules():
            self.weights_init(module)

    def weights_init(self, m):
        """Xavier-initialize the bilinear scorer; zero its bias."""
        if isinstance(m, nn.Bilinear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)

    def fusion(self, rep, rep_aug):
        '''
        :param rep: original representation, [b,c,m,n,t]
        :param rep_aug: augmented representation, [b,c,m,n,t]
        :return h: fusion representation, [b,m,n,c]
        :return cm: unified modality representation, [b,m,c]
        '''
        b, c, m, n = rep.shape[0], rep.shape[1], rep.shape[2], rep.shape[3]
        flat = rep.permute(0, 2, 3, 4, 1).reshape(b, -1, c)
        flat_aug = rep_aug.permute(0, 2, 3, 4, 1).reshape(b, -1, c)
        h = (flat * self.W1 + flat_aug * self.W2).reshape(b, m, n, c)
        # Unified modality representation: node average squashed into (0, 1).
        cm = self.sigmoid(torch.mean(h, dim=2))
        return h, cm

    def pn_sampling(self, h):
        '''
        :param h: fusion representation, [b,m,n,c]
        :return: (real, fake) pair; the fake shuffles the modality axis.
        '''
        perm = torch.randperm(h.size(1))
        return h, h[:, perm, :, :]

    def get_logits(self, cm, h_rl, h_fk):
        '''
        :param cm: unified modality representation, [b,m,c]
        :param h_rl: real hidden representation, [b,m,n,c]
        :param h_fk: fake hidden representation, [b,m,n,c]
        :return logits: scores, [b,m,n,2]
        '''
        cm = torch.unsqueeze(cm, dim=2).expand_as(h_rl).contiguous()
        score_real = self.net(h_rl.contiguous(), cm.contiguous())
        score_fake = self.net(h_fk.contiguous(), cm.contiguous())
        return torch.cat((score_real, score_fake), dim=-1)

    def cal_loss(self, logits):
        '''
        :param logits: scores, [b,m,n,2]
        :return loss: BCE loss with real labelled 1 and fake labelled 0
        '''
        b, m, n, _ = logits.shape
        labels = torch.cat((torch.ones(b, m, n, 1), torch.zeros(b, m, n, 1)), dim=-1).to(self.device)
        return self.logits_loss(logits, labels)

    def forward(self, rep, rep_aug):
        """Return the MSSL loss for the two views."""
        h, cm = self.fusion(rep, rep_aug)
        h_rl, h_fk = self.pn_sampling(h)
        logits = self.get_logits(cm, h_rl, h_fk)
        return self.cal_loss(logits)


# 下面是多模态时空预测模型构建部分

warnings.filterwarnings("ignore")
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"


########################################
## Spacial-Attention (SA) Layer
########################################
class SA(nn.Module):
    """Spatial-Attention layer: scaled dot-product attention over the node axis."""

    def __init__(self, channels):
        super(SA, self).__init__()
        self.channels = channels

        def _proj():
            # 1x1x1 conv + ReLU acting as a per-position linear projection.
            return nn.Sequential(
                nn.Conv3d(in_channels=channels, out_channels=channels, kernel_size=(1, 1, 1)),
                nn.ReLU())

        self.Wq = _proj()
        self.Wk = _proj()
        self.Wv = _proj()
        self.FC = _proj()

    def forward(self, rep):
        """
        :param rep: [b, c, m, n, t]
        :return: attended representation, [b, c, m, n, t]
        """
        # Move nodes onto the attention axis: [b, m, t, n, c].
        q = self.Wq(rep).permute(0, 2, 4, 3, 1)
        k = self.Wk(rep).permute(0, 2, 4, 3, 1)
        v = self.Wv(rep).permute(0, 2, 4, 3, 1)
        scores = torch.matmul(q, k.transpose(3, 4)) / (self.channels ** 0.5)
        weights = F.softmax(scores, dim=-1)
        out = torch.matmul(weights, v)
        # Restore [b, c, m, n, t].
        return self.FC(out.permute(0, 4, 1, 3, 2))


########################################
## Modality-Attention (MA) Layer
########################################
class MA(nn.Module):
    """Modality-Attention layer: scaled dot-product attention over the modality axis."""

    def __init__(self, channels):
        super(MA, self).__init__()
        self.channels = channels

        def _proj():
            # 1x1x1 conv + ReLU acting as a per-position linear projection.
            return nn.Sequential(
                nn.Conv3d(in_channels=channels, out_channels=channels, kernel_size=(1, 1, 1)),
                nn.ReLU())

        self.Wq = _proj()
        self.Wk = _proj()
        self.Wv = _proj()
        self.FC = _proj()

    def forward(self, rep):
        """
        :param rep: [b, c, m, n, t]
        :return: attended representation, [b, c, m, n, t]
        """
        # Move modalities onto the attention axis: [b, n, t, m, c].
        q = self.Wq(rep).permute(0, 3, 4, 2, 1)
        k = self.Wk(rep).permute(0, 3, 4, 2, 1)
        v = self.Wv(rep).permute(0, 3, 4, 2, 1)
        scores = torch.matmul(q, k.transpose(3, 4)) / (self.channels ** 0.5)
        weights = F.softmax(scores, dim=-1)
        out = torch.matmul(weights, v)
        # Restore [b, c, m, n, t].
        return self.FC(out.permute(0, 4, 3, 1, 2))


########################################

## Residual Block
########################################
class ResidualBlock(nn.Module):
    """One MoST encoder block.

    Concatenates spatial-attention, modality-attention and identity views of
    the input, applies a gated dilated temporal convolution, and emits a
    residual output plus a skip-connection output.
    """

    def __init__(self, num_modals, num_nodes, channels, dilation, kernel_size):
        super(ResidualBlock, self).__init__()
        self.num_modals = num_modals
        self.num_nodes = num_nodes
        self.channels = channels
        self.dilation = dilation
        self.kernel_size = kernel_size
        self.num = 3  # views concatenated before the TC: SA out, MA out, identity
        # Spatial-Attention Layer
        self.sa = SA(self.channels)
        # Modality-Attention Layer
        self.ma = MA(self.channels)
        # Gated Temporal Convolution (dilated along the time axis)
        self.filter_convs = nn.Conv3d(in_channels=self.num * self.channels,
                                      out_channels=self.num_modals * self.channels,
                                      kernel_size=(self.num_modals, 1, self.kernel_size),
                                      dilation=(1, 1, self.dilation))
        self.gate_convs = nn.Conv3d(in_channels=self.num * self.channels,
                                    out_channels=self.num_modals * self.channels,
                                    kernel_size=(self.num_modals, 1, self.kernel_size),
                                    dilation=(1, 1, self.dilation))
        self.residual_convs = nn.Conv3d(in_channels=self.channels, out_channels=self.channels, kernel_size=(1, 1, 1))
        # Skip Connection
        self.skip_convs = nn.Conv3d(in_channels=self.channels, out_channels=self.channels, kernel_size=(1, 1, 1))

    def forward(self, rep):
        """
        :param rep: [b, c, m, n, t]
        :return rep: residual-branch output (time axis shortened by the TC)
        :return sk: skip-branch output, same shape as rep
        :return gate: sigmoid gate activations, same shape as rep
        """
        # Concatenate the spatial-attention, modality-attention and identity views.
        rep = torch.cat([self.sa(rep), self.ma(rep), rep], dim=1)
        # Temporal Convolution (TC): tanh(filter) * sigmoid(gate).
        # `filt` avoids shadowing the builtin `filter`.
        filt = self.filter_convs(rep)
        b, _, _, n, t = filt.shape
        filt = torch.tanh(filt).reshape(b, -1, self.num_modals, n, t)
        gate = torch.sigmoid(self.gate_convs(rep)).reshape(b, -1, self.num_modals, n, t)
        rep = filt * gate
        # Parametrized skip connection (the original kept an unused `save_rep` copy).
        sk = self.skip_convs(rep)
        rep = self.residual_convs(rep)
        return rep, sk, gate


########################################
## MoST Encoder
########################################
class MoST_Encoder(nn.Module):
    """Stack of ResidualBlocks with exponentially growing temporal dilation;
    returns the accumulated skip-connection outputs."""

    def __init__(self, layers, num_modals, num_nodes, channels, kernel_size):
        super(MoST_Encoder, self).__init__()
        self.layers = layers
        # Residual Blocks with dilation 1, 2, 4, ...
        self.residualblocks = nn.ModuleList()
        dilation = 1
        for _ in range(self.layers):
            self.residualblocks.append(ResidualBlock(num_modals, num_nodes, channels, dilation, kernel_size))
            dilation *= 2

    def forward(self, rep):
        """
        :param rep: [b, c, m, n, t]
        :return skip: accumulated skip output; the time axis shrinks with the
            receptive field of the dilated convolutions.
        """
        skip = None
        for block in self.residualblocks:
            residual = rep
            rep, sk, _ = block(rep)
            # Residual connection: crop the input to the shortened time axis.
            rep = rep + residual[:, :, :, :, -rep.size(4):]
            # Accumulate skips.  The original initialized `skip = 0` and relied
            # on a bare `except:` to catch the first-iteration slicing error,
            # which would also have hidden any real failure.
            skip = sk if skip is None else sk + skip[:, :, :, :, -sk.size(4):]
        return skip


########################################
## MoSSL Framework
########################################
class MoSSL(nn.Module):
    """MoSSL framework: projection -> shared MoST encoder -> predictor, plus the
    GSSL/MSSL self-supervised losses computed against an augmented view."""

    def __init__(self, device, num_comp, num_nodes, num_modals, n_his, n_pred, channels, layers, in_dim, kernel_size=2):
        super(MoSSL, self).__init__()
        # Linear projection from the raw input dimension up to `channels`.
        self.proj1 = nn.Sequential(
            nn.Conv3d(in_channels=in_dim, out_channels=int(channels // 2), kernel_size=(1, 1, 1)),
            nn.ReLU(),
            nn.Conv3d(in_channels=int(channels // 2), out_channels=channels, kernel_size=(1, 1, 1)),
            nn.ReLU()
        )
        # MoST Encoder (shared between the original and augmented views).
        self.most_encoder = MoST_Encoder(layers, num_modals, num_nodes, channels, kernel_size)
        # Predictor head mapping the encoding to `n_pred` future steps.
        self.predictor = nn.Sequential(
            nn.ReLU(),
            nn.Conv3d(in_channels=channels, out_channels=channels, kernel_size=(1, 1, 1)),
            nn.ReLU(),
            nn.Conv3d(in_channels=channels, out_channels=n_pred, kernel_size=(1, 1, 1))
        )
        # Multi-modality Data Augmentation
        self.mda = MDA(device, channels, channels, n_his, num_nodes, num_modals)
        # Global Self-Supervised Learning (GSSL)
        self.in_features = num_nodes * num_modals
        self.gssl = GSSL(self.in_features, channels, num_comp)
        # Modality Self-Supervised Learning (MSSL)
        self.mssl = MSSL(channels, num_nodes, num_modals, device)

    def forward(self, input):
        """
        :param input: batch of history windows; permuted to channels-first inside.
        :return pred: predictions after the final permute
        :return loss: combined GSSL + MSSL self-supervised loss
        """
        # Rearrange to channels-first: [b, c_in, m, n, t].
        input = input.permute(0, 4, 3, 2, 1)
        # Up-stream (original view): project, encode, predict.
        hidden = self.proj1(input)
        encoding = self.most_encoder(hidden)
        pred = self.predictor(encoding)
        # Down-stream (augmented view): augment, then encode with shared weights.
        augmented = self.mda(hidden, encoding)
        encoding_aug = self.most_encoder(augmented)
        # Self-supervised objectives on the two encodings (GSSL first, then MSSL).
        ssl_loss = self.gssl(encoding, encoding_aug) + self.mssl(encoding, encoding_aug)
        return pred.permute(0, 1, 3, 2, 4), ssl_loss


# --- train / test section ---


np.random.seed(1337)  # fixed seed so batch shuffling and mask sampling are reproducible
torch.backends.cudnn.benchmark = True  # let cuDNN auto-tune conv algorithms for fixed input shapes


def get_model():
    """Build a MoSSL model from the global CLI `args` and move it to `device`."""
    model = MoSSL(device, args.num_comp, args.num_nodes, args.num_modals, args.input_length, args.horizon,
                  args.hidden_channels, layers, args.indim).to(device)
    return model


def prepare_x_y(x, y):
    """Convert numpy batches to float32 tensors on the training device."""
    x_t = torch.tensor(x, dtype=torch.float32).to(device)
    y_t = torch.tensor(y, dtype=torch.float32).to(device)
    return x_t, y_t


def predictModel(model, seq, dynamic_batch=True):
    """Run `model` over `seq` in batches and collect the predictions.

    :param model: trained MoSSL model.
    :param seq: input samples, [num_samples, n_frame, n, m, c].
    :param dynamic_batch: forward a short final batch instead of dropping it.
    :return: (predictions as a numpy array, number of predicted samples)
    """
    model.eval()
    pred_list = []
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        # NOTE(review): `len(seq[0])` is the per-sample frame count, not the number
        # of samples -- `len(seq)` was probably intended; kept for compatibility.
        for i in gen_batch(seq, min(args.batch_size, len(seq[0])), dynamic_batch=dynamic_batch):
            # np.copy() so the slice never aliases the source data.
            test_seq = np.copy(i[:, 0:args.input_length, :, :, :])
            test_seq_th = torch.tensor(test_seq, dtype=torch.float32).to(device)
            pred, _ = model(test_seq_th)
            pred_list.append(pred.data.cpu().numpy())
    pred_array = np.concatenate(pred_list, axis=0)
    return pred_array, pred_array.shape[0]


def modelInference(model, inputs):
    """Evaluate `model` on the validation and test splits.

    :return: (mae_val, rmse_val, mae_test, rmse_test) metric arrays.
    """
    x_val = inputs.get_data('val')
    x_test = inputs.get_data('test')
    x_stats = inputs.get_stats()
    if args.input_length + args.horizon > x_val[0].shape[0]:
        raise ValueError(f'ERROR: the value of horizon "{args.horizon}" exceeds the length limit.')
    horizon_slice = slice(args.input_length, args.horizon + args.input_length)
    # Validation metrics.
    y_val, len_val = predictModel(model, x_val)
    mae_val, rmse_val = get_metric(x_val[0:len_val, horizon_slice, :, :, :], y_val[:, :, :, :, :], x_stats)
    # Test metrics.
    y_test, len_test = predictModel(model, x_test)
    mae_test, rmse_test = get_metric(x_test[0:len_test, horizon_slice, :, :, :], y_test[:, :, :, :, :], x_stats)
    return mae_val, rmse_val, mae_test, rmse_test


def traintest_model(dataset):
    """Train with early stopping on summed validation RMSE, checkpoint the best
    weights, then reload them and report per-modality validation/test metrics."""
    model = get_model()
    # Checkpoint name encodes the full hyper-parameter configuration.
    file_name = "{}_{}_num_comp{}_hc{}_l{}_his{}_pred{}_v{}".format(args.model_name, args.data_name, args.num_comp,
                                                                    args.hidden_channels, layers,
                                                                    args.input_length, args.horizon, args.version)
    os.makedirs('MODEL', exist_ok=True)
    save_model_path = os.path.join('MODEL', '{}.h5'.format(file_name))
    print('=' * 10)
    print("training and testing model...")
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, eps=args.epsilon)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.steps, gamma=args.lr_decay_ratio)
    min_rmse = float('inf')  # best (lowest) summed validation RMSE seen so far
    wait = 0  # epochs since the last improvement (early-stopping counter)
    nb_epoch = 500
    for epoch in range(nb_epoch):
        start_time = time.time()
        model.train()
        losses, reg_losses, fea_losses = [], [], []
        for j, x_batch in enumerate(
                gen_batch(dataset.get_data('train'), args.batch_size, dynamic_batch=True, shuffle=True)):
            optimizer.zero_grad()
            # Split each window into history (x) and prediction target (y).
            x = x_batch[:, 0:args.input_length]
            y = x_batch[:, args.input_length:args.input_length + args.horizon, :, :, :]
            x, y = prepare_x_y(x, y)
            pred, fea_loss = model(x)
            reg_loss = criterion(pred, y)
            # Total loss = prediction MSE + self-supervised (GSSL + MSSL) loss.
            loss = reg_loss + fea_loss
            losses.append(loss.item())
            reg_losses.append(reg_loss.item())
            fea_losses.append(fea_loss.item())
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(),
                                           args.max_grad_norm)  # gradient clipping - this does it in place
            optimizer.step()
        train_loss = np.mean(losses)
        train_reg_loss = np.mean(reg_losses)
        train_fea_loss = np.mean(fea_losses)
        lr_scheduler.step()
        end_time = time.time()
        mae_val, rmse_val, mae_test, rmse_test = modelInference(model, dataset)
        print('=' * 80)
        print('Epoch {}: train_loss: {:.3f} [{:.3f},{:.3f}]; lr: {:.4f}; {:.1f}s'.format(
            epoch, train_loss, train_reg_loss, train_fea_loss, optimizer.param_groups[0]['lr'],
            (end_time - start_time)))
        # Per-modality metrics at the first three horizon steps (val, test pairs).
        for i in range(args.num_modals):
            print('Modality {}:'.format(i))
            print('Horizon 1 Hour| MAE: {:.2f}, {:.2f}; RMSE: {:.2f}, {:.2f};'
                  .format(mae_val[0, i], mae_test[0, i], rmse_val[0, i], rmse_test[0, i]))
            print('Horizon 2 Hour| MAE: {:.2f}, {:.2f}; RMSE: {:.2f}, {:.2f};'
                  .format(mae_val[1, i], mae_test[1, i], rmse_val[1, i], rmse_test[1, i]))
            print('Horizon 3 Hour| MAE: {:.2f}, {:.2f}; RMSE: {:.2f}, {:.2f};'
                  .format(mae_val[2, i], mae_test[2, i], rmse_val[2, i], rmse_test[2, i]))
        # Early stopping: checkpoint when the summed validation RMSE improves.
        total_rmse = rmse_val.sum()
        if total_rmse < min_rmse:
            print('Toal RMSE decrease from {:.2f} to {:.2f} ({:.2f})'.format(min_rmse, total_rmse,
                                                                             (min_rmse - total_rmse)))
            model.eval()
            torch.save(model.state_dict(), save_model_path)
            min_rmse = total_rmse
            wait = 0
        else:
            wait += 1
            if wait == args.patience:
                print('Early stopping at epoch: %d' % epoch)
                break

    # Reload the best checkpoint and report the final metrics.
    model.load_state_dict(torch.load(save_model_path))
    mae_val, rmse_val, mae_test, rmse_test = modelInference(model, dataset)
    print('=' * 20 + 'Best model performance' + '=' * 20)
    for i in range(args.num_modals):
        print('Modality {}:'.format(i))
        print('Horizon 1 Hour| MAE: {:.2f}, {:.2f}; RMSE: {:.2f}, {:.2f};'
              .format(mae_val[0, i], mae_test[0, i], rmse_val[0, i], rmse_test[0, i]))
        print('Horizon 2 Hour| MAE: {:.2f}, {:.2f}; RMSE: {:.2f}, {:.2f};'
              .format(mae_val[1, i], mae_test[1, i], rmse_val[1, i], rmse_test[1, i]))
        print('Horizon 3 Hour| MAE: {:.2f}, {:.2f}; RMSE: {:.2f}, {:.2f};'
              .format(mae_val[2, i], mae_test[2, i], rmse_val[2, i], rmse_test[2, i]))


# Params #
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', default='MoSSL', type=str, help='model name')
parser.add_argument('--data_name', default='NYC', type=str, help='NYC dataset')
parser.add_argument('--num_nodes', default=98, type=int, help='number of nodes')
parser.add_argument('--num_modals', default=4, type=int, help='number of modalities')
parser.add_argument('--input_length', default=16, type=int, help='input length')
parser.add_argument('--horizon', default=3, type=int, help='output length')
parser.add_argument('--indim', default=1, type=int, help='input dimension')
parser.add_argument('--num_comp', default=4, type=int, help='number of clusters')
parser.add_argument('--hidden_channels', default=48, type=int, help='number of hidden channels')
parser.add_argument('--batch_size', default=16, type=int, help='number of batch size')
parser.add_argument("--patience", default=15, type=int, help="patience used for early stop")
parser.add_argument("--lr", default=0.01, type=float, help="base learning rate")
parser.add_argument("--epsilon", default=1e-3, type=float, help="optimizer epsilon")
# NOTE(review): type=eval executes arbitrary CLI text -- acceptable only for a
# trusted command line; ast.literal_eval would be safer.
parser.add_argument("--steps", default=[50, 100], type=eval, help="steps")
parser.add_argument("--lr_decay_ratio", default=0.1, type=float, help="lr_decay_ratio")
parser.add_argument("--max_grad_norm", default=5, type=int, help="max_grad_norm")
# NOTE(review): single-dash '-version' is inconsistent with the other flags
# (argparse still exposes it as args.version).
parser.add_argument('-version', default=0, type=int, help='index of repeated experiments')
parser.add_argument('--cuda', default=0, type=int, help='cuda name')
args = parser.parse_args()
device = torch.device("cuda:{}".format(args.cuda)) if torch.cuda.is_available() else torch.device("cpu")
# Number of encoder layers -- presumably chosen so the dilated TC stack's
# receptive field (kernel 2, dilation doubling) covers input_length; confirm.
layers = int(np.log2(args.input_length))
# Pin all math libraries to a single CPU thread.
cpu_number = 1
os.environ['OMP_NUM_THREADS'] = str(cpu_number)
os.environ['OPENBLAS_NUM_THREADS'] = str(cpu_number)
os.environ['MKL_NUM_THREADS'] = str(cpu_number)
os.environ['VECLIB_MAXIMUM_THREADS'] = str(cpu_number)
os.environ['NUMEXPR_NUM_THREADS'] = str(cpu_number)
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
torch.set_num_threads(cpu_number)
###########################################################
print('=' * 10)
print('| Model: {0} | Dataset: {1} | History: {2} | Horizon: {3}'.format(args.model_name, args.data_name,
                                                                         args.input_length, args.horizon))
print("version: ", args.version)
print("number of clusters: ", args.num_comp)
print("channel in: ", args.indim)
print("hidden channels: ", args.hidden_channels)
print("layers: ", layers)
# load data
print('=' * 10)
print("loading data...")
# NOTE(review): `dataset` is only defined when data_name == 'NYC'; any other
# value raises NameError later in main() -- confirm whether that is intended.
if args.data_name == 'NYC':
    n_train, n_val, n_test = 81, 5, 5
    args.num_nodes = 20
    args.num_modals = 4
    dataset = data_gen('data/nyc.h5', (n_train, n_val, n_test), args.num_nodes, args.input_length + args.horizon,
                       args.num_modals, day_slot=48)


#######################################
def main():
    """Entry point: train and evaluate on the globally loaded dataset."""
    print('=' * 10)
    print("compiling model...")
    print('=' * 10)
    print("init model...")
    t0 = time.time()
    traintest_model(dataset)
    elapsed_hours = (time.time() - t0) / 3600
    print('Total running {:.1f} hours.'.format(elapsed_hours))


if __name__ == '__main__':
    main()
