#!/usr/bin/env python3
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import argparse
import numpy as np
import os
import pandas as pd
from tqdm import tqdm

def generate_graph_seq2seq2_io_data(
    df, x_offsets, period_offsets, y_offsets,
    step_rows=12, period_len=3, period_steps=24, period_units="D",
    add_time_in_period=True):
    """Build (x, y) sample tensors for graph seq2seq training.

    Slides a window over ``df`` (rows = timestamps, columns = nodes): for each
    anchor index ``t`` the rows ``t + x_offsets`` (optionally followed by the
    periodic history rows ``t + period_offsets``) form one input sample and
    the rows ``t + y_offsets`` form the matching target.

    Args:
        df: DataFrame with a DatetimeIndex, shape (num_samples, num_nodes).
        x_offsets: 1-D int array of non-positive offsets for the input window.
        period_offsets: 1-D int array of (negative) offsets into earlier
            periods; pass an empty array (or period_len <= 0) to disable.
        y_offsets: 1-D int array of positive offsets for the target window.
        step_rows: rows per prediction step (window length).
        period_len: number of past periods prepended to each input sample.
        period_steps: steps per period (period_rows = step_rows * period_steps).
        period_units: numpy datetime unit of one period; only "D" is supported.
        add_time_in_period: append time-within-period as an extra feature dim.

    Returns:
        x: (num_windows, input_length, num_nodes, num_features)
        y: (num_windows, len(y_offsets), num_nodes, num_features)

    Raises:
        NotImplementedError: for period_units other than "D".
        ValueError: if the derived period size is zero.
    """
    # tqdm is a cosmetic progress bar; degrade gracefully when it is missing.
    try:
        from tqdm import tqdm
    except ImportError:
        def tqdm(iterable, **_kwargs):
            return iterable

    num_samples, num_nodes = df.shape
    data = np.expand_dims(df.values, axis=-1)
    feature_list = [data]
    if add_time_in_period:
        # Fraction of the period elapsed at each timestamp, e.g. time-of-day
        # in [0, 1) when period_units == "D".
        time_inp = (
            df.index.values
            - df.index.values.astype("datetime64[" + period_units + "]")
        ) / np.timedelta64(1, period_units)
        time_in_period = np.tile(time_inp, [1, num_nodes, 1]).transpose((2, 1, 0))
        feature_list.append(time_in_period)
    data = np.concatenate(feature_list, axis=-1)

    if period_units == 'D':
        period_rows = step_rows * period_steps
    else:
        raise NotImplementedError(
            "period_units=%r is not supported (only 'D')" % period_units)
    if period_rows == 0:
        raise ValueError("period_rows must be positive; got 0")

    use_period = period_len > 0 and period_offsets.size > 0
    if use_period:
        # Leave room for period_len full periods of history before the first t.
        min_t = int(period_len * period_rows)
    else:
        min_t = abs(min(x_offsets))
    # t is the index of the last observation; leave room for y_offsets ahead.
    max_t = abs(num_samples - abs(max(y_offsets)))

    x, y = [], []
    for t in tqdm(range(min_t, max_t)):
        x_t = data[t + x_offsets, ...]
        if use_period:
            # Recent window first, then the periodic history rows.
            x_t = np.vstack((x_t, data[t + period_offsets, ...]))
        x.append(x_t)
        y.append(data[t + y_offsets, ...])
    return np.stack(x, axis=0), np.stack(y, axis=0)

def generate_train_val_test(input_df=None, output_dir=None,
                            train_rate=0.7, test_rate=0.2,
                            index_col="0",
                            step_rows=12, period_len=3, period_steps=24,
                            period_units='D',
                            add_time_in_period=True):
    """Generate train/val/test .npz datasets from a time-indexed DataFrame.

    Args:
        input_df: DataFrame, or path to a CSV file to load.
        output_dir: directory where train.npz / val.npz / test.npz are written.
        train_rate: fraction of samples used for training.
        test_rate: fraction of samples used for testing; validation gets the
            remainder.
        index_col: CSV column holding timestamps (a digit string is treated as
            a positional index); ignored when input_df is already a DataFrame.
        step_rows, period_len, period_steps, period_units, add_time_in_period:
            forwarded to generate_graph_seq2seq2_io_data.

    Raises:
        ValueError: when input_df is missing or the CSV path does not exist.
        NotImplementedError: for period_units other than 'D'.
    """
    print("[INFO] Generating training data ...")
    print("[INFO] Using parameters:step_rows={step_rows},period_len={period_len},period_units={period_units}".format(
        step_rows=step_rows, period_len=period_len, period_units=period_units
    ))
    if isinstance(input_df, str):
        if not os.path.exists(input_df):
            print("[ERROR] File does not exist!")
            raise ValueError("input file does not exist: %s" % input_df)
        print("[INFO] Reading Input DataFrame ...")
        print("[INFO] Using column {index_col} for time index...".format(index_col=index_col))
        # A digit string selects the column by position rather than by name.
        if index_col.isdigit():
            index_col = int(index_col)
        input_df = pd.read_csv(input_df, index_col=index_col)
    if input_df is None:
        raise ValueError("input_df must be a DataFrame or a CSV path")
    input_df.index = pd.to_datetime(input_df.index)

    if period_units == 'D':
        period_rows = step_rows * period_steps
    else:
        raise NotImplementedError(
            "period_units=%r is not supported (only 'D')" % period_units)
    if period_rows == 0:
        raise ValueError("period_rows must be positive; got 0")

    # Input window: the step_rows most recent rows, ending at t (offset 0).
    x_offsets = np.arange(-(step_rows - 1), 1, 1)
    # Target window: the step_rows rows after t.
    y_offsets = np.arange(1, step_rows + 1, 1)

    # Periodic history: the target window shifted back 1..period_len whole
    # periods, interleaved per step.
    if period_len > 0:
        period_offsets = np.vstack(
            [y_offsets - period_rows * (pp + 1) for pp in range(period_len)])
        period_offsets = np.transpose(period_offsets, (1, 0)).flatten()
    else:
        period_offsets = np.array([])

    # x: (num_samples, input_length, num_nodes, input_dim)
    # y: (num_samples, output_length, num_nodes, output_dim)
    x, y = generate_graph_seq2seq2_io_data(
        input_df, x_offsets=x_offsets, period_offsets=period_offsets,
        y_offsets=y_offsets,
        step_rows=step_rows, period_len=period_len, period_steps=period_steps,
        # BUGFIX: period_units was previously not forwarded, so the inner
        # function always fell back to its own default.
        period_units=period_units,
        add_time_in_period=add_time_in_period)
    print("x shape:", x.shape, ", y shape:", y.shape)

    # Split chronologically and write one compressed npz per split.
    num_samples = x.shape[0]
    num_test = round(num_samples * test_rate)
    num_train = round(num_samples * train_rate)
    num_val = num_samples - num_test - num_train
    splits = {
        "train": (x[:num_train], y[:num_train]),
        "val": (x[num_train: num_train + num_val],
                y[num_train: num_train + num_val]),
        "test": (x[-num_test:], y[-num_test:]),
    }
    for cat, (_x, _y) in splits.items():
        print(cat, "x: ", _x.shape, "y:", _y.shape)
        np.savez_compressed(
            os.path.join(output_dir, "%s.npz" % cat),
            x=_x,
            y=_y,
            x_offsets=x_offsets.reshape(list(x_offsets.shape) + [1]),
            y_offsets=y_offsets.reshape(list(y_offsets.shape) + [1]),
        )

def main(args):
    """CLI entry point: forward the parsed arguments to the generator."""
    options = dict(
        input_df=args.input_df_filename,
        output_dir=args.output_dir,
        train_rate=args.train_rate,
        test_rate=args.test_rate,
        index_col=args.index_col,
        step_rows=args.step_rows,
        period_len=args.period_len,
        period_steps=args.period_steps,
        period_units=args.period_units,
    )
    generate_train_val_test(**options)

if __name__ == '__main__':
    # (flag, type, default, help) — one row per CLI option.
    cli_spec = [
        ("--output_dir", str, "data/METR-LA/", "Output directory."),
        ("--train_rate", float, 0.7, "split rate for training data"),
        ("--test_rate", float, 0.2, "split rate for test data"),
        ("--input_df_filename", str, "data/METR-LA/metr_la.csv", "Raw Input Data."),
        ("--index_col", str, "0", "Time Column index or name"),
        ("--period_len", int, 3, "period length for time series"),
        ("--step_rows", int, 12, "rows count in one step"),
        ("--period_steps", int, 24, "step number of one period"),
        ("--period_units", str, "D", "Time Units of one period"),
    ]
    parser = argparse.ArgumentParser()
    for flag, flag_type, flag_default, flag_help in cli_spec:
        parser.add_argument(flag, type=flag_type, default=flag_default,
                            help=flag_help)
    main(parser.parse_args())
