import argparse
from functools import partial

import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset

from data import loadwindpower, loadisdb, loadmonash

def padding_attn_collate(data, max_len=None):
    """Collate a batch of variable-length sequences into zero-padded tensors.

    :param data: a batch from the dataset (as passed by DataLoader), i.e. a list of
                 tuples::

                     [
                     (features, labels, info),
                     (features, labels, info),
                     ....
                     ]

                 where ``features`` is a (length, feat_dim) tensor whose length may
                 differ per sample.
    :param max_len: fixed padded length; if ``None``, the longest sequence in the
                    batch is used. Sequences longer than ``max_len`` are truncated.
    :return: tuple of
             - ``X``: (batch_size, max_len, feat_dim) zero-padded features
             - ``targets``: (batch_size, num_labels) stacked labels
             - padding mask: (batch_size, max_len) bool tensor, ``True`` at
               PADDED positions (ready for attention masking)
             - ``infonew``: stacked info, a torch tensor or numpy array depending
               on the per-sample info type
    """
    batch_size = len(data)
    features, labels, info = zip(*data)  # unzip the batch into parallel tuples
    lengths = [X.shape[0] for X in features]  # original length of each (length, dim) series
    if max_len is None:
        max_len = max(lengths)
    X = torch.zeros(batch_size, max_len, features[0].shape[-1])  # (batch_size, padded_length, feat_dim)
    for i in range(batch_size):  # fill the data into X
        # if max_len is given and shorter than the sequence, truncate at max_len
        end = min(lengths[i], max_len)
        X[i, :end, :] = features[i][:end, :]
    targets = torch.stack(labels, dim=0)  # (batch_size, num_labels)
    # int64 instead of int16: avoids overflow for sequences longer than 32767,
    # since type_as() below would otherwise cast the position range to int16 too.
    tensor_length = torch.tensor(lengths, dtype=torch.int64)
    # NOTE: the old line `max_len = max_len or tensor_length.max_val()` was dead
    # (max_len is always set above) and called a nonexistent method; removed.
    # True at valid (non-padded) positions: position index < sequence length.
    the_padding_mask = (
        torch.arange(0, max_len, device=tensor_length.device)
        .type_as(tensor_length)
        .repeat(batch_size, 1)
        .lt(tensor_length.unsqueeze(1))
    )
    # Check the per-sample info items: `info` itself is a tuple (from zip) and
    # is never a tensor, so the old `torch.is_tensor(info)` was always False.
    if torch.is_tensor(info[0]):
        infonew = torch.stack(info, dim=0)  # (batch_size, num_labels)
    else:
        infonew = np.stack(info, axis=0)
    return X, targets, ~the_padding_mask, infonew




if __name__ == "__main__":

    '''
    ----------------------------------------------------------------------------------------------
    common config for all data
    '''
    parser = argparse.ArgumentParser(description='[TII] Experiment')
    parser.add_argument('--root_path', type=str, default='/home/kexin/phdwork/work4-tii/data', help='root path of the data file')
    parser.add_argument('--data_name', type=str, default='AppliancesEnergy', help='data_name')
    parser.add_argument('--data_path', type=str, default='Monash/Monash_UEA_UCR_Regression_Archive',
                        help='Monash/Monash_UEA_UCR_Regression_Archive -> [AppliancesEnergy]'
                             'Windpower/raw_windpower/dataset -> [JSDF001]'
                             'Isdb -> isdb')
    parser.add_argument('--num_workers', type=int, default=0, help='dataloader num workers')
    '''
       -----------------------------------------------------------------------------------------------
       config for ISDB classification
       '''
    parser.add_argument('--test_mode', type=str, default='test_26', help='Whether to use 26 loops to test or'
                                                                         'randomly select N loops to test')
    parser.add_argument('--test_n', type=int, default=15,
                        help='how many loops are used to test in each class if test_mode is test_random')

    '''
    -----------------------------------------------------------------------------------------------
    config for Monash Regression
    '''
    parser.add_argument('--subsample_factor', type=int, default=None,
                        help='Sub-sampling factor used for long sequences: keep every kth sample')
    parser.add_argument('--dataencodingtype', type=str, default='utf-8',
                        help='using ISO-8859-1 for specific Monash Regression Datset: BIDMC32HR，BIDMC32RR，and BIDMC32SpO2')

    parser.add_argument('--batch_size', type=int, default=8, help='batch size of train input data')
    args = parser.parse_args()

    # Map each supported data_path to its data-source key. The Windpower
    # entry was missing even though the --data_path help text and data_dict
    # below both support it, so windpower runs crashed with a bare KeyError.
    source_dict = {
        'Monash/Monash_UEA_UCR_Regression_Archive': 'MONASH',
        'Windpower/raw_windpower/dataset': 'WIND',
        'Isdb': 'ISDB',
    }
    try:
        args.data_source = source_dict[args.data_path]
    except KeyError as err:
        raise ValueError(
            f'Unknown data_path {args.data_path!r}; expected one of {sorted(source_dict)}'
        ) from err

    # ============================================ #
    data_dict = {
        'ISDB': loadisdb.Dataset_isdb,
        'WIND': loadwindpower.Dataset_windpower,
        'MONASH': loadmonash.Monash_Regression_Dataset,
    }
    flag = 'TRAIN'
    Data = data_dict[args.data_source]

    if flag == 'TRAIN':
        shuffle_flag = True
        drop_last = False
        batch_size = args.batch_size
    elif flag == 'TEST':
        shuffle_flag = False
        drop_last = False
        batch_size = args.batch_size
    elif flag == 'VAL':
        shuffle_flag = True
        drop_last = False
        batch_size = 1
    else:
        raise ValueError('Unknown flag, TRAIN or VAL or TEST is required')

    data_set = Data(root_path=args.root_path, data_path=args.data_path, data_name=args.data_name,
                    flag=flag, config=args)  # line 62 in main.py in mvts_transformer

    # functools.partial is picklable, unlike a lambda, so this collate_fn also
    # works with num_workers > 0 on platforms that spawn worker processes.
    collate_fn = partial(padding_attn_collate, max_len=144)
    data_loader = DataLoader(data_set, batch_size=batch_size, shuffle=shuffle_flag,
                             num_workers=args.num_workers, drop_last=drop_last,
                             collate_fn=collate_fn)

    # smoke-check: iterate the loader and print each padded batch shape
    for i, x in enumerate(data_loader):
        print('main', x[0].shape)