import os
import math
import numpy
import numpy as np
import torch
from sktime.utils.data_io import load_from_tsfile_to_dataframe
from torch.utils.data import Dataset
from utils import tools

def proprecessing_data(x, target, preprocessingflag):
    """Optionally scale a batch of series and index them into a dict.

    Args:
        x: ndarray of shape (ts_num, ts_len, ts_dim) — one row per series.
        target: iterable of labels, one per series (len == ts_num).
        preprocessingflag: one of 'standard', 'maxmin', 'norm' to scale via
            tools.DataPreprocessing; any other value skips scaling.

    Returns:
        (datadict, x_scaler) where datadict maps str(index) -> (series, label)
        with series of shape (ts_len, ts_dim), and x_scaler is the fitted
        scaler or None when no preprocessing was applied.
    """
    ts_num, ts_len, ts_dim = x.shape
    # Flatten (N, L, D) -> (N*L, D) so the scaler sees one long 2-D table.
    # Row order is preserved: series i occupies rows i*L .. (i+1)*L - 1.
    # (Equivalent to the old split/concatenate/squeeze chain, and it also
    # fixes the no-scaling branch, which previously left x 3-D and made the
    # per-series slicing below index the wrong axis.)
    x = x.reshape(ts_num * ts_len, ts_dim)
    if preprocessingflag in ('standard', 'maxmin', 'norm'):
        dp = tools.DataPreprocessing(mode=preprocessingflag)
        x, x_scaler = dp.preprocess(x)
    else:
        x_scaler = None
    datadict = {}
    for i, v in enumerate(target):
        # np.int was removed in NumPy 1.24; plain int arithmetic suffices here.
        start = i * ts_len
        datadict[str(i)] = (x[start:start + ts_len, :], v)
    return datadict, x_scaler


def load_UEA_dataset(root_path,
                     data_path,
                     data_name):
    """Load one UEA archive dataset from its .ts files.

    Reads <root_path>/<data_path>/<data_name>/<data_name>_{TRAIN,TEST}.ts
    and converts each split into a dense float array.

    Returns:
        (train, train_labels, test, test_labels) where train/test have shape
        (n_series, ts_length, ts_dim) and the labels are integer arrays
        remapped to {0, ..., L-1} based on the classes seen in TRAIN.
    """
    def _load_split(suffix):
        # One split: sktime returns a DataFrame of per-cell Series plus labels.
        path = os.path.join(root_path, data_path, data_name, data_name + suffix)
        df, labels = load_from_tsfile_to_dataframe(path)
        # NOTE(review): DataFrame.applymap is deprecated in pandas >= 2.1 in
        # favor of DataFrame.map — kept for compatibility with older pandas.
        return df.applymap(lambda s: s.astype(np.float32)).to_numpy(), labels

    train_x, train_y = _load_split('_TRAIN.ts')
    test_x, test_y = _load_split('_TEST.ts')

    ts_dim = train_x.shape[1]
    ts_length = len(train_x[0][0])

    def _to_dense(raw):
        # Pack the (n_series, ts_dim) object array of per-channel Series into
        # a dense (n_series, ts_length, ts_dim) array.
        out = np.empty((len(raw), ts_length, ts_dim))
        for i in range(len(raw)):
            for j in range(ts_dim):
                out[i, :, j] = raw[i][j]
        return out

    train = _to_dense(train_x)
    test = _to_dense(test_x)
    print(train.shape, test.shape)

    # Move the labels to {0, ..., L-1}, ordered by np.unique over TRAIN.
    # NOTE(review): a test label absent from TRAIN maps silently to None via
    # dict.get — verify the archive guarantees identical label sets.
    train_labels = [train_y[i] for i in range(len(train_x))]
    test_labels = [test_y[i] for i in range(len(test_x))]
    transform = {l: i for i, l in enumerate(np.unique(train_labels))}
    train_labels = np.vectorize(transform.get)(train_labels)
    test_labels = np.vectorize(transform.get)(test_labels)
    return train, train_labels, test, test_labels

class Dataset_UEA(Dataset):
    """Torch Dataset over one split (TRAIN or TEST) of a UEA archive dataset.

    Each item is (series_tensor, label_tensor, key) where series_tensor has
    shape (ts_len, ts_dim) and key is the string index of the series.
    """

    def __init__(self,
                 root_path,
                 data_path,
                 data_name,
                 flag='TRAIN',
                 config=None
                ):
        self.root_path = root_path
        self.data_path = data_path
        self.data_name = data_name
        self.flag = flag
        self.config = config
        # Scaling mode handed to proprecessing_data:
        # 'standard', 'maxmin' or 'norm'.
        self.preprocessing = 'standard'

        self.datadict, self.x_scaler = self.__read_data()

        self.ts_num = len(self.datadict)
        self.ts_len, self.ts_dim = self.datadict['0'][0].shape
        self.num_classes = self.nc

    def __read_data(self):
        # Validate the flag before touching the filesystem.
        if self.flag not in ('TRAIN', 'TEST'):
            raise ValueError('Unknown flag, TRAIN or TEST is required')
        splits = load_UEA_dataset(
            root_path=self.root_path,
            data_path=self.data_path,
            data_name=self.data_name)
        # load_UEA_dataset returns (train, train_labels, test, test_labels).
        x, y = (splits[0], splits[1]) if self.flag == 'TRAIN' else (splits[2], splits[3])
        self.nc = len(np.unique(y))
        return proprecessing_data(x, y, preprocessingflag=self.preprocessing)

    def inverse_transform(self, idx):
        # Not implemented; scaler inversion is left to the caller.
        pass

    def __getitem__(self, idx):
        key = str(idx)
        seq_x, seq_y = self.datadict[key]
        return torch.from_numpy(seq_x), torch.from_numpy(np.array(seq_y)), key

    def __len__(self):
        return self.ts_num

if __name__ == "__main__":
    # Available UEA datasets:
    # ArticularyWordRecognition, AtrialFibrillation, BasicMotions,
    # CharacterTrajectories, Cricket, DuckDuckGeese, ERing, EigenWorms,
    # Epilepsy, EthanolConcentration, FaceDetection, FingerMovements,
    # HandMovementDirection, Handwriting, Heartbeat, InsectWingbeat,
    # JapaneseVowels, LSST, Libras, MotorImagery, NATOPS, PEMS-SF,
    # PenDigits, PhonemeSpectra, RacketSports, SelfRegulationSCP1,
    # SelfRegulationSCP2, SpokenArabicDigits, StandWalkJump,
    # UWaveGestureLibrary
    import options

    args = options.Options().parse()

    # Smoke test: load the TEST split and inspect one sample.
    uea = Dataset_UEA(
        root_path=args.root_path,
        data_path=args.data_path,
        data_name=args.data_name,
        flag='TEST')
    print('-[UEA data class]:', uea)
    print('-[Num of data]', uea.ts_num)
    print('-[Len of data]', uea.ts_len)
    print('-[Dim of data]', uea.ts_dim)

    x, y, idx = uea[5]
    print('-[single data shape]:', x.shape)
    print('-[dim of data]:', x.shape[1])
    print('-[length of data]:', x.shape[0])
    print('-[single data label]:', y)
    print('-[idx]', idx)