import os
import re
import glob
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset

from utils import tools
from sktime.utils.data_io import load_from_tsfile_to_dataframe
from collections import OrderedDict, defaultdict

def subsample(y, limit=256, factor=2):
    """
    Downsample a pd.Series by an integer `factor`, but only when it is longer
    than `limit`; shorter series are returned untouched (same object).
    """
    if len(y) <= limit:
        return y
    return y.iloc[::factor].reset_index(drop=True)

def interpolate_missing(y):
    """
    Fill NaN entries of a pd.Series via linear interpolation; leading/trailing
    NaNs are filled from both directions. A NaN-free series is returned as-is.
    """
    if not y.isna().any():
        return y
    return y.interpolate(method='linear', limit_direction='both')

def proprecessing_data(data, target, preprocessingflag):
    """
    Split a flat (num_samples * ts_len, num_features) DataFrame into per-sample
    windows keyed by the sample index, optionally scaling inputs and targets.

    Args:
        data: pd.DataFrame stacking all samples' time steps; its length must be
            an integer multiple of len(target) — TODO confirm for callers.
        target: pd.Series/DataFrame of one regression target per sample.
        preprocessingflag: 'standard' / 'maxmin' / 'norm' to fit scalers via
            tools.DataPreprocessing; any other value skips scaling.

    Returns:
        (datadict, x_scaler, target_scaler) where datadict maps str(sample
        index) -> (x_window ndarray of shape (ts_len, num_features),
        target_value ndarray of shape (1,)); both scalers are None when no
        preprocessing was applied.
    """
    # np.int was deprecated in NumPy 1.20 and removed in 1.24 — use builtin int.
    ts_len = int(len(data) / len(target))
    if preprocessingflag in ('standard', 'maxmin', 'norm'):
        dp = tools.DataPreprocessing(mode=preprocessingflag)
        target, target_scaler = dp.preprocess(target.values.reshape(-1, 1))
        x, x_scaler = dp.preprocess(data.values)
    else:
        target = target.values.reshape(-1, 1)
        x = data.values
        x_scaler = None
        target_scaler = None

    datadict = {}
    for i, v in enumerate(target):
        # slice out this sample's contiguous window of ts_len rows
        startidx = ts_len * i
        datadict[str(i)] = (x[startidx:startidx + ts_len], v)
    return datadict, x_scaler, target_scaler

class Monash_Regression_Dataset(Dataset):
    """
    PyTorch Dataset for time-series extrinsic regression archives stored as
    sktime ``.ts`` files (one file per split, named ``*_<flag>.ts``), e.g. from
    the Time Series Regression Archive (www.timeseriesregression.org).

    All samples are loaded eagerly, optionally subsampled and scaled, and
    cached in memory as numpy arrays.

    Attributes:
        alldata: dict mapping str(sample index) -> (x, y), where x is a
            (ts_len, ts_dim) ndarray and y the (possibly scaled) target.
        x_scaler / target_scaler: scalers fitted by utils.tools, or None when
            `self.preprocessing` is not one of 'standard'/'maxmin'/'norm'.
        ts_num: number of samples.
        ts_len, ts_dim: sequence length and feature dimension of sample '0'
            (assumes all samples share one shape — TODO confirm for
            variable-length datasets).
        max_seq_len: longest first-dimension series length seen while parsing
            the .ts file; set as a side effect of load_data().
        num_classes: fixed to 1 (single scalar regression target).
    """
    def __init__(self,
                 root_path,
                 data_path,
                 data_name,
                 flag='TRAIN',
                 config=None
                 ):

        # Expected layout: <root_path>/<data_path>/<data_name>/*_<flag>.ts
        self.root_path = root_path
        self.data_path = data_path
        self.data_name = data_name
        self.data_dir = os.path.join(self.root_path, self.data_path, self.data_name)
        self.flag = flag

        # config must provide dataencodingtype and subsample_factor (see load_data)
        self.config = config

        self.preprocessing = 'standard' # one of 'standard' / 'maxmin' / 'norm', anything else disables scaling

        self.alldata, self.x_scaler, self.target_scaler = self.__read_data(data_dir=self.data_dir)
        self.ts_num = len(self.alldata)
        # shape taken from the first sample; presumes every sample has the same
        # (ts_len, ts_dim) after preprocessing — verify for ragged datasets
        self.ts_len, self.ts_dim = self.alldata['0'][0].shape
        self.num_classes = 1

    def __read_data(self, data_dir):
        """
        Find this split's ``.ts`` file in `data_dir`, load it, and convert it to
        the per-sample dict produced by proprecessing_data.

        Returns:
            (datadict, x_scaler, target_scaler) — see proprecessing_data.

        Raises:
            Exception: if `data_dir` contains no files, or none ends with
                ``_<flag>.ts``.
        """
        file_list = glob.glob(os.path.join(data_dir, '*'))
        if len(file_list) == 0:
            raise Exception('No files found using: {}'.format(os.path.join(data_dir, '*')))
        # `pattern` is only used for the error message below; the actual match
        # requires the stricter suffix '_<flag>.ts'
        pattern = self.flag + '.ts'
        selected_file = [f for f in file_list if f.endswith(f'_{self.flag}.ts')]
        if len(selected_file) == 0:
            raise Exception("No .ts files found using pattern: '{}'".format(pattern))
        all_df, labels_df = self.load_data(selected_file[0])  # a single file contains dataset
        # convert to numpy and use dict to store
        data, x_scaler, target_scaler = proprecessing_data(all_df, labels_df, preprocessingflag=self.preprocessing)

        return data, x_scaler, target_scaler

    def load_data(self, filepath):
        """
        Parse one ``.ts`` file into a long-format DataFrame plus a labels frame.

        Returns:
            df: (num_samples * seq_len, num_dimensions) DataFrame whose integer
                index repeats the sample ID for each of that sample's time steps.
            labels_df: (num_samples, 1) float32 DataFrame of regression targets.

        Side effects:
            Sets self.max_seq_len to the longest first-dimension series length.
        """
        df, labels = tools.load_from_tsfile_to_dataframe_regression(filepath,
                                                                    return_separate_X_and_y=True,
                                                                    replace_missing_vals_with='NaN',
                                                                    encodingtype=self.config.dataencodingtype)
        labels_df = pd.DataFrame(labels, dtype=np.float32)
        lengths = df.applymap(lambda x: len(x)).values  # (num_samples, num_dimensions) array containing the length of each series
        # per-sample spread of lengths across dimensions, relative to dimension 0
        horiz_diffs = np.abs(lengths - np.expand_dims(lengths[:, 0], -1))

        # most general check: len(np.unique(lengths.values)) > 1:  # returns array of unique lengths of sequences
        if np.sum(horiz_diffs) > 0:  # if any row (sample) has varying length across dimensions
            print("Not all time series dimensions have same length - will attempt to fix by subsampling first dimension...")
            df = df.applymap(subsample)  # TODO: this addresses a very specific case (PPGDalia)

        # optional global subsampling; limit=0 forces it on every series
        if self.config.subsample_factor:
            df = df.applymap(lambda x: subsample(x, limit=0, factor=self.config.subsample_factor))

        lengths = df.applymap(lambda x: len(x)).values
        # per-dimension spread of lengths across samples, relative to sample 0
        vert_diffs = np.abs(lengths - np.expand_dims(lengths[0, :], 0))
        if np.sum(vert_diffs) > 0:  # if any column (dimension) has varying length across samples
            self.max_seq_len = int(np.max(lengths[:, 0]))
            print("Not all samples have same length: maximum length set to {}".format(self.max_seq_len))
        else:
            self.max_seq_len = lengths[0, 0]

        # First create a (seq_len, feat_dim) dataframe for each sample, indexed by a single integer ("ID" of the sample)
        # Then concatenate into a (num_samples * seq_len, feat_dim) dataframe, with multiple rows corresponding to the
        # sample index (i.e. the same scheme as all datasets in this project)
        df = pd.concat((pd.DataFrame({col: df.loc[row, col] for col in df.columns}).reset_index(drop=True).set_index(
            pd.Series(lengths[row, 0]*[row])) for row in range(df.shape[0])), axis=0)

        # Replace NaN values, interpolating within each sample independently
        grp = df.groupby(by=df.index)
        df = grp.transform(interpolate_missing)
        return df, labels_df

    def __getitem__(self, idx):
        """Return (seq_x tensor, seq_y tensor, sample-id string) for sample `idx`."""
        # alldata is keyed by the string form of the index, so both int and
        # str indices resolve to the same sample
        info = '{}'.format(idx)
        seq_x, seq_y = self.alldata[info]
        return torch.from_numpy(np.array(seq_x)), torch.from_numpy(np.array(seq_y)), info

    def __len__(self):
        """Number of samples in the split."""
        return self.ts_num


if __name__ == '__main__':
    # Smoke test: build the TRAIN split from command-line options and verify
    # that every sample can be fetched.
    import options
    args = options.Options().parse()

    mydata = Monash_Regression_Dataset(
        root_path=args.root_path,
        data_path=args.data_path,
        data_name=args.data_name,
        flag='TRAIN',
        config=args
    )
    print(len(mydata))

    for i in range(len(mydata)):
        seq_x, seq_y, info = mydata[i]

    # Inspect the last sample instead of a hardcoded index, which may not
    # exist for every dataset.
    seq_x, seq_y, info = mydata[len(mydata) - 1]
    print(info, seq_x.shape, seq_y)

    # target_scaler is None when preprocessing is disabled; only then is the
    # stored target already in raw units.
    if mydata.target_scaler is not None:
        # scalers fitted on (-1, 1)-shaped arrays expect 2-D input
        target = mydata.target_scaler.inverse_transform(seq_y.numpy().reshape(-1, 1))
        print(target)
