import numpy as np
from os.path import dirname, abspath, join
import pandas as pd
import os


class NormScalar:
    """Abstract base class for normalization scalers.

    Subclasses override ``get_statistics`` (invoked once at construction
    time), ``normalize`` and ``renormalize``.
    """

    def __init__(self, indicators):
        # Comma-separated indicator names selecting the dataset columns.
        self.indicators = indicators
        # Statistics (e.g. min/max or mean/std) supplied by the subclass.
        self.statistics = self.get_statistics()

    def get_statistics(self):
        """Compute the statistics used for (re)normalization; subclass hook."""
        pass

    def normalize(self, data, is_masked):
        """Normalize ``data``; subclass hook."""
        pass

    def renormalize(self, data):
        """Undo normalization; subclass hook."""
        pass


class MinMaxScalar(NormScalar):
    """Min-max scaler whose statistics always come from the complete water dataset."""

    def __init__(self, indicators):
        super(MinMaxScalar, self).__init__(indicators)
        self.min_val, self.max_val = self.statistics

    def get_statistics(self):
        """Return per-column (min, max) of the selected indicators from the complete CSV."""
        df = pd.read_csv(join(dirname(dirname(abspath(__file__))), 'data', 'water_data.csv'))
        df = df[self.indicators.split(',')]
        ori_data = df.to_numpy()
        return np.min(ori_data, 0), np.max(ori_data, 0)

    def normalize(self, data, is_masked):
        """Scale each column of ``data`` to roughly [0, 1].

        Args:
            data: ndarray [n_samples, n_features] — assumed ndarray; TODO
                confirm no caller passes plain lists on the masked path.
            is_masked: if True, zeros mark missing values; only non-zero
                entries are normalized, in place.

        Returns:
            The normalized data (same object as ``data`` on the masked path).
        """
        denom = self.max_val - self.min_val + 1e-7
        if is_masked:
            # Vectorized replacement of the original element-wise Python loop;
            # still mutates `data` in place and skips zero (missing) entries.
            # NOTE(review): a genuine observation equal to 0 is skipped too.
            observed = data != 0
            data[observed] = ((data - self.min_val) / denom)[observed]
        else:
            data = (data - self.min_val) / denom
        return data

    def renormalize(self, data):
        """Invert ``normalize`` (the 1e-7 epsilon makes this slightly inexact by design)."""
        return data * (self.max_val - self.min_val + 1e-7) + self.min_val


class MeanStdScalar(NormScalar):
    """Z-score scaler whose statistics always come from the complete water dataset."""

    def __init__(self, indicators):
        super(MeanStdScalar, self).__init__(indicators)
        self.mean_val, self.std_val = self.statistics

    def get_statistics(self):
        """Return per-column (mean, std) of the selected indicators from the complete CSV."""
        df = pd.read_csv(join(dirname(dirname(abspath(__file__))), 'data', 'water_data.csv'))
        df = df[self.indicators.split(',')]
        ori_data = df.to_numpy()
        return np.mean(ori_data, 0), np.std(ori_data, 0)

    def normalize(self, data, is_masked):
        """Standardize each column of ``data`` to zero mean / unit variance.

        Args:
            data: ndarray [n_samples, n_features] — assumed ndarray; TODO
                confirm no caller passes plain lists on the masked path.
            is_masked: if True, zeros mark missing values; only non-zero
                entries are standardized, in place.

        Returns:
            The standardized data (same object as ``data`` on the masked path).
        """
        denom = self.std_val + 1e-7
        if is_masked:
            # Vectorized replacement of the original element-wise Python loop;
            # still mutates `data` in place and skips zero (missing) entries.
            observed = data != 0
            data[observed] = ((data - self.mean_val) / denom)[observed]
        else:
            data = (data - self.mean_val) / denom
        return data

    def renormalize(self, data):
        """Invert ``normalize`` (the 1e-7 epsilon makes this slightly inexact by design)."""
        return data * (self.std_val + 1e-7) + self.mean_val


class Dataset:
    """Facade for loading the training / masked / original datasets."""

    def __init__(self, data_name, data_type, seq_len, indicators, masked_indicator):
        '''
        Args:
            data_name: dataset name: 'water', 'stock', 'energy' or 'sine'
            data_type: 'masked' (has missing values) or 'ori' (complete)
            seq_len: sequence (window) length
            indicators: comma-separated input indicator names
            masked_indicator: which indicator is masked; '' means all
        '''
        self.data_name = data_name
        self.data_type = data_type
        self.seq_len = seq_len
        self.indicators = indicators
        self.masked_indicator = masked_indicator
        # Normalization statistics always come from the complete dataset.
        # TODO switch to standardization instead of min-max
        # self.scalar = MeanStdScalar(indicators)
        self.scalar = MinMaxScalar(indicators)

    def load_train_data(self):
        """Load the training set (complete or masked).

        Not cached as an attribute: loading eagerly wastes memory and
        imputation never needs the training data.
        """
        if self.data_name in ['stock', 'energy']:
            ori_data, _min_val, _max_val = real_data_loading(self.data_name, self.seq_len)
        elif self.data_name == 'sine':
            # Set number of samples and its dimensions.
            no, dim = 10000, 5
            # Fix: sine_data_generation returns only the data list; the
            # original 3-way unpack raised ValueError at runtime.
            ori_data = sine_data_generation(no, self.seq_len, dim)
        elif self.data_name == 'water':
            is_masked = self.data_type == 'masked'
            # Training data is always shuffled, so shuffle is not a parameter.
            ori_data = water_data_loading(self.seq_len, self.indicators, self.scalar, self.masked_indicator,
                                          shuffle=True, is_masked=is_masked)
        else:
            # Fail fast instead of the original NameError on `ori_data`.
            raise ValueError('unsupported data_name: %s' % self.data_name)
        print(self.data_name + ' dataset is ready.')

        return ori_data

    def load_masked_data(self):
        """Load the masked (incomplete) dataset, only used for imputation.

        shuffle=False, is_masked=True; not cached to save memory.
        """
        if self.data_name != 'water':
            # The original silently fell through (`pass`) and then raised
            # UnboundLocalError; make the limitation explicit instead.
            raise NotImplementedError('masked data loading is only implemented for the water dataset')
        return water_data_loading(self.seq_len, self.indicators, self.scalar, self.masked_indicator,
                                  shuffle=False, is_masked=True)

    def load_ori_data(self):
        """Load the original complete data for metric computation (train & impute).

        Not shuffled so imputed values can be aligned with the ground truth
        (e.g. for MAE); not cached to save memory.
        TODO: does evaluation need shuffling?
        """
        if self.data_name != 'water':
            # Same fix as load_masked_data: explicit error over UnboundLocalError.
            raise NotImplementedError('original data loading is only implemented for the water dataset')
        return water_data_loading(self.seq_len, self.indicators, self.scalar, shuffle=False, is_masked=False)

    def load_imputed_data(self, data_path, seq_len):
        """Read an imputed CSV, normalize it with the shared scaler and window it.

        Args:
            data_path: path to a CSV of imputed values (all columns are used).
            seq_len: window length.

        Returns:
            ndarray [n_windows, seq_len, n_features].
        """
        df = pd.read_csv(data_path)
        ori_data = df.to_numpy()
        ori_data = self.scalar.normalize(ori_data, False)
        # NOTE(review): the range stops at len - seq_len, which drops the last
        # full window — kept for consistency with the other loaders here.
        windowed_data = [ori_data[i:i + seq_len] for i in range(0, len(ori_data) - seq_len)]
        return np.asarray(windowed_data)

    # TODO batch_gen / next_batch are called from many places; keep them as
    # free functions for now, i.e. Dataset does not provide iteration itself.


def MinMaxScalar_func(data):
    """Column-wise min-max normalization.

    Args:
        data: 2-D array [n_samples, n_features].

    Returns:
        (norm_data, min_val, max_val) — data scaled to roughly [0, 1]
        plus the per-column statistics used for the scaling.
    """
    min_val, max_val = np.min(data, 0), np.max(data, 0)
    # 1e-7 guards against division by zero for constant columns.
    norm_data = (data - min_val) / (max_val - min_val + 1e-7)
    return norm_data, min_val, max_val


def sine_data_generation(no, seq_len, dim):
    """Generate random sine time-series samples.

    Args:
        no: number of samples.
        seq_len: sequence length of each sample.
        dim: number of features per time step.

    Returns:
        List of `no` ndarrays of shape [seq_len, dim], scaled to [0, 1].
    """
    samples = list()

    for _ in range(no):
        channels = list()
        for _ in range(dim):
            # Random frequency and phase per feature.
            freq = np.random.uniform(0, 0.1)
            phase = np.random.uniform(0, 0.1)
            channels.append([np.sin(freq * t + phase) for t in range(seq_len)])

        # Transpose to [seq_len, dim] and map sin's [-1, 1] range onto [0, 1].
        sample = np.asarray(channels).T
        samples.append((sample + 1) * 0.5)

    return samples


def real_data_loading(data_name, seq_len):
    """Load the stock or energy dataset: min-max normalize, window, shuffle.

    Args:
        data_name: 'stock' or 'energy'.
        seq_len: window length.

    Returns:
        (data, min_val, max_val) — a shuffled list of [seq_len, n_features]
        windows plus the normalization statistics.

    Raises:
        ValueError: for any other data_name (the original fell through to a
            NameError on `ori_data`).
    """
    base = dirname(dirname(abspath(__file__)))
    if data_name == 'stock':
        ori_data = np.loadtxt(base + '/data/stock_data.csv', delimiter=",", skiprows=1)
    elif data_name == 'energy':
        ori_data = np.loadtxt(base + '/data/energy_data.csv', delimiter=",", skiprows=1)
    else:
        raise ValueError('unsupported data_name: %s' % data_name)

    # Flip to make the data chronological — presumably the raw files are
    # newest-first (TODO confirm against the source files).
    ori_data = ori_data[::-1]
    # Normalize the data.
    ori_data, min_val, max_val = MinMaxScalar_func(ori_data)

    # Cut into windows of length seq_len.
    temp_data = [ori_data[i:i + seq_len] for i in range(0, len(ori_data) - seq_len)]

    # Shuffle to make the windows approximately i.i.d.
    idx = np.random.permutation(len(temp_data))
    data = [temp_data[i] for i in idx]

    return data, min_val, max_val


def water_data_loading(seq_len, indicators, scalar: NormScalar, masked_indicator='',
                       shuffle=True, is_normalize=True, is_masked=False):
    '''Load the water-quality data: normalize, window and optionally shuffle.

    Args:
        The first three arguments are required; the rest have defaults.
        seq_len: window length
        indicators: comma-separated column names to load
        scalar: shared scaler; whether the data is complete or masked, the
            same min/max (or mean/std) from the complete dataset are used
        masked_indicator: selects which masked CSV to load ('' = all masked)
        shuffle: shuffle the windows
        is_normalize: apply ``scalar.normalize``
        is_masked: load the masked (incomplete) CSV instead of the complete one

    Returns:
        ndarray [n_windows, seq_len, n_features]
    '''
    root = dirname(dirname(abspath(__file__)))
    if is_masked:  # masked (incomplete) dataset
        file_name = ('masked_water_data.csv' if masked_indicator == ''
                     else 'masked_water_' + masked_indicator + '_data.csv')
        data_path = os.path.join(root, 'data', file_name)
    else:  # complete dataset
        data_path = 'water_data.csv'

    # Read the CSV and keep only the requested indicator columns.
    df = pd.read_csv(join(root, 'data', data_path))
    ori_data = df[indicators.split(',')].to_numpy()

    # 1. normalize
    if is_normalize:
        ori_data = scalar.normalize(ori_data, is_masked)

    # 2. window
    windowed_data = [ori_data[i:i + seq_len] for i in range(0, len(ori_data) - seq_len)]

    # 3. shuffle
    if shuffle:
        order = np.random.permutation(len(windowed_data))
        data = [windowed_data[i] for i in order]
    else:
        data = windowed_data

    return np.asarray(data)


# Samples randomly, not sequentially.
def batch_generator(data, time, batch_size):
    """Mini-batch generator.

    Args:
      - data: time-series data
      - time: time information (currently unused)
      - batch_size: the number of samples in each batch

    Returns:
      - X_mb: time-series data in each batch, list of [seq_len, feature_dim]
      - None: placeholder for time information (not produced at present)
  """
    chosen = np.random.permutation(len(data))[:batch_size]

    X_mb = [data[i] for i in chosen]
    # T_mb = [time[i] for i in chosen]

    return X_mb, None


def next_batch(data, batch_size):
    '''Yield consecutive full batches; a trailing partial batch is dropped.

        data: [n_samples, seq_len, feature_size]
        batch_size: number of samples per batch
    '''
    n_full = len(data) // batch_size
    for b in range(n_full):
        yield data[b * batch_size:(b + 1) * batch_size]


def get_delta_pre(input: list, delta_type='normal') -> list:
    """Build the time-gap (delta) matrix for decay-style models from a batch of
    sequences where 0 marks a missing value.

    Args:
        input: list of [seq_len, feature_dim] arrays; zeros denote missing entries.
        delta_type: 'normal' keeps cumulative gaps; 'time_delta' resets the gap
            at missing positions to one sampling interval; any other value
            resets it to 0 (no decay).

    Returns:
        Nested list [batch, seq_len, feature_dim] of time gaps in hours
        (converted to list since tensor construction is faster from lists).
    """
    seq_len, feature_dim = input[0].shape
    # Fix: np.int was removed in NumPy 1.20+ (AttributeError on modern NumPy);
    # the builtin int is the documented replacement.
    m = (np.asarray(input) != 0).astype(int)

    # 1. Build the time-gap matrix from the observation mask.
    time_delta = 4  # sampling interval: 4h
    delta_pre = []
    for one_m in m:  # iterate over the batch; one_m is [seq_len, feature_dim]
        one_delta_pre = []  # [seq_len, feature_dim]

        for i in range(seq_len):
            tmp_delta_pre = [0.0] * feature_dim
            one_delta_pre.append(tmp_delta_pre)
            if i == 0:
                continue
            else:
                for j in range(feature_dim):
                    if one_m[i - 1][j] == 1:
                        # Previous observation present: gap is one interval.
                        # TODO: should a valid previous value mean no decay (0) or time_delta?
                        one_delta_pre[i][j] = time_delta
                    elif one_m[i - 1][j] == 0:
                        # Previous missing: accumulate the gap from the last observation.
                        one_delta_pre[i][j] = time_delta + one_delta_pre[i - 1][j]
        delta_pre.append(one_delta_pre)
    delta_pre = np.asarray(delta_pre)

    if delta_type != 'normal':
        missing = np.asarray(input) == 0
        # At missing inputs: 'time_delta' treats the previous step as one
        # interval away (no extra decay); otherwise disable decay with 0.
        delta_pre[missing] = time_delta if delta_type == 'time_delta' else 0
    # 2. (decay weights are computed by the caller)
    return delta_pre.tolist()


if __name__ == '__main__':
    # No standalone entry point; this module is imported for its loaders.
    pass
