"""Reimplement TimeGAN-pytorch Codebase.

Reference: Jinsung Yoon, Daniel Jarrett, Mihaela van der Schaar, 
"Time-series Generative Adversarial Networks," 
Neural Information Processing Systems (NeurIPS), 2019.

Paper link: https://papers.nips.cc/paper/8789-time-series-generative-adversarial-networks

Last updated Date: October 18th 2021
Code author: Zhiwei Zhang (bitzzw@gmail.com)

-----------------------------

utils.py

(1) train_test_divide: Divide train and test data for both original and synthetic data.
(2) extract_time: Returns Maximum sequence length and each sequence length.
(3) random_generator: random vector generator
(4) NormMinMax: return data info
"""

## Necessary Packages
import numpy as np
import logging

import sys
import os
import pandas as pd
from os.path import dirname, abspath
import matplotlib.pyplot as plt

# from sklearn.ensemble import IsolationForest


def train_test_divide(data_x, data_x_hat, data_t, data_t_hat, train_rate=0.8):
    """Split both the original and the synthetic datasets into train/test parts.

    Args:
      - data_x: original data (list of sequences)
      - data_x_hat: generated data (list of sequences)
      - data_t: time/length information for the original data
      - data_t_hat: time/length information for the generated data
      - train_rate: fraction of samples assigned to the training split

    Returns:
      train_x, train_x_hat, test_x, test_x_hat,
      train_t, train_t_hat, test_t, test_t_hat
    """
    # NOTE(review): the permutation is unseeded, so splits differ between runs;
    # seed np.random beforehand if reproducible splits are needed.
    def split(samples, lengths):
        # Shuffle indices once, then cut at the train/test boundary.
        count = len(samples)
        order = np.random.permutation(count)
        cut = int(count * train_rate)
        train_ids, test_ids = order[:cut], order[cut:]
        return ([samples[k] for k in train_ids],
                [samples[k] for k in test_ids],
                [lengths[k] for k in train_ids],
                [lengths[k] for k in test_ids])

    # Original data first, then synthetic — keeps the RNG call order of the
    # previous implementation.
    train_x, test_x, train_t, test_t = split(data_x, data_t)
    train_x_hat, test_x_hat, train_t_hat, test_t_hat = split(data_x_hat, data_t_hat)

    return train_x, train_x_hat, test_x, test_x_hat, train_t, train_t_hat, test_t, test_t_hat


def extract_time(data):
    """Return each sequence's length and the maximum length in the dataset.

    Args:
      - data: iterable of 2-D sequences shaped [seq_len, feature_dim]

    Returns:
      - time: list with the length of every sequence, in input order
      - max_seq_len: the largest of those lengths (0 for empty input)
    """
    time = [len(seq[:, 0]) for seq in data]
    max_seq_len = max(time, default=0)
    return time, max_seq_len


def random_generator(batch_size, z_dim, T_mb, max_seq_len):
    """Random vector generation, zero-padded to a common length.

  Args:
    - batch_size: number of random sequences to generate
    - z_dim: dimension of the random vector at each time step
    - T_mb: per-sequence valid lengths (list of ints, one per sequence)
    - max_seq_len: length every returned sequence is padded to

  Returns:
    - Z_mb: list of [max_seq_len, z_dim] arrays; rows past T_mb[i] are zero
  """
    Z_mb = list()
    for i in range(batch_size):
        temp = np.zeros([max_seq_len, z_dim])
        temp_Z = np.random.uniform(0., 1, [T_mb[i], z_dim])
        temp[:T_mb[i], :] = temp_Z
        # Bug fix: append the zero-padded array. The original appended the
        # unpadded temp_Z, so the padding computed above was discarded and
        # max_seq_len had no effect on the output.
        Z_mb.append(temp)
    return Z_mb


def NormMinMax(data):
    """Min-Max Normalizer over samples and time steps (per-feature).

    Args:
      - data: raw data, [n_samples, seq_len, feature_dim]

    Returns:
      - norm_data: data scaled into [0, 1) per feature
      - min_val: per-feature minimum (for renormalization)
      - max_val: per-feature maximum of the shifted data (for renormalization)
    """
    # Reduce over the sample and time axes in one call; leaves [feature_dim].
    min_val = np.min(data, axis=(0, 1))
    shifted = data - min_val

    max_val = np.max(shifted, axis=(0, 1))
    # Small epsilon guards against division by zero for constant features.
    norm_data = shifted / (max_val + 1e-7)

    return norm_data, min_val, max_val


def get_logger(log_dir, name, log_filename='info.log', level=logging.INFO):
    """Build (or fetch) a named logger writing to stdout and, optionally, a file.

    Args:
      - log_dir: directory for the log file; '' disables file logging
      - name: logger name (use a distinct name per run to avoid sharing one logger)
      - log_filename: file name created inside log_dir
      - level: logging level for the logger

    Returns:
      - the configured logging.Logger instance
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    # Attach handlers only when none exist yet, so repeated calls with the
    # same name do not duplicate every log line.
    if not logger.handlers:
        if log_dir != '':
            os.makedirs(log_dir, exist_ok=True)
            file_handler = logging.FileHandler(os.path.join(log_dir, log_filename))
            file_handler.setFormatter(fmt)
            logger.addHandler(file_handler)

        # Console output always goes to stdout with the same format.
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(fmt)
        logger.addHandler(console_handler)

    logger.info('Log directory: %s', log_dir)
    return logger


def save_data(data, file_name):
    """Persist a table of values as data/<file_name>.csv without an index column.

    Args:
      - data: array-like (e.g. numpy array) accepted by pandas.DataFrame
      - file_name: output file name without the .csv extension
    """
    out_path = os.path.join('data', file_name + '.csv')
    pd.DataFrame(data).to_csv(out_path, index=False)


def mask_data(data_type, ratio, masked_indicator=None):
    '''Zero out a fraction of values to simulate missing sensor readings.

    Reads data/<data_type>_data.csv, blanks `ratio` of every 24-step window
    in the chosen column(s), and writes the result back under data/ with a
    'masked_' prefix. When masked_indicator is None, every column except the
    first is masked; otherwise only the named column is.

    Args:
        data_type: dataset name used to build the input/output file names
        ratio: fraction of each window to erase (0 disables erasing)
        masked_indicator: column name to mask, or None for all indicators
    '''
    src = os.path.join('data', data_type + '_data.csv')
    df = pd.read_csv(src)
    columns = df.columns
    dtypes = df.dtypes
    values = df.to_numpy()

    n_samples = values.shape[0]
    feature_dim = values.shape[-1]
    seq_len = 24
    # Sensor outages tend to last a while, so a contiguous gap is more likely
    # than scattered point failures.
    conti_prob = 0.8

    nan_size = int(seq_len * ratio)
    def mask_indicator(col):
        # Walk the series window by window, blanking nan_size entries each time.
        for start in range(0, n_samples - seq_len, seq_len):
            if np.random.choice([0, 1], p=[1 - conti_prob, conti_prob]):  # 1 = contiguous gap
                offset = np.random.randint(0, seq_len - nan_size)  # random gap position
                values[start + offset:start + offset + nan_size, col] = 0
            else:
                # Scattered gap: nan_size random positions (duplicates possible,
                # so slightly fewer than nan_size cells may actually be blanked).
                positions = np.random.randint(low=0, high=seq_len, size=nan_size)
                values[start + positions, col] = 0
    if masked_indicator is None:
        # Column 0 (typically the timestamp) is never masked.
        for col in range(1, feature_dim, 1):
            mask_indicator(col)
        file_name = 'masked_{}_data.csv'.format(data_type)
    else:
        mask_indicator(columns.get_loc(masked_indicator))
        file_name = 'masked_{}_{}_data.csv'.format(data_type, masked_indicator)

    out = pd.DataFrame(values, columns=columns).astype(dtypes)
    out.to_csv(os.path.join('data', file_name), index=False)


def mask_data2(data_path, data_type, indicator, ratio):
    '''Erase (zero out) only the indicator column that is to be predicted.

    Args:
        data_path: CSV file to read
        data_type: dataset name used in the output file name
        indicator: column whose values are blanked
        ratio: approximate fraction of rows to blank; row positions are drawn
               with replacement, so the actual fraction can be slightly lower

    Writes data/masked_<data_type>_<indicator>_data.csv.
    '''
    df = pd.read_csv(data_path)
    columns = df.columns
    dtypes = df.dtypes
    ori_data = df.to_numpy()

    n_samples = ori_data.shape[0]
    mask = np.random.randint(0, n_samples, int(n_samples * ratio))

    df = pd.DataFrame(ori_data, columns=columns)
    df = df.astype(dtypes)
    # Bug fix: the original used chained indexing (df[indicator][idx] = 0),
    # which is a SettingWithCopy hazard and silently writes to a temporary
    # copy under pandas copy-on-write. A single vectorized .loc assignment
    # is both correct and avoids the per-row Python loop.
    df.loc[mask, indicator] = 0
    df.to_csv(os.path.join('data', 'masked_'+data_type+'_'+indicator+'_data.csv'), index=False)


def three_sigma(data_df, indicator):
    '''Drop rows whose `indicator` value falls outside mean ± 3 std.

    For roughly normally distributed data, ~99.73% of values lie inside
    (μ - 3σ, μ + 3σ); values beyond that band are treated as outliers.

    Args:
        data_df: source DataFrame
        indicator: name of the column used for the outlier test

    Returns:
        the filtered DataFrame (original row order and index preserved)
    '''
    series = data_df[indicator]
    center = series.mean()
    spread = series.std()
    lower, upper = center - 3 * spread, center + 3 * spread
    keep = (series > lower) & (series < upper)
    return data_df[keep]


def wgn(sequence, snr):
    """Add white Gaussian noise at the given SNR to every series in a batch.

    Args:
        sequence: array of shape [bsz, seq_len, feature_dim]
        snr: signal-to-noise ratio in dB

    Returns:
        numpy array with the same shape as `sequence`, noise added per
        feature channel of each sample
    """
    noisy_batch = []

    for sample in sequence:
        # sample: [seq_len, feature_dim]; corrupt each feature channel
        # independently, then re-stack columns back to [seq_len, feature_dim].
        noisy_channels = [wgn_per_seq(sample[:, f], snr)
                          for f in range(sample.shape[1])]
        noisy_batch.append(np.stack(noisy_channels, axis=1))

    return np.stack(noisy_batch)


def wgn_per_seq(sequence, snr):
    """Add white Gaussian noise to one series at the requested SNR (in dB).

    Noise power is chosen so that signal_power / noise_power == 10**(snr/10).

    Args:
        sequence: 1-D array of signal samples
        snr: desired signal-to-noise ratio in dB

    Returns:
        sequence + noise, same shape as the input
    """
    signal_power = np.sum(abs(sequence) ** 2) / len(sequence)
    noise_power = signal_power / (10 ** (snr / 10))
    noise = np.random.randn(len(sequence)) * np.sqrt(noise_power)
    return sequence + noise

# def random_forest(data_df, indicator):
#     serise = data_df[indicator]
#     rng = np.random.RandomState(42)
#     # 构造训练样本
#     max_samples = 256  # 每棵树的子采样样本总数
#     outliers_fraction = 0.02  # 异常样本比例
#
#     clf = IsolationForest(max_samples=max_samples, random_state=rng, contamination=outliers_fraction)
#     input_data = serise.values.reshape(-1, 1)
#     clf.fit(input_data)  # n_sample, feature_dim
#
#     scores_pred = clf.decision_function(input_data)
#     is_anomaly = clf.predict(input_data)
#
#     return data_df[is_anomaly == 1]

# def random_forest2(data_df):
#     # 所有指标一起去除异常值
#     rng = np.random.RandomState(42)
#     # 构造训练样本
#     max_samples = 256  # 每棵树的子采样样本总数
#     outliers_fraction = 0.01  # 异常样本比例
#
#     clf = IsolationForest(max_samples=max_samples, random_state=rng, contamination=outliers_fraction)
#     input_data = data_df.values
#     clf.fit(input_data)  # n_sample, feature_dim
#
#     scores_pred = clf.decision_function(input_data)
#     is_anomaly = clf.predict(input_data)
#
#     return data_df[is_anomaly == 1]

if __name__ == '__main__':
    # Preprocessing driver: builds masked variants of the water dataset so the
    # imputation experiments have inputs with simulated missing values.
    # The commented-out section below is an earlier isolation-forest based
    # outlier-removal pipeline, kept for reference.
    # df = pd.read_csv('data/ori_water_data.csv')
    # df = df[df.columns[1:]]
    # df = random_forest2(df)
    # df.to_csv(os.path.join('data', 'water_data.csv'), index=False)
    #
    #
    # # res = three_sigma(df, 'DISSOLVED_OXYGEN')
    # res2 = random_forest(df, 'DISSOLVED_OXYGEN')
    # res2.to_csv(os.path.join('data', 'water_data.csv'), index=False)


    # Mask only the target indicator at ~30% of rows, then mask all indicators
    # at 25% and 30% of each 24-step window (outputs land in data/).
    mask_data2('data/water_data.csv', 'water', 'DISSOLVED_OXYGEN', 0.30)
    mask_data('water', 0.25)
    mask_data('water', 0.3)
