# -*- encoding: utf-8 -*-
'''
@File    :   work_flow.py
@Time    :   2021/11/26 13:43
@Author  :   ZhangChaoYang
@Desc    :   工作流步骤封装
'''

import os
import numpy as np
import tensorflow as tf
from sklearn import model_selection
from util.normalization import layer_norm
from util.signal_processing import fft, sfft, cwt, statistics_feature

# Supported signal transforms (consumed by transform()): raw window, FFT,
# statistical features, short-time FFT, continuous wavelet transform.
trans_collection = ['original', 'fft', 'stat', 'sfft', 'cwt']
# Bearing condition classes: healthy ('normal') plus three fault locations.
classes = ['normal', 'ball', 'inner', 'outer']
# Map class name -> integer label index (its position in `classes`).
class2label = {v: i for i, v in enumerate(classes)}


def get_label_by_class1(cls):
    '''
    Build the one-hot label vector for the multi-class task.
    :param cls: one of `classes` ('normal', 'ball', 'inner', 'outer')
    :return: 1-D float ndarray of length len(classes) with a single 1.0
    :raises ValueError: if cls is not a known class
    '''
    if cls not in class2label:
        # The old `class2label.get(cls, -1)` silently turned an unknown class
        # into the one-hot vector of the LAST class (index -1); fail loudly.
        raise ValueError("unknown class {}".format(cls))
    label = np.zeros(shape=(len(classes),))
    label[class2label[cls]] = 1  # one-hot encoding
    return label


def get_label_by_class2(cls):
    '''
    Build the scalar label for the binary (normal vs. anomaly) task.
    :param cls: one of `classes`
    :return: 1.0 for 'normal', 0.0 for any fault class
    '''
    return 1.0 if cls == "normal" else 0.0


def gen_outfile(data_dim, corpus, data_trans, model_name, work_loads=None):
    '''
    Build all input/output file paths for one training run and make sure the
    model output directory exists.
    :param data_dim: corpus layout, '1d' or '2d'
    :param corpus: corpus name
    :param data_trans: transform name (one of trans_collection)
    :param model_name: model identifier used as the output file-name stem
    :param work_loads: optional list of work-load sub-directories, each holding
                       its own normal.npy; falsy means a single normal.npy
    :return: (normal_data_files, anomaly_data_file, train_history_file,
              check_file, model_file, scaler_file)
    '''
    # None replaces the old mutable default `[]` (shared across calls).
    corpus_dir = os.path.join("corpus", data_dim, corpus, data_trans)
    model_dir = os.path.join("data", "model", data_dim, corpus, data_trans)
    if work_loads:
        normal_data_files = [os.path.join(corpus_dir, work_load, "normal.npy")
                             for work_load in work_loads]
    else:
        normal_data_files = [os.path.join(corpus_dir, "normal.npy")]
    anomaly_data_file = os.path.join(corpus_dir, "anomaly.npy")
    train_history_file = os.path.join(model_dir, model_name + "_train_history.png")
    check_file = os.path.join(model_dir, model_name + "_check.png")
    model_file = os.path.join(model_dir, model_name)
    scaler_file = os.path.join(model_dir, model_name + "_scaler")
    # exist_ok avoids the race between the old exists() check and makedirs().
    os.makedirs(os.path.dirname(model_file), exist_ok=True)

    return (normal_data_files, anomaly_data_file, train_history_file, check_file, model_file, scaler_file)


def cut_sequence(sequence, window_size, step_roll_ratio=1.0):
    '''
    Slice one long 1-D array into many fixed-length 1-D windows.
    :param sequence: 1-D array-like
    :param window_size: length of each window
    :param step_roll_ratio: sliding step as a fraction of window_size
    :return: 2-D ndarray of shape (num_windows, window_size), or None when
             the sequence is shorter than a single window
    '''
    if len(sequence) < window_size:
        return None
    move_step = int(window_size * step_roll_ratio)
    if move_step <= 0:
        move_step = window_size
    # The +1 keeps the final full window: with the old exclusive bound
    # `len(sequence) - window_size`, a sequence of exactly window_size length
    # passed the guard above yet produced zero windows, and the last full
    # window was always dropped.
    windows = [sequence[begin:begin + window_size]
               for begin in range(0, len(sequence) - window_size + 1, move_step)]
    return np.asarray(windows)


def transform(trans, array, window_size, step_roll_ratio, fs):
    '''
    Turn one raw signal into a batch of samples using the chosen transform.
    :param trans: transform name, one of trans_collection
    :param array: 1-D raw signal
    :param window_size: window length forwarded to cut_sequence
    :param step_roll_ratio: sliding-step ratio forwarded to cut_sequence
    :param fs: sampling frequency for the spectral transforms
    :return: ndarray of samples; the exact shape depends on the transform
    :raises ValueError: if trans is not a supported transform
    '''
    x = cut_sequence(array, window_size, step_roll_ratio)
    if trans == "original":
        return np.expand_dims(x, -1)  # append a channel axis
    elif trans == "fft":
        _, x = fft(x, fs)
        return np.expand_dims(x[:, :-1], -1)
    elif trans == "stat":
        return np.expand_dims(np.stack(statistics_feature(x), axis=1), -1)
    elif trans == "sfft":
        _, _, x = sfft(x, fs)
        return x[:, :-1, :-1]
    elif trans == "cwt":
        # Collect every per-window scalogram and stack once at the end:
        # the old per-iteration np.vstack re-copied the accumulator each
        # time, i.e. quadratic total copying.
        matrices = []
        for sample in x:
            _, matrix = cwt(sample, fs)
            matrices.append(np.array(matrix, dtype=float))
        return np.vstack(matrices)
    # The old code fell through and silently returned None here.
    raise ValueError("unsupported transform {}".format(trans))


def preprocess(normal_data_files, anomaly_data_file, data_dim, normalize=False):
    '''
    Load normal and anomaly sample files and shape them for model input.
    :param normal_data_files: list of .npy paths with normal samples, stacked
                              along axis 0
    :param anomaly_data_file: single .npy path with anomaly samples
    :param data_dim: '1d' (a trailing channel axis is appended) or '2d' (kept)
    :param normalize: apply layer_norm to both arrays; off by default because
                      normalization was found unhelpful for anomaly detection
    :return: (X, ano_X) ndarrays of normal and anomaly samples
    :raises ValueError: on an unsupported data_dim
    '''
    # Load all files first and stack once; the old per-file np.vstack loop
    # re-copied the accumulator every iteration (quadratic copying).
    arrays = [np.load(f) for f in normal_data_files]
    X = arrays[0] if len(arrays) == 1 else np.vstack(arrays)
    ano_X = np.load(anomaly_data_file)
    if normalize:  # neither inter- nor intra-row normalization helped anomaly detection
        X = layer_norm(X)
        ano_X = layer_norm(ano_X)
    if data_dim == "1d":
        X = np.expand_dims(X, -1)  # 1-D curves -> 2-D single-channel "images"
        ano_X = np.expand_dims(ano_X, -1)
    elif data_dim != "2d":
        # ValueError is a subclass of the old bare Exception, so existing
        # `except Exception` callers still work.
        raise ValueError("unsupported data dim {}".format(data_dim))
    print("normal_data_files", normal_data_files)
    print("anomaly_data_file", anomaly_data_file)
    print("X shape", X.shape)
    print("ano_X shape", ano_X.shape)
    return X, ano_X


def preprocess1(data_files, data_dim, normalize=False):
    '''
    Load one set of sample files and shape them for model input.
    :param data_files: list of .npy paths, stacked along axis 0
    :param data_dim: '1d' (a trailing channel axis is appended) or '2d' (kept)
    :param normalize: apply layer_norm before reshaping
    :return: ndarray of samples, or None when data_files is empty
    :raises ValueError: on an unsupported data_dim
    '''
    if not data_files:
        return None
    # Load all files first and stack once; the old per-file np.vstack loop
    # re-copied the accumulator every iteration (quadratic copying).
    arrays = [np.load(f) for f in data_files]
    X = arrays[0] if len(arrays) == 1 else np.vstack(arrays)
    if normalize:
        X = layer_norm(X)
    if data_dim == "1d":
        X = np.expand_dims(X, -1)  # 1-D curves -> 2-D single-channel "images"
    elif data_dim != "2d":
        # ValueError is a subclass of the old bare Exception, so existing
        # `except Exception` callers still work.
        raise ValueError("unsupported data dim {}".format(data_dim))
    print("data_files", data_files)
    print("X shape", X.shape)
    return X


def save_model(model, model_file):
    '''Persist a trained model to model_file via the model's own save().'''
    # Format (SavedModel directory vs. single file) is decided by Keras from
    # the model_file path — presumably the TF SavedModel default; TODO confirm.
    model.save(model_file)


def load_model(model_file):
    '''Load a Keras model previously written by save_model().'''
    # compile=False skips restoring the optimizer/loss configuration —
    # presumably the model is only used for inference here; verify against
    # callers before compiling/retraining a loaded model.
    model = tf.keras.models.load_model(model_file, compile=False)
    return model


def train_test_split(X, test_ratio=0.2):
    '''
    Split samples into train/test subsets with a fixed random seed, so the
    same split is reproduced on every call.
    :param X: array of samples, split along axis 0
    :param test_ratio: fraction of samples assigned to the test part
    :return: (X_train, X_test)
    '''
    train_part, test_part = model_selection.train_test_split(
        X, test_size=test_ratio, random_state=864651)
    return train_part, test_part


def train_test_split_limit_memory(X, test_ratio=0.2):
    '''
    Memory-light train/test split: shuffle X IN PLACE, then slice it into two
    views instead of copying through sklearn.
    Note: mutates the caller's array (deliberate — reshuffles every call).
    :param X: ndarray of samples, split along axis 0
    :param test_ratio: fraction of samples assigned to the test part
    :return: (X_train, X_test) views into the shuffled X
    '''
    np.random.shuffle(X)  # reshuffle the training samples on every call
    boundary = int((1 - test_ratio) * X.shape[0])
    return X[:boundary], X[boundary:]


def read_sample(sample_dir, multi_class=False):
    '''
    Read the samples of every class from one directory. The directory should
    contain one "<cls>.npy" file per class in `classes` (the old docstring
    said "cls.py", but the code strips a 4-character ".npy" suffix).
    :param sample_dir: directory holding the per-class .npy sample files
    :param multi_class: True -> one-hot multi-class labels,
                        False -> binary normal/anomaly labels
    :return: (xarray, yarray) stacked samples and matching labels, or
             (None, None) when no .npy file is found
    '''
    x_parts = []
    y_parts = []
    # sorted() makes the sample order deterministic (os.listdir order is
    # arbitrary), and the suffix filter skips stray non-.npy files that would
    # previously have been loaded and mislabeled.
    for file in sorted(os.listdir(sample_dir)):
        if not file.endswith(".npy"):
            continue
        X = np.load(os.path.join(sample_dir, file))
        cls = file[:-4]  # strip ".npy" to recover the class name
        if multi_class:
            label = get_label_by_class1(cls)
        else:
            label = get_label_by_class2(cls)
        x_parts.append(X)
        y_parts.append(np.tile(label, reps=(X.shape[0], 1)))
    if not x_parts:
        return None, None
    # Stack once at the end; the old per-file np.vstack loop was quadratic.
    return np.vstack(x_parts), np.vstack(y_parts)


if __name__ == '__main__':
    # Smoke test: split a random 1-D tensor and print the training-part shape.
    X = tf.random.normal(shape=(2598,))
    X_train, X_test = train_test_split(X)
    print(X_train.shape)
