import os
import pywt
import json
import pickle
import argparse

import numpy as np
import pandas as pd
from collections import OrderedDict
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.decomposition import KernelPCA

from pyhht import EMD, plot_imfs
from scipy.signal import hilbert
import matplotlib.pyplot as plt
def process_data(data, hours=0):
    """Extract features from one raw bearing reading and standardize them.

    Parameters
    ----------
    data : array-like, shape (num_samples, num_channels)
        Raw vibration samples; transposed internally to (channels, samples).
    hours : int, optional
        Elapsed-hours offset forwarded to ``cal_seq_feature``.

    Returns
    -------
    np.ndarray of shape (1, num_features) standardized with the fitted
    per-feature transforms loaded from disk, or ``None`` for empty input.
    """
    data = np.array(data)
    # The original emptiness check ran on bearing_data, whose len is always 1
    # after np.newaxis — check the raw input instead, before any processing.
    if data.size == 0:
        print("[warning][data is not existed!]")
        return None

    # Arrange as (1, channels, samples) for the feature extractors.
    data = data.transpose(1, 0)
    bearing_data = data[np.newaxis, :, :]

    # cal_seq_feature also needs the hours offset, hence the lambda wrapper.
    process_functions = [
        cal_fft_feature,
        lambda d: cal_seq_feature(d, hours),
        cal_time_feature,
        cal_wavelet_feature,
    ]
    data = np.concatenate([func(bearing_data) for func in process_functions], axis=1)

    # Load the fitted per-feature scalers and apply them column by column.
    # NOTE(review): hard-coded absolute path — consider making it a parameter.
    transforms_path = r'D:\desktop/rul_system/system/data/phm2012/transformation.pkl'
    with open(transforms_path, 'rb') as file:
        fea_transforms = pickle.load(file)
    for i in range(data.shape[1]):
        data[:, [i]] = fea_transforms[i].transform(data[:, [i]])

    return data

def main(data_path, save_path):
    """Load one raw CSV reading, standardize its features and pickle them.

    Parameters
    ----------
    data_path : str
        Path of the acceleration CSV file to process.
    save_path : str
        Directory where the pickled feature array ("line_data") is written.
    """
    os.makedirs(save_path, exist_ok=True)

    # Extract raw features from the CSV file.
    data = load_files(data_path)

    # Load the fitted per-feature scalers and standardize each column.
    # Raw string: '\d' in a plain literal is an invalid escape sequence.
    # NOTE(review): hard-coded absolute path — consider making it a parameter.
    transforms_path = r'D:\desktop/rul_system/system/data/phm2012/transformation.pkl'
    with open(transforms_path, 'rb') as file:
        fea_transforms = pickle.load(file)
    for i in range(data.shape[1]):
        data[:, [i]] = fea_transforms[i].transform(data[:, [i]])

    file_path = os.path.join(save_path, "line_data")
    with open(file_path, 'wb') as f:
        pickle.dump(data, f)


def save_numpy_data(path, data_dict, suffix):
    """Save every array in *data_dict* under *path* as ``{suffix}_{name}.npy``."""
    for name, array in data_dict.items():
        target = os.path.join(path, f"{suffix}_{name}.npy")
        np.save(target, array)



# Per-feature standardization (a min-max / KernelPCA variant is commented out).
def prep_data(batch_data, transforms=None, keep_features=64):
    """Standardize each feature column of *batch_data*.

    When *transforms* is None a fresh StandardScaler is fitted per column;
    otherwise the supplied (already fitted) transforms are applied as-is.
    *keep_features* is kept for interface compatibility (used only by the
    commented-out KernelPCA variant).

    Returns the transformed array together with the list of transforms.
    """
    num_features = batch_data.shape[1]
    fitting = transforms is None

    if fitting:
        feature_transforms = [None] * num_features
    else:
        assert len(transforms) == num_features
        feature_transforms = transforms

    for col in range(num_features):
        column = batch_data[:, [col]]
        if fitting:
            # feature_transforms[col] = MinMaxScaler(feature_range=(0, 1))
            scaler = StandardScaler()
            feature_transforms[col] = scaler
            batch_data[:, [col]] = scaler.fit_transform(column)
        else:
            batch_data[:, [col]] = feature_transforms[col].transform(column)

    # KernelPCA reduction deliberately left disabled, as in the original:
    # feature_transforms[-1] = KernelPCA(keep_features, kernel="rbf")

    return batch_data, feature_transforms


def load_files(dir_path):
    """Read one acceleration CSV and turn it into a feature row.

    Parameters
    ----------
    dir_path : str
        Path of a single CSV file (comma- or semicolon-separated).

    Returns
    -------
    np.ndarray of shape (1, num_features) — concatenation of FFT,
    sequence, time-domain and wavelet features — or ``None`` when the
    file holds no usable data.
    """
    # BUG FIX: cal_seq_feature requires an hours offset; calling it bare with
    # one argument (as the original list did) raised TypeError at runtime.
    # No offset is known here, so start at 0 — matching process_data's default.
    process_functions = [
        cal_fft_feature,
        lambda d: cal_seq_feature(d, 0),
        cal_time_feature,
        cal_wavelet_feature,
    ]

    df = pd.read_csv(dir_path, header=None)
    if df.shape[1] < 6:
        # Some PHM2012 files are ';'-separated; retry with that delimiter.
        df = pd.read_csv(dir_path, sep=";", header=None)

    # Columns 4..6 hold the acceleration channels; arrange as (1, C, T).
    data = np.array(df.loc[:, 4:6]).transpose(1, 0)
    bearing_data = data[np.newaxis, :, :]

    # The original checked len(bearing_data), which is always 1 after
    # np.newaxis — check the element count instead.
    if bearing_data.size == 0:
        print(f"[warning][{dir_path} is not existed!]")
        return None

    return np.concatenate([func(bearing_data) for func in process_functions], axis=1)


def cal_time_feature(data):
    """Per-channel time-domain statistics: mean, RMS and variance.

    Parameters
    ----------
    data : np.ndarray of shape (batch, channels, samples)

    Returns
    -------
    np.ndarray of shape (batch, channels * 3), ordered per channel as
    [mean, rms, var] (further statistics remain disabled, as before).
    """
    mean = np.mean(data, axis=2, keepdims=True)
    rms = np.sqrt(np.mean(np.square(data), axis=2, keepdims=True))
    var = np.var(data, axis=2, keepdims=True)

    # Kurtosis / skew / peak-to-peak / crest-factor etc. were deliberately
    # disabled in the original implementation and stay out here as well.

    stacked = np.concatenate((mean, rms, var), axis=2)
    return stacked.reshape(stacked.shape[0], -1)


def cal_fft_feature(data, freq=2560):
    """Band-energy features from the FFT of (batch, channels, samples) data.

    The DC component is removed, the power spectrum is computed, and five
    pairs of symmetric sub-bands of width ``freq/10`` around the spectrum
    midpoint are summed into one energy value each.

    Returns
    -------
    np.ndarray of shape (batch, channels * 5).
    """
    # Remove the DC component before transforming.
    centered = data - np.mean(data, axis=2, keepdims=True)

    sub_freq = int(freq / 5)
    band = int(sub_freq / 2)
    spectrum = np.abs(np.fft.fft(centered, axis=2) / sub_freq) ** 2

    mid = int(spectrum.shape[2] / 2)
    energies = []
    for k in range(5):
        lo = mid - k * band
        hi = mid + k * band
        left = np.sum(spectrum[:, :, lo - band: lo], axis=2, keepdims=True)
        right = np.sum(spectrum[:, :, hi: hi + band + 1], axis=2, keepdims=True)
        energies.append(left + right)

    fea = np.concatenate(energies, axis=2)
    return fea.reshape(fea.shape[0], -1)


# Generates a monotone index sequence as long as the input; note that bearings
# of different lengths produce different ranges, so after standardization a
# short sequence's feature will not span the full normalized interval.
def cal_seq_feature(data, hours):
    """Return a (n, 1) column of consecutive indices starting at *hours*,
    where n is ``data.shape[0]`` (one entry per reading)."""
    count = data.shape[0]
    return (hours + np.arange(count)).reshape(-1, 1)


def cal_wavelet_feature(data, freq=2560, num_levels=4, wavelet_function="db4", keep_size=1):
    """Wavelet-packet band-energy features.

    Decomposes the signal *num_levels* deep, reconstructs each leaf node in
    isolation and runs ``cal_fft_feature`` on every reconstruction.

    Parameters
    ----------
    data : np.ndarray of shape (batch, channels, samples)
    freq : int — sampling rate forwarded to ``cal_fft_feature``.
    num_levels : int — wavelet-packet decomposition depth.
    wavelet_function : str — pywt wavelet name.
    keep_size : int — kept for interface compatibility (unused; only the
        commented-out peak-picking variant needed it).

    Returns
    -------
    np.ndarray of shape (batch, channels * 5 * 2**num_levels).
    """
    wp = pywt.WaveletPacket(data, wavelet=wavelet_function, maxlevel=num_levels, axis=-1)
    packet_names = [node.path for node in wp.get_level(num_levels, "natural")]

    features = []
    for name in packet_names:
        # Rebuild a packet tree containing only this leaf, then reconstruct.
        # BUG FIX: the wavelet was hard-coded to "db4" here, silently
        # ignoring a caller-supplied wavelet_function.
        solo = pywt.WaveletPacket(data=None, wavelet=wavelet_function,
                                  maxlevel=num_levels, axis=-1)
        solo[name] = wp[name].data
        reconstructed = solo.reconstruct(update=False)
        features.append(cal_fft_feature(reconstructed, freq))

    return np.concatenate(features, axis=-1).reshape(data.shape[0], -1)


if __name__ == "__main__":
    print("# Loading the data:")

    parser = argparse.ArgumentParser()
    parser.add_argument("--data-path", type=str, dest="data_path", help="phm data path",
                        default="D:\desktop/rul_system/acc_01000.csv")
    parser.add_argument("--save-path", type=str, dest="save_path", help="meta information save path",
                        default="./data/system_csv")

    args = parser.parse_args()
    data_path = args.data_path
    save_path = args.save_path

    main(data_path, save_path)
