import os
import numpy as np
import pandas as pd
from sklearn import preprocessing
import scipy.signal as signal
from scipy.fftpack import fft


# Target length (samples per channel) that every cut's signal is resampled to.
resample_num = 20000

def proprecessing(path, name, resample_num, out_dir=r'E:\data'):
    """Preprocess one cutter's recordings into fixed-length arrays and save them.

    Each CSV under *path* holds 7 signal channels (Fx, Fy, Fz, Ax, Ay, Az,
    AE_rms), one file per cut.  Every channel is standardised to zero mean /
    unit variance, the outer 5% of samples on each side is trimmed, and the
    remainder is resampled to *resample_num* points.  The stacked result and
    its FFT (along the time axis) are saved as .npy files under *out_dir*.

    Parameters
    ----------
    path : str
        Directory containing the per-cut CSV files, named so that characters
        [4:-4] of the file name are the integer cut index (e.g. 'c_1_023.csv').
    name : str
        Dataset tag used in the output file names (e.g. 'c1').
    resample_num : int
        Number of samples each channel is resampled to.
    out_dir : str, optional
        Directory the .npy files are written to.  Default keeps the original
        hard-coded location.
    """
    # Collect the CSV files, ordered by the integer cut index embedded in
    # the file name.
    files = os.listdir(path)
    files.sort(key=lambda x: int(x[4:-4]))
    # BUGFIX: the original tested os.path.isdir(path + file) without a
    # separator; join properly for both the check and the stored path.
    s = [os.path.join(path, f) for f in files
         if not os.path.isdir(os.path.join(path, f))]

    n_files = len(s)  # originally hard-coded to 315 cuts
    data_train = np.empty([n_files, resample_num, 7])

    for i in range(n_files):
        data1 = pd.read_csv(s[i], names=['Fx', 'Fy', 'Fz', 'Ax', 'Ay', 'Az', 'AE_rms'])
        for j in range(7):
            # Standardise one channel to zero mean / unit variance
            # (equivalent to sklearn's StandardScaler, incl. its guard
            # against zero-variance columns).
            data2 = np.asarray(data1.iloc[:, j], dtype=float).reshape(-1)
            std = data2.std()
            data2 = (data2 - data2.mean()) / (std if std else 1.0)

            # Keep the middle ~90% of the signal (drop 5% at each end).
            # Guarded so very short signals are not sliced to empty.
            trim = data2.shape[0] // 20
            if trim:
                data2 = data2[trim:-trim]

            # Resample to a fixed length so every cut has the same shape.
            d = signal.resample(data2, resample_num)

            # BUGFIX: the original wrote `data_train[i:, :, j] = d`, which
            # broadcast d over ALL rows >= i on every iteration (the later
            # rows were then overwritten — same final result, O(n^2) writes).
            data_train[i, :, j] = d

            print(name, [i, j])

    # FFT to obtain frequency-domain data.  BUGFIX: the original recomputed
    # this inside the inner loop (n_files * 7 full-array FFTs) and used the
    # default last axis (the 7 channels); the time dimension is axis=1.
    data_train_fft = fft(data_train, axis=1)

    print(name, 'data_train', data_train.shape)
    print(name, 'data_train_fft', data_train_fft.shape)

    np.save(os.path.join(out_dir, 'data_' + str(name) + '_' + str(resample_num) + '_7_2d.npy'), data_train)
    np.save(os.path.join(out_dir, 'data_' + str(name) + '_' + str(resample_num) + '_7_2d_fft.npy'), data_train_fft)


def get_labels(path_with_f_name, name, out_dir=r'E:\data1'):
    """Build per-cut wear labels as the mean wear over the three flutes.

    Reads the wear CSV, stacks the 'flute_1'..'flute_3' columns into an
    (n_cuts, 3) matrix, averages across flutes (axis=1 -> one value per cut)
    and saves the resulting 1-D label vector as a .npy file.

    Parameters
    ----------
    path_with_f_name : str
        Path to the wear CSV; must contain columns 'flute_1', 'flute_2',
        'flute_3'.
    name : str
        Dataset tag used in the output file name (e.g. 'c1').
    out_dir : str, optional
        Directory the labels file is written to.  Default keeps the original
        hard-coded location.
    """
    data0 = pd.read_csv(path_with_f_name)
    # Stack the three flute wear columns into an (n_cuts, 3) matrix
    # (replaces the original reshape-then-concatenate dance).
    y = np.column_stack([np.asarray(data0[col], dtype=float)
                         for col in ('flute_1', 'flute_2', 'flute_3')])
    print('y', y.shape)

    # Label for each cut = mean wear across its three flutes.
    labels = np.mean(y, axis=1)

    print('data0', labels.shape)
    print('data0_mean', np.mean(labels))

    np.save(os.path.join(out_dir, 'data_' + str(name) + '_labels.npy'), labels)

# Run the preprocessing for each cutter dataset in turn.
for _tag, _dir in (('c1', r'E:\data\c1'),
                   ('c4', r'E:\data\c4'),
                   ('c6', r'E:\data\c6')):
    proprecessing(path=_dir, name=_tag, resample_num=resample_num)

# Label extraction is left disabled, as in the original script.
# get_labels(path_with_f_name=r'E:\data\c1_wear.csv', name='c1')
# get_labels(path_with_f_name=r'E:\data\c4_wear.csv', name='c4')
# get_labels(path_with_f_name=r'E:\data\c6_wear.csv', name='c6')

