import pandas as pd
import numpy as np
import pickle
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import math
import joblib
# plt.switch_backend('agg')

# 重采样
# Resample (linear interpolation)
def sequenceResize(source, length):
    """Resample every column of ``source`` to ``length`` points.

    Each output sample is the linear interpolation of the two input
    samples that bracket its fractional position in the column.

    Parameters
    ----------
    source : 2-D array-like of shape (n, m)
        Input sequences, one per column.  Values only need to support
        float arithmetic, so object-dtype slices from pandas work too.
    length : int
        Number of output samples per column (must be >= 1).

    Returns
    -------
    numpy.ndarray of shape (length, m)
        The resampled sequences, columns in the original order.
    """
    # Hoisted loop invariant; the max() guard makes length == 1 return the
    # first input sample instead of raising ZeroDivisionError.
    denom = max(length - 1, 1)
    columns = []
    for j in range(source.shape[1]):
        col = source[:, j]
        resampled = []
        for i in range(length):
            # Fractional index of output sample i within the input column.
            key = i * (len(col) - 1) / denom
            low = col[int(math.floor(key))]
            high = col[int(math.ceil(key))]
            ratio = key % 1
            resampled.append((1 - ratio) * low + ratio * high)
        columns.append(np.array(resampled).reshape(-1, 1))
    # Stack all columns once instead of the original repeated
    # np.concatenate inside a bare try/except (which silently relied on a
    # NameError for the first column and hid every other error).
    return np.concatenate(columns, axis=1)


# 标准化数据
# Standardize the data
def standardize(data_path):
    """Load, resample and z-score the thruster data set at ``data_path``.

    The CSV (gb2312-encoded) is split into experiment rows and ambient
    ("normal") rows via the '是否试验数据' flag column; each product's
    sequences are resampled to 200 time steps, then every channel is
    standardized with its own fitted ``StandardScaler``.  The fitted
    scalers are persisted with joblib under ./normalize_data/train/, and
    before/after plots of the first product's experiment channels are
    saved under ./img/.

    Parameters
    ----------
    data_path : str
        Path of the input CSV file.

    Returns
    -------
    tuple
        ``(index, new_x, new_y)`` where ``index`` is the list of feature
        column names (channels shared by x and y), ``new_x`` is the
        standardized ambient data of shape (products, 200, channels) and
        ``new_y`` the standardized experiment data.

    NOTE(review): assumes product IDs run contiguously 1..max('产品ID')
    and that the ambient data has exactly one extra leading channel
    compared to the experiment data — confirm against the data source.
    """
    data = pd.read_csv(data_path, engine='python', encoding="gb2312")
    # Split experiment rows from ambient-temperature rows.
    experiment = data[(data['是否试验数据'] == 1)].dropna(axis=1)
    normal = data[(data['是否试验数据'] == 0)].dropna(axis=1)
    # Number of products.
    product_num = int(data['产品ID'].max())

    x = np.zeros((product_num, 200, normal.values.shape[1] - 2))
    y = np.zeros((product_num, 200, experiment.values.shape[1] - 2))
    new_x = np.zeros((product_num, 200, normal.values.shape[1] - 2))
    new_y = np.zeros((product_num, 200, experiment.values.shape[1] - 2))

    for product_id in range(product_num):
        # First two CSV columns are the flag and the product ID; the rest
        # are sensor channels, resampled to a fixed 200 time steps.
        x[product_id] = sequenceResize(
            normal[(normal['产品ID'] == product_id + 1)].values[:, 2:], 200)
        y[product_id] = sequenceResize(
            experiment[(experiment['产品ID'] == product_id + 1)].values[:, 2:], 200)

    # Channel 0 exists only in the ambient data, so it gets its own scaler.
    scaler_x0 = StandardScaler()
    tempx0 = scaler_x0.fit_transform(x[:, :, 0].reshape(-1, 1))
    new_x[:, :, 0] = tempx0.reshape(new_x.shape[0], new_x.shape[1])
    joblib.dump(scaler_x0, './normalize_data/train/scalerx0.pkl')

    index = [column for column in data][2:]
    for i in range(1, x.shape[2]):
        # One scaler per channel, fitted over all products and time steps.
        # Plain locals replace the original exec()-built dynamic variable
        # names (standardscaler%s etc.) — same behavior, same dump paths.
        scaler_x = StandardScaler()
        scaler_y = StandardScaler()
        tempx = scaler_x.fit_transform(x[:, :, i].reshape(-1, 1))
        tempy = scaler_y.fit_transform(y[:, :, i - 1].reshape(-1, 1))
        new_x[:, :, i] = tempx.reshape(new_x.shape[0], new_x.shape[1])
        new_y[:, :, i - 1] = tempy.reshape(new_y.shape[0], new_y.shape[1])
        # Persist the fitted scalers so inference can reuse them.
        joblib.dump(scaler_x, './normalize_data/train/scalerx%s.pkl' % i)
        joblib.dump(scaler_y, './normalize_data/train/scalery%s.pkl' % i)

        # Before/after plots of the first product's channel i.
        plt.figure(figsize=(20, 16), dpi=100)
        ax = plt.subplot(111)
        ax.yaxis.get_offset_text().set_fontsize(40)
        plt.plot(y[0:1, :, i - 1].reshape(-1), linewidth=5)
        plt.tick_params(labelsize=40)
        plt.title(u'原始数据', fontproperties='SimHei', fontsize=40)
        plt.xlabel(u'时间点', fontproperties='SimHei', fontsize=40)
        plt.ylabel(index[i], fontproperties='SimHei', fontsize=40)
        plt.savefig('./img/experiment' + str(i) + '.png')
        plt.close()  # was leaked in the original (only the 2nd figure closed)

        plt.figure(figsize=(20, 16), dpi=100)
        ax = plt.subplot(111)
        ax.yaxis.get_offset_text().set_fontsize(40)
        plt.plot(new_y[0:1, :, i - 1].reshape(-1), linewidth=5)
        plt.tick_params(labelsize=40)
        plt.title(u'标准化后数据', fontproperties='SimHei', fontsize=40)
        plt.xlabel(u'时间点', fontproperties='SimHei', fontsize=40)
        plt.ylabel(index[i], fontproperties='SimHei', fontsize=40)
        plt.savefig('./img/standardization_experiment' + str(i) + '.png')
        plt.close()
    return index[1:], new_x, new_y

def normalize(data_path):
    """Standardize ``data_path`` and cache the resulting arrays to disk.

    Runs :func:`standardize`, saves the standardized x/y tensors as
    ./normalize_data/train/train_{x,y}.npy and returns the list of
    feature column names.
    """
    feature_names, train_x, train_y = standardize(data_path)
    print(train_x.shape)
    np.save("./normalize_data/train/train_x.npy", train_x)
    np.save("./normalize_data/train/train_y.npy", train_y)
    return feature_names

if __name__ == '__main__':
    # Demo run on the sample data set.  Note: this calls standardize()
    # directly, so the .npy caches are NOT written (use normalize() for that).
    feature_index, norm_x, norm_y = standardize('./testdata/thruster_data.csv')