#!/usr/bin/env python
# coding: utf-8
from keras.layers import Input,concatenate,Embedding,UpSampling1D,BatchNormalization,Conv1D,MaxPooling1D,Dense,Flatten,Lambda,Dropout,Concatenate,LeakyReLU,BatchNormalization,Reshape,Activation,GlobalAveragePooling1D,AveragePooling1D
from keras import regularizers
from keras.callbacks import EarlyStopping
from keras.optimizers import Adam
from keras.models import Sequential, Model
from keras import backend as K
from keras import Model,regularizers

from scipy import signal
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import xlrd
import os
from sklearn.preprocessing import MinMaxScaler
from keras.models import load_model

import math
import keras
# TensorFlow 1.x session setup: let GPU memory grow on demand instead of
# pre-allocating the whole device at session creation.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
keras.backend.tensorflow_backend.set_session(tf.Session(config=config))

import os

# BUG FIX: CUDA_VISIBLE_DEVICES expects comma-separated device *indices*
# ("0", "0,1", ...), not a TensorFlow device string.  The previous value
# "/gpu:0" is unparseable and hides every GPU from the process; "0" selects
# the first GPU as intended.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

#%%
#Read the horizontal-direction channel of the XJTU-SY bearing dataset
def readfile_h(path):
    """Read the horizontal-axis vibration signal from every CSV file in *path*.

    Each CSV is expected to have one header row and the horizontal channel in
    column 0.  Returns a 1-D numpy array with all files concatenated in
    chronological order.
    """
    files = os.listdir(path)
    # BUG FIX: this numeric sort was commented out.  os.listdir returns files
    # in arbitrary order, so the life-cycle signal was concatenated out of
    # order; sort on the numeric stem (e.g. "12.csv" -> 12) exactly as
    # readfile_v already does, keeping the two readers consistent.
    files.sort(key=lambda x: int(x[0:-4]))
    rowdata = []
    for file in files:
        info = path + "/" + file
        # Let np.loadtxt open/close the file itself (the original passed an
        # open() handle that was never closed).  Skip the header row.
        data = np.loadtxt(info, delimiter=',', skiprows=1)
        # Column 0 holds the horizontal vibration channel.
        rowdata = np.hstack((rowdata, data[:, 0]))
    return rowdata
#%%
# Load the horizontal channel of the five bearings recorded under the
# 37.5Hz / 11kN operating condition (machine-local absolute paths).
train1_1_h_o = readfile_h('E://Datasets//XJTU-SY_Bearing_Datasets//Data//XJTU-SY_Bearing_Datasets//37.5Hz11kN//Bearing2_1')
train1_2_h_o = readfile_h('E://Datasets//XJTU-SY_Bearing_Datasets//Data//XJTU-SY_Bearing_Datasets//37.5Hz11kN//Bearing2_2')
train1_3_h_o = readfile_h('E://Datasets//XJTU-SY_Bearing_Datasets//Data//XJTU-SY_Bearing_Datasets//37.5Hz11kN//Bearing2_3')
train1_4_h_o = readfile_h('E://Datasets//XJTU-SY_Bearing_Datasets//Data//XJTU-SY_Bearing_Datasets//37.5Hz11kN//Bearing2_4')
train1_5_h_o = readfile_h('E://Datasets//XJTU-SY_Bearing_Datasets//Data//XJTU-SY_Bearing_Datasets//37.5Hz11kN//Bearing2_5')

#%%
#Read the vertical-direction channel of the XJTU-SY bearing dataset
def readfile_v(path):
    """Read the vertical-axis vibration signal (CSV column 1) from every file
    under *path* and return everything concatenated into one 1-D array."""
    names = os.listdir(path)
    # os.listdir order is arbitrary: sort numerically on the file-name stem
    # (everything before ".csv") so records stay in chronological order.
    names.sort(key=lambda fname: int(fname[0:-4]))
    joined = []
    for fname in names:
        full = path + "/" + fname
        # One CSV per record; skip the header row.
        table = np.loadtxt(open(full, "rb"), delimiter=',', skiprows=1)
        joined = np.hstack((joined, table[:, 1]))
    return joined

# Load the vertical channel for the same five bearings.
train1_1_v_o = readfile_v('E://Datasets//XJTU-SY_Bearing_Datasets//Data//XJTU-SY_Bearing_Datasets//37.5Hz11kN//Bearing2_1')
# BUG FIX: the path previously read 'Bearin2_2' (missing the 'g'), which makes
# os.listdir raise FileNotFoundError; corrected to 'Bearing2_2' to match every
# other bearing path in this file.
train1_2_v_o = readfile_v('E://Datasets//XJTU-SY_Bearing_Datasets//Data//XJTU-SY_Bearing_Datasets//37.5Hz11kN//Bearing2_2')
train1_3_v_o = readfile_v('E://Datasets//XJTU-SY_Bearing_Datasets//Data//XJTU-SY_Bearing_Datasets//37.5Hz11kN//Bearing2_3')
train1_4_v_o = readfile_v('E://Datasets//XJTU-SY_Bearing_Datasets//Data//XJTU-SY_Bearing_Datasets//37.5Hz11kN//Bearing2_4')
train1_5_v_o = readfile_v('E://Datasets//XJTU-SY_Bearing_Datasets//Data//XJTU-SY_Bearing_Datasets//37.5Hz11kN//Bearing2_5')

#%%
# #将数据标准化wyw's code
# ####################### 水平数据标准化 ########################
# xxh1 = train1_1_h_o.reshape((-1, 32768))
# meanh1 = np.mean(xxh1, axis = 0)
# stdh1 = np.std (xxh1, axis = 0)
# train1_1_h = (xxh1 - meanh1) / stdh1
#
# xxh2 = train1_2_h_o.reshape((-1, 32768))
# train1_2_h = (xxh2 - np.mean(xxh2, axis = 0)) / np.std(xxh2, axis = 0)
#
# xxh3 = train1_3_h_o.reshape((-1, 32768))
# train1_3_h = (xxh3 - np.mean(xxh3, axis = 0)) / np.std(xxh3, axis = 0)
#
# xxh4 = train1_4_h_o.reshape((-1, 32768))
# train1_4_h = (xxh4 - np.mean(xxh4, axis = 0)) / np.std(xxh4, axis = 0)
#
# xxh5 = train1_5_h_o.reshape((-1, 32768))
# train1_5_h = (xxh5 - np.mean(xxh5, axis = 0)) / np.std(xxh5, axis = 0)
#
#
# ###################垂直数据标准化######################
# xxh1 = train1_1_v_o.reshape((-1, 32768))
# train1_1_v = (xxh1 - np.mean(xxh1, axis = 0)) / np.std(xxh1, axis = 0)
#
# xxh2 = train1_2_v_o.reshape((-1, 32768))
# train1_2_v = (xxh2 - np.mean(xxh2, axis = 0)) / np.std(xxh2, axis = 0)
#
# xxh3 = train1_3_v_o.reshape((-1, 32768))
# train1_3_v = (xxh3 - np.mean(xxh3, axis = 0)) / np.std(xxh3, axis = 0)
#
# xxh4 = train1_4_v_o.reshape((-1, 32768))
# train1_4_v = (xxh4 - np.mean(xxh4, axis = 0)) / np.std(xxh4, axis = 0)
#
# xxh5 = train1_5_v_o.reshape((-1, 32768))
# train1_5_v = (xxh5 - np.mean(xxh5, axis = 0)) / np.std(xxh5, axis = 0)
#
# #################水平信号与垂直信号拼接#########################
# xtr1_1 = np.dstack((train1_1_h, train1_1_v))
# xtr1_2 = np.dstack((train1_2_h, train1_2_v))
# xtr1_3 = np.dstack((train1_3_h, train1_3_v))
# xtr1_4 = np.dstack((train1_4_h, train1_4_v))
# xtr1_5 = np.dstack((train1_5_h, train1_5_v))

#%% Standardise the data (Deng Lei's version)
# Horizontal channel: zero mean / unit variance per bearing, with statistics
# taken over the bearing's whole recording.
train1_1_h = (train1_1_h_o - np.mean(train1_1_h_o)) / np.std(train1_1_h_o)
train1_2_h = (train1_2_h_o - np.mean(train1_2_h_o)) / np.std(train1_2_h_o)
train1_3_h = (train1_3_h_o - np.mean(train1_3_h_o)) / np.std(train1_3_h_o)
train1_4_h = (train1_4_h_o - np.mean(train1_4_h_o)) / np.std(train1_4_h_o)
train1_5_h = (train1_5_h_o - np.mean(train1_5_h_o)) / np.std(train1_5_h_o)

# Cut the horizontal stream into records of 32768 points each.
xtr1_1_h = train1_1_h.reshape((-1, 32768))
xtr1_2_h = train1_2_h.reshape((-1, 32768))
xtr1_3_h = train1_3_h.reshape((-1, 32768))
xtr1_4_h = train1_4_h.reshape((-1, 32768))
xtr1_5_h = train1_5_h.reshape((-1, 32768))
print(xtr1_1_h.shape)

# Vertical channel: same zero-mean / unit-variance standardisation.
train1_1_v = (train1_1_v_o - np.mean(train1_1_v_o)) / np.std(train1_1_v_o)
train1_2_v = (train1_2_v_o - np.mean(train1_2_v_o)) / np.std(train1_2_v_o)
train1_3_v = (train1_3_v_o - np.mean(train1_3_v_o)) / np.std(train1_3_v_o)
train1_4_v = (train1_4_v_o - np.mean(train1_4_v_o)) / np.std(train1_4_v_o)
train1_5_v = (train1_5_v_o - np.mean(train1_5_v_o)) / np.std(train1_5_v_o)

# Cut the vertical stream into records of 32768 points each.
xtr1_1_v = train1_1_v.reshape((-1, 32768))
xtr1_2_v = train1_2_v.reshape((-1, 32768))
xtr1_3_v = train1_3_v.reshape((-1, 32768))
xtr1_4_v = train1_4_v.reshape((-1, 32768))
xtr1_5_v = train1_5_v.reshape((-1, 32768))
print(xtr1_1_v.shape)

# Pair the horizontal and vertical signals as the two channels of each record.
# BUG FIX: the previous code did np.hstack((h, v)).reshape((-1, 32768, 2)),
# which concatenates ALL horizontal values before ALL vertical values and then
# interleaves neighbouring values of the SAME channel -- h and v were never
# actually paired.  np.dstack on the per-record (n, 32768) views builds the
# intended (n_records, 32768, 2) layout with channel 0 = horizontal and
# channel 1 = vertical (matching the commented-out reference code above).
xtr1_1 = np.dstack((train1_1_h.reshape((-1, 32768)), train1_1_v.reshape((-1, 32768))))
xtr1_2 = np.dstack((train1_2_h.reshape((-1, 32768)), train1_2_v.reshape((-1, 32768))))
xtr1_3 = np.dstack((train1_3_h.reshape((-1, 32768)), train1_3_v.reshape((-1, 32768))))
xtr1_4 = np.dstack((train1_4_h.reshape((-1, 32768)), train1_4_v.reshape((-1, 32768))))
xtr1_5 = np.dstack((train1_5_h.reshape((-1, 32768)), train1_5_v.reshape((-1, 32768))))
print(xtr1_1.shape)
print(xtr1_2.shape)
print(xtr1_3.shape)
print(xtr1_4.shape)
print(xtr1_5.shape)

#%%
# Labels: remaining useful life per record, as a descending count (the last
# record of each bearing gets label 0), shaped as a column vector.
ytr1_1 = np.arange(xtr1_1.shape[0])[::-1].reshape((-1, 1))
ytr1_2 = np.arange(xtr1_2.shape[0])[::-1].reshape((-1, 1))
ytr1_3 = np.arange(xtr1_3.shape[0])[::-1].reshape((-1, 1))
ytr1_4 = np.arange(xtr1_4.shape[0])[::-1].reshape((-1, 1))
ytr1_5 = np.arange(xtr1_5.shape[0])[::-1].reshape((-1, 1))


# In[16]:
# Normalise each label vector to [0, 1] with MinMaxScaler and multiply by 100
# so the regression target is remaining life expressed as a percentage.
min_max_scaler = MinMaxScaler()
ytr1_1 = 100 * min_max_scaler.fit_transform(ytr1_1)
print(ytr1_1.shape)
ytr1_2 = 100 * min_max_scaler.fit_transform(ytr1_2)
ytr1_3 = 100 * min_max_scaler.fit_transform(ytr1_3)
ytr1_4 = 100 * min_max_scaler.fit_transform(ytr1_4)
print(ytr1_2.shape)
print(ytr1_3.shape)
print(ytr1_4.shape)
ytr1_5 = 100 * min_max_scaler.fit_transform(ytr1_5)
print(ytr1_5.shape)


# In[17]:

# Leave-one-out folds: one bearing is held out for testing, the remaining
# four are concatenated along axis 0 as the training set.

# Fold 1: bearing 2_1 held out.
xtr_b1 = np.concatenate((xtr1_2, xtr1_3, xtr1_4, xtr1_5), axis=0)
print(xtr_b1.shape)

ytr_b1 = np.vstack((ytr1_2, ytr1_3, ytr1_4, ytr1_5))
print(len(ytr_b1))

# Fold 2: bearing 2_2 held out.
xtr_b2 = np.concatenate((xtr1_1, xtr1_3, xtr1_4, xtr1_5), axis=0)
print(xtr_b2.shape)

ytr_b2 = np.vstack((ytr1_1, ytr1_3, ytr1_4, ytr1_5))
print(ytr_b2.shape)

# Fold 3: bearing 2_3 held out.
xtr_b3 = np.concatenate((xtr1_1, xtr1_2, xtr1_4, xtr1_5), axis=0)
print(xtr_b3.shape)

ytr_b3 = np.vstack((ytr1_1, ytr1_2, ytr1_4, ytr1_5))
print(ytr_b3.shape)

# Fold 4: bearing 2_4 held out.
xtr_b4 = np.concatenate((xtr1_1, xtr1_2, xtr1_3, xtr1_5), axis=0)
print(xtr_b4.shape)

ytr_b4 = np.vstack((ytr1_1, ytr1_2, ytr1_3, ytr1_5))
print(ytr_b4.shape)

# Fold 5: bearing 2_5 held out.
xtr_b5 = np.concatenate((xtr1_1, xtr1_2, xtr1_3, xtr1_4), axis=0)
print(xtr_b5.shape)

ytr_b5 = np.vstack((ytr1_1, ytr1_2, ytr1_3, ytr1_4))
print(ytr_b5.shape)

# One (train_x, train_y, test_x, test_y) tuple per fold.
train_data_list = [(xtr_b1, ytr_b1, xtr1_1, ytr1_1),
                   (xtr_b2, ytr_b2, xtr1_2, ytr1_2),
                   (xtr_b3, ytr_b3, xtr1_3, ytr1_3),
                   (xtr_b4, ytr_b4, xtr1_4, ytr1_4),
                   (xtr_b5, ytr_b5, xtr1_5, ytr1_5)]


# In[23]:

# Improved scoring function, phase-weighted over the bearing's life.
def score(ytr, ypred):
    """Return the weighted prognostic score for predictions *ypred* vs *ytr*.

    Per-sample grade, with er = ytr - ypred:
        er >  0: exp(ln(0.6) * er / 40)   (gentler penalty)
        er <= 0: exp(-ln(0.6) * er / 10)  (steeper penalty)
    A perfect prediction grades 1.0.  The mean grade over the first half of
    samples is weighted 0.35, over the second half 0.65.
    """
    Er = ytr - ypred
    # Vectorised form of the original two identical per-element loops.
    grades = np.where(Er > 0,
                      np.exp(np.log(0.6) * (Er / 40)),
                      np.exp(-np.log(0.6) * (Er / 10)))
    m = len(ytr) // 2
    Score = 0.35 * np.mean(grades[:m]) + 0.65 * np.mean(grades[m:])
    return Score


# In[24]:

def abs_backend(inputs):
    # Element-wise absolute value; wrapped in a Lambda layer inside tcnBlock.
    return K.abs(inputs)
def  expand_dim_backend(inputs):
    # Insert a size-1 axis at position 1, e.g. (batch, filters) -> (batch, 1, filters).
    return K.expand_dims(inputs,1)
def sign_backend(inputs):
    # Element-wise sign in {-1, 0, 1}; restores polarity after soft-thresholding.
    return K.sign(inputs)
# An alternative residual block: two dilated causal Conv1D layers followed by
# a learned soft-threshold (shrinkage) step and an identity/projection shortcut.
def tcnBlock(incoming,filters,kernel_size,dilation_rate):
    # incoming: 3-D tensor (batch, steps, channels) -- assumed from the Conv1D
    # pipeline in build_tcn; returns a tensor with `filters` channels.
    net = incoming
    identity = incoming
    net = BatchNormalization()(net)
#     net = Activation('relu')(net)
    net = keras.layers.LeakyReLU(alpha=0.2)(net)
    net = keras.layers.Dropout(0.3)(net)
    net = Conv1D(filters,kernel_size,padding='causal',dilation_rate=dilation_rate ,kernel_regularizer=regularizers.l2(1e-3))(net)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)
#     net = keras.layers.LeakyReLU(alpha=0.2)(net)
    net = keras.layers.Dropout(0.3)(net)
    net = Conv1D(filters,kernel_size,padding='causal',dilation_rate=dilation_rate, kernel_regularizer=regularizers.l2(1e-3))(net)
    
    # Global average pooling of |activations| -> per-channel magnitude summary.
    net_abs = Lambda(abs_backend)(net)
    abs_mean = GlobalAveragePooling1D()(net_abs)
    # Compute per-channel scaling coefficients in (0, 1) from the pooled
    # magnitudes: two small Dense layers ending in a sigmoid.
    scales = Dense(filters, activation=None, kernel_initializer='he_normal', 
                       kernel_regularizer=regularizers.l2(1e-4))(abs_mean)
    scales = BatchNormalization()(scales)
    scales = Activation('relu')(scales)
    scales = Dense(filters, activation='sigmoid', kernel_regularizer=regularizers.l2(1e-4))(scales)
    scales = Lambda(expand_dim_backend)(scales)
    # Threshold = mean(|x|) * sigmoid coefficient, one value per channel.
    thres = keras.layers.multiply([abs_mean, scales])
    # Soft thresholding: sign(x) * max(|x| - thres, 0).  The subtract-from-
    # itself trick builds a zeros tensor of matching shape for maximum().
    sub = keras.layers.subtract([net_abs, thres])
    zeros = keras.layers.subtract([sub, sub])
    n_sub = keras.layers.maximum([sub, zeros])
    net = keras.layers.multiply([Lambda(sign_backend)(net), n_sub])
    
    if identity.shape[-1]==filters:
        shortcut=identity
    else:
        shortcut=Conv1D(filters,kernel_size,padding = 'same')(identity)  # projection shortcut to match the channel count
        
    net = keras.layers.add([net,shortcut])
    return net


# In[25]:

def build_tcn():
    """Assemble the RUL regression network: a strided causal Conv1D front-end,
    max pooling, three tcnBlock units with growing dilation (1, 2, 4), global
    average pooling and a single ReLU output unit (non-negative RUL value)."""
    inputs = Input(shape = (32768,2))
    # Wide strided convolution downsamples the 32768-step input by 4.
    net = Conv1D(16,12,strides=4,padding='causal',kernel_regularizer=regularizers.l2(1e-3))(inputs)
    net = MaxPooling1D(4)(net)

    net = keras.layers.Dropout(0.4)(net)
    # Increasing dilation widens the receptive field block by block.
    net = tcnBlock(net,12,3,1)
    net = tcnBlock(net,6,3,2)
    net = tcnBlock(net,4,3,4)
    net = GlobalAveragePooling1D()(net)
#     net = keras.layers.Flatten()(net)
#     net = GRU(4,dropout=0.2)(net)
    outputs = Dense(1,activation ='relu')(net)
    model = Model(inputs=inputs, outputs=outputs)
    return model 


# In[26]:
def plot_fig(ytr,y_pred,i,j):
    """Plot predicted vs. true RUL for test bearing *j*, run *i*, and save the
    figure to disk (hard-coded F:/ prefix -- adjust per machine)."""
    from matplotlib.ticker import FuncFormatter
    fig, ax = plt.subplots(figsize=(7,5))
    ax.set_title('Bearing A2_'+str(j), fontsize=12)
    ax.set_xlabel('Time(min)', fontsize=12)
    ax.set_ylabel('RUL', fontsize=12)

    # Draw the prediction and ground-truth curves over sample index.
    epochs = range(1,len(y_pred)+1)
    ax.plot(epochs,y_pred,label="Proposed Method")
    ax.plot(epochs,ytr,label="Ground Truth")
    ax.legend(loc=0, numpoints=1)
    # Render y-axis tick labels as percentages.
    def to_percent(temp, position):
        return '%1.0f'%(temp) + '%'
    plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))

    # Save the figure.
    plt.savefig('F:/XJ1_'+str(j)+'_'+str(i)+'.png', bbox_inches='tight')


# In[27]:
def save_data(yte, y_pred, i, j):
    """Write ground truth and predictions side by side to an .xls file
    (hard-coded F:/ prefix; j = bearing index, i = run index)."""
    import pandas as pd
    side_by_side = np.hstack((yte, y_pred))
    pd.DataFrame(side_by_side).to_excel('F:/XJ1_' + str(j) + '_' + str(i) + '.xls')

# In[28]:
def fit_model(xtr, ytr, val_x, val_y):
    """Build a fresh TCN and train it on (xtr, ytr), validating on
    (val_x, val_y).  Returns the trained Keras model."""
    model = build_tcn()
    # BUG FIX (naming): the original bound the optimizer instance to the name
    # `Adam`, shadowing the Adam class imported at the top of the file.
    optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(optimizer=optimizer, loss='mse', metrics=['mae'])
    # The History object was assigned but never used; drop it.
    model.fit(xtr, ytr, batch_size=128, epochs=800, verbose=1,
              validation_data=(val_x, val_y))
    return model

# In[29]:
def run_model_1(xtr, ytr, xte, yte, i, j):
    """Train a fresh model on (xtr, ytr) and evaluate on the held-out bearing
    (xte, yte); *i* (run index) and *j* (bearing index) only name the saved
    figure and spreadsheet.

    Returns (MAE, RMSE, Score, MAE_second_half, RMSE_second_half); the
    "_second_half" metrics cover only the later half of the bearing's life.
    """
    model = fit_model(xtr, ytr, xte, yte)
    # Removed the unused `y_target = model.predict(xtr)` from the original --
    # it only wasted a full pass over the training set.
    y_pred = model.predict(xte)
    plot_fig(yte, y_pred, i, j)
    save_data(yte, y_pred, i, j)

    n = len(yte)
    Mae_1 = np.sum(np.absolute(y_pred - yte)) / n
    Rmse_1 = (np.sum((y_pred - yte) ** 2) / n) ** 0.5
    Score = score(yte, y_pred)

    # Late-life metrics.  The original sliced [inx_mid:-1] and re-appended the
    # last row with vstack, which is simply [inx_mid:] for n >= 2.
    inx_mid = math.ceil(n / 2)
    y_pred_half = y_pred[inx_mid:]
    yte_half = yte[inx_mid:]
    Mae_second_half = np.sum(np.absolute(y_pred_half - yte_half)) / len(yte_half)
    Rmse_second_half = (np.sum((y_pred_half - yte_half) ** 2) / len(yte_half)) ** 0.5
    return Mae_1, Rmse_1, Score, Mae_second_half, Rmse_second_half


# In[30]:
# wyw's code
# Run a single fold: bearing 2_5 (index 4 of train_data_list) is held out as
# the test bearing, the other four bearings form the training set.
score_list = []
result = []
xtr, ytr, xte, yte = train_data_list[4]
print(xtr.shape)
print(xte.shape)

#%%
Mae, Rmse, Score, Mae_second_half, Rmse_second_half = run_model_1(xtr, ytr, xte, yte, 0, 1)

# # // Deng Lei's code
# j=0
# mae_list =[]
# rmse_list =[]
# score_list=[]
# result =[]
# for train_data in train_data_list:
#     j=j+1
#     xtr = train_data[0]
#     ytr = train_data[1]
#     xte = train_data[2]
#     yte = train_data[3]
# #     print('xtr',xtr.shape)
# #     print('ytr',ytr.shape)
# #     print('xte',xte.shape)
# #     print('yte',yte.shape)
#     for i in range(5):
#         Mae,Rmse,Score=run_model_1(xtr,ytr,xte,yte,i,j)
# #         mae_list.append(Mae)
# #         rmse_list.append(Rmse)
#         score_list.append(Score)
# #     print(mae_list)
# #     print(rmse_list)
#     print(score_list)
# #     result.append(mae_list)
# #     result.append(rmse_list)
#     result.append(score_list)



#%%
# print(result)
# fin_re=result
# fin_re =np.array(fin_re) .reshape(-1,5)
# print(fin_re)





