from keras.layers import Input,concatenate,Embedding,UpSampling1D,BatchNormalization,Conv1D,MaxPooling1D,Dense,Flatten,Lambda,Dropout,Concatenate,LeakyReLU,BatchNormalization,Reshape,Activation,GlobalAveragePooling1D,AveragePooling1D
from keras import regularizers
from keras.callbacks import EarlyStopping
from keras.optimizers import Adam
from keras.models import Sequential, Model
from keras import backend as K
from keras import Model, regularizers

from scipy import signal
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import xlrd
import os
from sklearn.preprocessing import MinMaxScaler
from keras.models import load_model
import os
# Pin execution to the first GPU.  CUDA_VISIBLE_DEVICES expects bare device
# indices ('0', '0,1', ...), not TensorFlow device strings: an unparseable
# value such as '/gpu:0' makes CUDA hide every GPU and the session silently
# falls back to the CPU.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

import keras
# TF1-style session setup: allocate GPU memory on demand instead of
# grabbing the whole card up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
keras.backend.tensorflow_backend.set_session(tf.Session(config=config))

#%%
# Read the horizontal-force channel (CSV column 3) of one cutter.
def readfile_h(path):
    """Read column 3 from every CSV under *path* into one 1-D array.

    Each file is one cutting pass; all passes are concatenated in
    directory-listing order.  NOTE(review): os.listdir gives no guaranteed
    order — enable the sort below if pass order matters (readfile_z sorts).
    """
    files = os.listdir(path)
    # Sort numerically on the digits between position 3 and '.csv'.
    # Left disabled to match the original behaviour.
    # files.sort(key=lambda x: int(x[3:-4]))
    chunks = []
    for file in files:
        info = path + "/" + file
        # np.loadtxt accepts a path directly; the original open(info, "rb")
        # handle was never closed (resource leak).
        data = np.loadtxt(info, delimiter=',', skiprows=0)
        chunks.append(data[:, 3])
    # Single concatenate instead of np.hstack per file (O(n) vs O(n^2)).
    return np.concatenate(chunks) if chunks else np.array([])

# Raw horizontal channel (CSV column 3) for cutters c1..c6.
_c_dir = 'E://Datasets//PHM data challenge//2010 PHM Society Conference Data Challenge-cutter//PHM2010//downsampling//c'
train3_1_h_o = readfile_h(_c_dir + '1')
train3_2_h_o = readfile_h(_c_dir + '2')
train3_3_h_o = readfile_h(_c_dir + '3')
train3_4_h_o = readfile_h(_c_dir + '4')
train3_5_h_o = readfile_h(_c_dir + '5')
train3_6_h_o = readfile_h(_c_dir + '6')


# Quick visual check of the raw horizontal-force signal of each cutter.
plt.figure(figsize=(20, 15))
plt.subplot(3, 2, 1)
plt.plot(train3_1_h_o)
plt.subplot(3, 2, 2)
plt.plot(train3_2_h_o)
plt.subplot(3, 2, 3)
plt.plot(train3_3_h_o)
plt.subplot(3, 2, 4)
plt.plot(train3_4_h_o)
plt.subplot(3, 2, 5)
plt.plot(train3_5_h_o)
# BUG FIX: cutter 6 was loaded but never plotted (the commented v/z plot
# blocks below show all six panels were intended).
plt.subplot(3, 2, 6)
plt.plot(train3_6_h_o)
plt.show()


# Read the vertical-force channel (CSV column 4) of one cutter.
def readfile_v(path):
    """Read column 4 from every CSV under *path* into one 1-D array.

    Files are concatenated in directory-listing order (sort disabled to
    match original behaviour — see readfile_h).
    """
    files = os.listdir(path)
    # Sort numerically on the digits between position 3 and '.csv'.
    # files.sort(key=lambda x: int(x[3:-4]))
    chunks = []
    for file in files:
        info = path + "/" + file
        # np.loadtxt accepts a path directly; the original open(info, "rb")
        # handle was never closed (resource leak).
        data = np.loadtxt(info, delimiter=',', skiprows=0)
        chunks.append(data[:, 4])
    # Single concatenate instead of np.hstack per file (O(n) vs O(n^2)).
    return np.concatenate(chunks) if chunks else np.array([])


# Raw vertical channel (CSV column 4) for cutters c1..c6.
_c_dir = 'E://Datasets//PHM data challenge//2010 PHM Society Conference Data Challenge-cutter//PHM2010//downsampling//c'
train3_1_v_o = readfile_v(_c_dir + '1')
train3_2_v_o = readfile_v(_c_dir + '2')
train3_3_v_o = readfile_v(_c_dir + '3')
train3_4_v_o = readfile_v(_c_dir + '4')
train3_5_v_o = readfile_v(_c_dir + '5')
train3_6_v_o = readfile_v(_c_dir + '6')

# plt.figure(figsize=(20,15))
# plt.subplot(3, 2, 1)
# plt.plot(train3_1_v_o)
# plt.subplot(3, 2, 2)
# plt.plot(train3_2_v_o)
# plt.subplot(3, 2, 3)
# plt.plot(train3_3_v_o)
# plt.subplot(3, 2, 4)
# plt.plot(train3_4_v_o)
# plt.subplot(3, 2, 5)
# plt.plot(train3_5_v_o)
# plt.subplot(3, 2, 6)
# plt.plot(train3_6_v_o)
# plt.show()
# plt.show()


# Read the z-axis channel (CSV column 5) of one cutter.
def readfile_z(path):
    """Read column 5 from every CSV under *path* into one 1-D array.

    Unlike readfile_h/readfile_v, files ARE sorted here by the number
    embedded between position 3 and the '.csv' suffix, so passes come out
    in chronological order.
    """
    files = os.listdir(path)
    # Sort numerically on the digits between position 3 and '.csv'.
    files.sort(key=lambda x: int(x[3:-4]))
    chunks = []
    for file in files:
        info = path + "/" + file
        # np.loadtxt accepts a path directly; the original open(info, "rb")
        # handle was never closed (resource leak).
        data = np.loadtxt(info, delimiter=',', skiprows=0)
        chunks.append(data[:, 5])
    # Single concatenate instead of np.hstack per file (O(n) vs O(n^2)).
    return np.concatenate(chunks) if chunks else np.array([])


# Raw z-axis channel (CSV column 5) for cutters c1..c6.
_c_dir = 'E://Datasets//PHM data challenge//2010 PHM Society Conference Data Challenge-cutter//PHM2010//downsampling//c'
train3_1_z_o = readfile_z(_c_dir + '1')
train3_2_z_o = readfile_z(_c_dir + '2')
train3_3_z_o = readfile_z(_c_dir + '3')
train3_4_z_o = readfile_z(_c_dir + '4')
train3_5_z_o = readfile_z(_c_dir + '5')
train3_6_z_o = readfile_z(_c_dir + '6')

# plt.figure(figsize=(20,15))
# plt.subplot(3, 2, 1)
# plt.plot(train3_1_z_o)
# plt.subplot(3, 2, 2)
# plt.plot(train3_2_z_o)
# plt.subplot(3, 2, 3)
# plt.plot(train3_3_z_o)
# plt.subplot(3, 2, 4)
# plt.plot(train3_4_z_o)
# plt.subplot(3, 2, 5)
# plt.plot(train3_5_z_o)
# plt.subplot(3, 2, 6)
# plt.plot(train3_6_z_o)
# plt.show()
# plt.show()


# z-score the horizontal-force signal of each cutter (zero mean, unit
# population std, ddof=0 — same as the original).
# The mean3_* temporaries of the original were write-only (overwritten by
# every later normalization section, never read), so they are inlined.
train3_1_h = (train3_1_h_o - np.mean(train3_1_h_o)) / np.std(train3_1_h_o)
train3_2_h = (train3_2_h_o - np.mean(train3_2_h_o)) / np.std(train3_2_h_o)
train3_3_h = (train3_3_h_o - np.mean(train3_3_h_o)) / np.std(train3_3_h_o)
train3_4_h = (train3_4_h_o - np.mean(train3_4_h_o)) / np.std(train3_4_h_o)
train3_5_h = (train3_5_h_o - np.mean(train3_5_h_o)) / np.std(train3_5_h_o)
train3_6_h = (train3_6_h_o - np.mean(train3_6_h_o)) / np.std(train3_6_h_o)


# One row per cutting pass (2000 samples each) for the horizontal channel.
xtr3_1_h = train3_1_h.reshape(-1, 2000)
xtr3_2_h = train3_2_h.reshape(-1, 2000)
xtr3_3_h = train3_3_h.reshape(-1, 2000)
xtr3_4_h = train3_4_h.reshape(-1, 2000)
xtr3_5_h = train3_5_h.reshape(-1, 2000)
xtr3_6_h = train3_6_h.reshape(-1, 2000)
print(xtr3_1_h.shape)
print(xtr3_2_h.shape)
print(xtr3_3_h.shape)
print(xtr3_4_h.shape)
print(xtr3_5_h.shape)
print(xtr3_6_h.shape)

# z-score the vertical-force signal of each cutter (population std, ddof=0).
# Write-only mean3_* temporaries of the original are inlined.
train3_1_v = (train3_1_v_o - np.mean(train3_1_v_o)) / np.std(train3_1_v_o)
train3_2_v = (train3_2_v_o - np.mean(train3_2_v_o)) / np.std(train3_2_v_o)
train3_3_v = (train3_3_v_o - np.mean(train3_3_v_o)) / np.std(train3_3_v_o)
train3_4_v = (train3_4_v_o - np.mean(train3_4_v_o)) / np.std(train3_4_v_o)
train3_5_v = (train3_5_v_o - np.mean(train3_5_v_o)) / np.std(train3_5_v_o)
train3_6_v = (train3_6_v_o - np.mean(train3_6_v_o)) / np.std(train3_6_v_o)


# One row per cutting pass for the vertical channel.
xtr3_1_v = train3_1_v.reshape(-1, 2000)
xtr3_2_v = train3_2_v.reshape(-1, 2000)
xtr3_3_v = train3_3_v.reshape(-1, 2000)
xtr3_4_v = train3_4_v.reshape(-1, 2000)
xtr3_5_v = train3_5_v.reshape(-1, 2000)
xtr3_6_v = train3_6_v.reshape(-1, 2000)
print(xtr3_1_v.shape)
print(xtr3_2_v.shape)
print(xtr3_4_v.shape)

# z-score the z-axis signal of each cutter (population std, ddof=0).
# Write-only mean3_* temporaries of the original are inlined.
train3_1_z = (train3_1_z_o - np.mean(train3_1_z_o)) / np.std(train3_1_z_o)
train3_2_z = (train3_2_z_o - np.mean(train3_2_z_o)) / np.std(train3_2_z_o)
train3_3_z = (train3_3_z_o - np.mean(train3_3_z_o)) / np.std(train3_3_z_o)
train3_4_z = (train3_4_z_o - np.mean(train3_4_z_o)) / np.std(train3_4_z_o)
train3_5_z = (train3_5_z_o - np.mean(train3_5_z_o)) / np.std(train3_5_z_o)
train3_6_z = (train3_6_z_o - np.mean(train3_6_z_o)) / np.std(train3_6_z_o)

# One row per cutting pass for the z-axis channel.
xtr3_1_z = train3_1_z.reshape(-1, 2000)
xtr3_2_z = train3_2_z.reshape(-1, 2000)
xtr3_3_z = train3_3_z.reshape(-1, 2000)
xtr3_4_z = train3_4_z.reshape(-1, 2000)
xtr3_5_z = train3_5_z.reshape(-1, 2000)
xtr3_6_z = train3_6_z.reshape(-1, 2000)
print(xtr3_1_z.shape)
print(xtr3_2_z.shape)
print(xtr3_4_z.shape)

# Generic reader for the remaining channels.
def readfile(path, i):
    """Read column *i* from every CSV under *path* into one 1-D array.

    Files are sorted numerically on the digits between position 3 and the
    '.csv' suffix, so passes come out in chronological order.
    """
    print(path)
    files = os.listdir(path)
    # Sort numerically on the embedded pass number.
    files.sort(key=lambda x: int(x[3:-4]))
    chunks = []
    for file in files:
        info = path + "/" + file
        # np.loadtxt accepts a path directly; the original open(info, "rb")
        # handle was never closed (resource leak).
        data = np.loadtxt(info, delimiter=',', skiprows=0)
        chunks.append(data[:, i])
    # Single concatenate instead of np.hstack per file (O(n) vs O(n^2)).
    return np.concatenate(chunks) if chunks else np.array([])


# Load raw CSV columns 0-2 for all six cutters:
# train_origin[col][cutter-1] is the concatenated signal of that channel.
train_origin = []
for i in range(3):
    per_cutter = [
        readfile('E://Datasets//PHM data challenge//2010 PHM Society Conference Data Challenge-cutter//PHM2010//downsampling//c' + str(j), i)
        for j in range(1, 7)
    ]
    train_origin.append(per_cutter)
    print(i, "行刀具已添加")


# Cutter c1..c6 signals for raw column 0.
(train3_1_1_o, train3_2_1_o, train3_3_1_o,
 train3_4_1_o, train3_5_1_o, train3_6_1_o) = train_origin[0]


# z-score the column-0 signal of each cutter (population std, ddof=0).
# Write-only mean3_* temporaries of the original are inlined.
train3_1_1 = (train3_1_1_o - np.mean(train3_1_1_o)) / np.std(train3_1_1_o)
train3_2_1 = (train3_2_1_o - np.mean(train3_2_1_o)) / np.std(train3_2_1_o)
train3_3_1 = (train3_3_1_o - np.mean(train3_3_1_o)) / np.std(train3_3_1_o)
train3_4_1 = (train3_4_1_o - np.mean(train3_4_1_o)) / np.std(train3_4_1_o)
train3_5_1 = (train3_5_1_o - np.mean(train3_5_1_o)) / np.std(train3_5_1_o)
train3_6_1 = (train3_6_1_o - np.mean(train3_6_1_o)) / np.std(train3_6_1_o)


# One row per cutting pass for raw column 0.
xtr3_1_1 = train3_1_1.reshape(-1, 2000)
xtr3_2_1 = train3_2_1.reshape(-1, 2000)
xtr3_3_1 = train3_3_1.reshape(-1, 2000)
xtr3_4_1 = train3_4_1.reshape(-1, 2000)
xtr3_5_1 = train3_5_1.reshape(-1, 2000)
xtr3_6_1 = train3_6_1.reshape(-1, 2000)
print(xtr3_1_1.shape)
print(xtr3_2_1.shape)
print(xtr3_4_1.shape)


# Cutter c1..c6 signals for raw column 1.
(train3_1_2_o, train3_2_2_o, train3_3_2_o,
 train3_4_2_o, train3_5_2_o, train3_6_2_o) = train_origin[1]


# z-score the column-1 signal of each cutter (population std, ddof=0).
# Write-only mean3_* temporaries of the original are inlined.
train3_1_2 = (train3_1_2_o - np.mean(train3_1_2_o)) / np.std(train3_1_2_o)
train3_2_2 = (train3_2_2_o - np.mean(train3_2_2_o)) / np.std(train3_2_2_o)
train3_3_2 = (train3_3_2_o - np.mean(train3_3_2_o)) / np.std(train3_3_2_o)
train3_4_2 = (train3_4_2_o - np.mean(train3_4_2_o)) / np.std(train3_4_2_o)
train3_5_2 = (train3_5_2_o - np.mean(train3_5_2_o)) / np.std(train3_5_2_o)
train3_6_2 = (train3_6_2_o - np.mean(train3_6_2_o)) / np.std(train3_6_2_o)


# One row per cutting pass for raw column 1.
xtr3_1_2 = train3_1_2.reshape(-1, 2000)
xtr3_2_2 = train3_2_2.reshape(-1, 2000)
xtr3_3_2 = train3_3_2.reshape(-1, 2000)
xtr3_4_2 = train3_4_2.reshape(-1, 2000)
xtr3_5_2 = train3_5_2.reshape(-1, 2000)
xtr3_6_2 = train3_6_2.reshape(-1, 2000)
print(xtr3_1_2.shape)
print(xtr3_2_2.shape)
print(xtr3_4_2.shape)


# Cutter c1..c6 signals for raw column 2.
(train3_1_3_o, train3_2_3_o, train3_3_3_o,
 train3_4_3_o, train3_5_3_o, train3_6_3_o) = train_origin[2]


# z-score the column-2 signal of each cutter (population std, ddof=0).
# Write-only mean3_* temporaries of the original are inlined.
train3_1_3 = (train3_1_3_o - np.mean(train3_1_3_o)) / np.std(train3_1_3_o)
train3_2_3 = (train3_2_3_o - np.mean(train3_2_3_o)) / np.std(train3_2_3_o)
train3_3_3 = (train3_3_3_o - np.mean(train3_3_3_o)) / np.std(train3_3_3_o)
train3_4_3 = (train3_4_3_o - np.mean(train3_4_3_o)) / np.std(train3_4_3_o)
train3_5_3 = (train3_5_3_o - np.mean(train3_5_3_o)) / np.std(train3_5_3_o)
train3_6_3 = (train3_6_3_o - np.mean(train3_6_3_o)) / np.std(train3_6_3_o)



# One row per cutting pass for raw column 2.
xtr3_1_3 = train3_1_3.reshape(-1, 2000)
xtr3_2_3 = train3_2_3.reshape(-1, 2000)
xtr3_3_3 = train3_3_3.reshape(-1, 2000)
xtr3_4_3 = train3_4_3.reshape(-1, 2000)
xtr3_5_3 = train3_5_3.reshape(-1, 2000)
xtr3_6_3 = train3_6_3.reshape(-1, 2000)
print(xtr3_1_3.shape)
print(xtr3_2_3.shape)
print(xtr3_4_3.shape)

# Combine the six per-pass channel matrices into (passes, 2000, 6).
# BUG FIX: the original hstack + reshape((-1,2000,6)) laid each pass out as
# [h0..h1999, v0..v1999, ...] and then reshaped row-major, so element
# [pass, t, c] was row[t*6 + c] — i.e. consecutive *h* samples were spread
# across the channel axis instead of (h, v, z, c0, c1, c2) per time step.
# np.stack(..., axis=-1) produces the intended layout with the same shape.
xtr3_1 = np.stack((xtr3_1_h, xtr3_1_v, xtr3_1_z, xtr3_1_1, xtr3_1_2, xtr3_1_3), axis=-1)
xtr3_2 = np.stack((xtr3_2_h, xtr3_2_v, xtr3_2_z, xtr3_2_1, xtr3_2_2, xtr3_2_3), axis=-1)
xtr3_3 = np.stack((xtr3_3_h, xtr3_3_v, xtr3_3_z, xtr3_3_1, xtr3_3_2, xtr3_3_3), axis=-1)
xtr3_4 = np.stack((xtr3_4_h, xtr3_4_v, xtr3_4_z, xtr3_4_1, xtr3_4_2, xtr3_4_3), axis=-1)
xtr3_5 = np.stack((xtr3_5_h, xtr3_5_v, xtr3_5_z, xtr3_5_1, xtr3_5_2, xtr3_5_3), axis=-1)
xtr3_6 = np.stack((xtr3_6_h, xtr3_6_v, xtr3_6_z, xtr3_6_1, xtr3_6_2, xtr3_6_3), axis=-1)
print(xtr3_1.shape)
print(xtr3_2.shape)
print(xtr3_3.shape)
print(xtr3_4.shape)
print(xtr3_5.shape)
print(xtr3_6.shape)


# RUL labels: pass index counted backwards, so the last pass has RUL 0.
ytr3_1 = np.arange(xtr3_1.shape[0])[::-1].reshape(-1, 1)
ytr3_2 = np.arange(xtr3_2.shape[0])[::-1].reshape(-1, 1)
ytr3_3 = np.arange(xtr3_3.shape[0])[::-1].reshape(-1, 1)
ytr3_4 = np.arange(xtr3_4.shape[0])[::-1].reshape(-1, 1)
ytr3_5 = np.arange(xtr3_5.shape[0])[::-1].reshape(-1, 1)
ytr3_6 = np.arange(xtr3_6.shape[0])[::-1].reshape(-1, 1)


# In[33]:

# Leave-one-out split: cutter 1 is the test set, cutters 2-6 train.
xtr_c1 = np.vstack((xtr3_2, xtr3_3, xtr3_4, xtr3_5, xtr3_6))
# BUG FIX: the reshape result was assigned to a typo'd name (`xtrc_1`),
# leaving the un-reshaped `xtr_c1` to go into train_data_list.  Harmless
# here only because vstack already yields (-1, 2000, 6).  Keep `xtrc_1`
# bound as an alias so any later reference still works.
xtr_c1 = xtr_c1.reshape(-1, 2000, 6)
xtrc_1 = xtr_c1
print(xtrc_1.shape)

ytr_c1 = np.vstack((ytr3_2, ytr3_3, ytr3_4, ytr3_5, ytr3_6))
print(len(ytr_c1))


# Leave-one-out splits for cutters 2-6: hold one cutter out for testing,
# stack the other five as training data.

# Test = cutter 2.
xtr_c2 = np.vstack((xtr3_1, xtr3_3, xtr3_4, xtr3_5, xtr3_6)).reshape(-1, 2000, 6)
print(xtr_c2.shape)
ytr_c2 = np.vstack((ytr3_1, ytr3_3, ytr3_4, ytr3_5, ytr3_6))
print(ytr_c2.shape)

# Test = cutter 3.
xtr_c3 = np.vstack((xtr3_1, xtr3_2, xtr3_4, xtr3_5, xtr3_6)).reshape(-1, 2000, 6)
print(xtr_c3.shape)
ytr_c3 = np.vstack((ytr3_1, ytr3_2, ytr3_4, ytr3_5, ytr3_6))
print(ytr_c3.shape)

# Test = cutter 4.
xtr_c4 = np.vstack((xtr3_1, xtr3_2, xtr3_3, xtr3_5, xtr3_6)).reshape(-1, 2000, 6)
print(xtr_c4.shape)
ytr_c4 = np.vstack((ytr3_1, ytr3_2, ytr3_3, ytr3_5, ytr3_6))
print(ytr_c4.shape)

# Test = cutter 5.
xtr_c5 = np.vstack((xtr3_1, xtr3_2, xtr3_3, xtr3_4, xtr3_6)).reshape(-1, 2000, 6)
print(xtr_c5.shape)
ytr_c5 = np.vstack((ytr3_1, ytr3_2, ytr3_3, ytr3_4, ytr3_6))
print(ytr_c5.shape)

# Test = cutter 6.
xtr_c6 = np.vstack((xtr3_1, xtr3_2, xtr3_3, xtr3_4, xtr3_5)).reshape(-1, 2000, 6)
print(xtr_c6.shape)
ytr_c6 = np.vstack((ytr3_1, ytr3_2, ytr3_3, ytr3_4, ytr3_5))
print(ytr_c6.shape)

# One (train_x, train_y, test_x, test_y) tuple per held-out cutter.
train_data_list = [
    (xtr_c1, ytr_c1, xtr3_1, ytr3_1),
    (xtr_c2, ytr_c2, xtr3_2, ytr3_2),
    (xtr_c3, ytr_c3, xtr3_3, ytr3_3),
    (xtr_c4, ytr_c4, xtr3_4, ytr3_4),
    (xtr_c5, ytr_c5, xtr3_5, ytr3_5),
    (xtr_c6, ytr_c6, xtr3_6, ytr3_6),
]


# In[40]:

# Evaluation metric.
def score(y_true, y_pred):
    """Asymmetric prediction-quality score; higher is better, 1.0 is perfect.

    The sequence is split into two halves.  Each half contributes the ratio
    of asymmetric squared error (over-estimates weighted 1.1, under- or
    exact estimates 0.9) to the squared deviation of the predictions from
    the global mean of y_true.  The first half is weighted 0.35 and the
    second 0.65, so late-life errors are penalised more.

    y_true, y_pred: (n, 1) arrays (only column 0 is used).
    """
    yt = np.asarray(y_true, dtype=float)[:, 0]
    yp = np.asarray(y_pred, dtype=float)[:, 0]
    n = yp.shape[0]
    m = n // 2
    # Mean of the first n true values (matches the original loop).
    t = yt[:n].mean()
    err = yp - yt[:n]
    # Over-estimating remaining life is worse than under-estimating.
    w = np.where(err > 0, 1.1, 0.9)
    dev = yp - t
    s11 = np.sum(w[:m] * err[:m] ** 2)
    s12 = np.sum(dev[:m] ** 2)
    s21 = np.sum(w[m:] * err[m:] ** 2)
    s22 = np.sum(dev[m:] ** 2)
    return 1 - 0.35 * s11 / s12 - 0.65 * s21 / s22


# Thin Keras-backend wrappers used inside Lambda layers below (Lambda needs
# a named callable so the model graph stays serializable).
def abs_backend(inputs):
    # Element-wise absolute value.
    return K.abs(inputs)
def  expand_dim_backend(inputs):
    # Insert a length-1 axis at position 1: (batch, c) -> (batch, 1, c).
    return K.expand_dims(inputs,1)
def sign_backend(inputs):
    # Element-wise sign (-1, 0, +1).
    return K.sign(inputs)
# Alternative residual block: dilated causal convolutions plus a learned
# soft threshold (deep-residual-shrinkage style) for denoising.
def tcnBlock(incoming,filters,kernel_size,dilation_rate):
    """Dilated-causal residual block with channel-wise soft thresholding.

    incoming: (batch, steps, channels) tensor; returns a tensor with the
    same number of steps and `filters` channels.
    """
    net = incoming
    identity = incoming
    # Pre-activation: BN -> LeakyReLU -> Dropout before each convolution.
    net = BatchNormalization()(net)
#     net = Activation('relu')(net)
    net = keras.layers.LeakyReLU(alpha=0.2)(net)
    net = keras.layers.Dropout(0.3)(net)
    net = Conv1D(filters,kernel_size,padding='causal',dilation_rate=dilation_rate ,kernel_regularizer=regularizers.l2(1e-3))(net)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)
#     net = keras.layers.LeakyReLU(alpha=0.2)(net)
    net = keras.layers.Dropout(0.3)(net)
    net = Conv1D(filters,kernel_size,padding='causal',dilation_rate=dilation_rate, kernel_regularizer=regularizers.l2(1e-3))(net)
    
    # Global average of |activation| per channel -> (batch, filters).
    net_abs = Lambda(abs_backend)(net)
    abs_mean = GlobalAveragePooling1D()(net_abs)
    # Small bottleneck that learns a per-channel coefficient in (0, 1)
    # (output width = number of channels).
    scales = Dense(filters, activation=None, kernel_initializer='he_normal', 
                       kernel_regularizer=regularizers.l2(1e-4))(abs_mean)
    scales = BatchNormalization()(scales)
    scales = Activation('relu')(scales)
    scales = Dense(filters, activation='sigmoid', kernel_regularizer=regularizers.l2(1e-4))(scales)
    scales = Lambda(expand_dim_backend)(scales)
   # Per-channel threshold = mean|x| * learned coefficient.
    # Soft thresholding: sign(x) * max(|x| - thres, 0).
    thres = keras.layers.multiply([abs_mean, scales])
    sub = keras.layers.subtract([net_abs, thres])
    zeros = keras.layers.subtract([sub, sub])
    n_sub = keras.layers.maximum([sub, zeros])
    net = keras.layers.multiply([Lambda(sign_backend)(net), n_sub])
    
    # Project the shortcut only when the channel count changes.
    if identity.shape[-1]==filters:
        shortcut=identity
    else:
        shortcut=Conv1D(filters,kernel_size,padding = 'same')(identity)  # shortcut projection
        
    net = keras.layers.add([net,shortcut])
    return net



def build_tcn():
    """Build the TCN regression model: one (2000, 6) window -> scalar RUL."""
    inp = Input(shape=(2000, 6))
    x = Conv1D(16, 12, strides=4, padding='causal',
               kernel_regularizer=regularizers.l2(1e-3))(inp)
    x = MaxPooling1D(4)(x)
    x = keras.layers.Dropout(0.4)(x)
    # Three shrinkage blocks: growing dilation, shrinking width.
    for width, dilation in ((12, 1), (6, 2), (4, 4)):
        x = tcnBlock(x, width, 3, dilation)
    x = GlobalAveragePooling1D()(x)
    # ReLU output keeps the predicted remaining life non-negative.
    out = Dense(1, activation='relu')(x)
    return Model(inputs=inp, outputs=out)



def plot_fig(ytr,y_pred,i,j):
    """Plot predicted vs. true RUL and save it as a PNG.

    ytr / y_pred: (n, 1) arrays of true and predicted RUL.
    i: repetition index, j: cutter index — both only select the filename.
    """
    from matplotlib.ticker import FuncFormatter
    fig, ax = plt.subplots(figsize=(7, 5))
    # NOTE(review): the title says "Bearing" although the data are milling
    # cutters — kept verbatim for output compatibility; confirm the label.
    ax.set_title('Bearing B3_'+str(j), fontsize=12)
    ax.set_xlabel('Time(min)', fontsize=12)
    ax.set_ylabel('RUL', fontsize=12)

    # Prediction and ground-truth curves over the pass index.
    epochs = range(1, len(y_pred) + 1)
    ax.plot(epochs, y_pred, label="Proposed Method")
    ax.plot(epochs, ytr, label="Ground Truth")
    ax.legend(loc=0, numpoints=1)

    # Percent-style tick labels on the y axis.
    def to_percent(temp, position):
        return '%1.0f'%(temp) + '%'
    plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))

    # Save, then close the figure: the original leaked one open figure per
    # call, which accumulates over repeated training runs.
    plt.savefig('D://Contributions//J13//cutter_'+str(j)+'_'+str(i)+'.png', bbox_inches='tight')
    plt.close(fig)



def save_data(yte,y_pred,i,j):
    """Write [true, predicted] RUL columns to an .xls file (cutter j, run i)."""
    import pandas as pd
    combined = np.hstack((yte, y_pred))
    pd.DataFrame(combined).to_excel('D://Contributions//J13//cutter_'+str(j)+'_'+str(i)+'.xls')


def fit_model(xtr,ytr,val_x,val_y):
    """Train a fresh TCN on (xtr, ytr), validating on (val_x, val_y).

    MSE loss / MAE metric, batch size 128, 800 epochs.  Returns the
    trained model.
    """
    model = build_tcn()
    # BUG FIX: the optimizer instance used to be bound to the name `Adam`,
    # shadowing the `Adam` class imported from keras.optimizers.
    optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(optimizer=optimizer, loss='mse', metrics=['mae'])
    model.fit(xtr, ytr, batch_size=128, epochs=800, verbose=1,
              validation_data=(val_x, val_y))
    return model


def run_model_1(xtr,ytr,xte,yte,i,j):
    """Train one model and evaluate it on the held-out cutter.

    Saves the prediction plot and the raw curves (run i, cutter j) and
    returns (MAE, RMSE, score) on the test set.
    """
    model = fit_model(xtr, ytr, xte, yte)
    # Removed dead code: the original ran a full predict over the training
    # set (`y_target`) whose result was never used.
    y_pred = model.predict(xte)
    plot_fig(yte, y_pred, i, j)
    save_data(yte, y_pred, i, j)
    mae = np.mean(np.absolute(y_pred - yte))
    rmse = np.sqrt(np.mean((y_pred - yte) ** 2))
    return mae, rmse, score(yte, y_pred)


#%%
score_list = []
result = []

# Hold out cutter 5 (index 4) for this single run.
xtr, ytr, xte, yte = train_data_list[4]
print(xtr.shape)
print(xte.shape)

#%%
Mae, Rmse, Score = run_model_1(xtr, ytr, xte, yte, 0, 1)

# # In[47]:
#
#
# j=0
# mae_list =[]
# rmse_list =[]
# score_list=[]
# result =[]
# for train_data in train_data_list:
#     j=j+1
#     xtr = train_data[0]
#     ytr = train_data[1]
#     xte = train_data[2]
#     yte = train_data[3]
# #     print('xtr',xtr.shape)
# #     print('ytr',ytr.shape)
# #     print('xte',xte.shape)
# #     print('yte',yte.shape)
#     for i in range(5):
#         Mae,Rmse,Score=run_model_1(xtr,ytr,xte,yte,i,j)
# #         mae_list.append(Mae)
# #         rmse_list.append(Rmse)
#         score_list.append(Score)
# #     print(mae_list)
# #     print(rmse_list)
#     print(score_list)
# #     result.append(mae_list)
# #     result.append(rmse_list)
#     result.append(score_list)
#
#
# # In[48]:
#
#
# print(result)
#
#
# # In[50]:
#
#
# print(np.array(result).reshape(-1,5))
#
#
# # In[ ]:




