import scipy.io as io
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import tree
import tensorflow as tf
from sklearn import metrics
from sklearn.preprocessing import OneHotEncoder
#%%  Load training and test sets from MATLAB .mat files
xtrain=io.loadmat('xtrain.mat');    xtrain=xtrain['inbase']
ytrain=io.loadmat('ytrain.mat');    ytrain=ytrain['outbase']
xtest=io.loadmat('xtest');          xtest=xtest['P_test']
ytest=io.loadmat('ytest');          ytest=ytest['T_test']
normal=io.loadmat('normal');

ytraint=ytrain[:,0];    ytestt=ytest[:,0]   # fault type (class id)
ytrainl=ytrain[:,1];    ytestl=ytest[:,1]   # fault level (severity)

# One-hot encode the fault-type labels.
# NOTE(review): `sparse=False` was renamed `sparse_output` in scikit-learn
# 1.2 and removed in 1.4 -- kept as-is to match the version this script
# was written for.
enc = OneHotEncoder(sparse=False)
ytraint = enc.fit_transform(ytraint.reshape(-1,1))
# BUG FIX: the original called fit_transform on the *test* labels as well,
# which re-fitted the encoder on the test set's categories (the test set
# lacks the "normal" class, giving only 6 columns) and then manually
# padded a zero column with hstack.  Using transform() with the encoder
# fitted on the training labels aligns the columns with the training
# classes directly, so no manual padding is needed.
ytestt = enc.transform(ytestt.reshape(-1,1))

#%% Dimensionality reduction with PCA (keep 99% of the variance)
from sklearn.decomposition import PCA
pca = PCA(n_components=0.99, svd_solver='full')
pca.fit(xtrain)
xtrain = pca.transform(xtrain)
xtest = pca.transform(xtest)

#%%  Fault-type classification model (7-class softmax MLP)
modelt=tf.keras.Sequential()
modelt.add(tf.keras.layers.Dense(280, input_shape=xtrain.shape[1:], activation='relu'))
modelt.add(tf.keras.layers.Dropout(rate=0.6))
modelt.add(tf.keras.layers.Dense(140,activation='tanh'))
modelt.add(tf.keras.layers.Dropout(rate=0.5))
modelt.add(tf.keras.layers.Dense(70,activation='sigmoid'))
modelt.add(tf.keras.layers.Dropout(rate=0.4))
# L2 weight regularisation on the penultimate layer.
# BUG FIX: the original referenced `kersa.regularizers.l2`, a typo for
# `tf.keras.regularizers.l2`, which raised NameError at build time.
modelt.add(tf.keras.layers.Dense(35,kernel_regularizer=tf.keras.regularizers.l2(0.001),activation='relu'))

# 7 output classes -> softmax probabilities (matches the one-hot labels)
modelt.add(tf.keras.layers.Dense(7,  activation='softmax'))

modelt.summary()

modelt.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['acc']
              )
history = modelt.fit(xtrain,ytraint,
                    epochs=229,
                    validation_data=(xtest,ytestt)
                    )
# Accuracy and loss learning curves, train (red) vs validation (blue)
plt.subplot(1,2,1)
plt.plot(history.history['acc'],'r',label='acc')
plt.plot(history.history['val_acc'],'b',label='val_acc')
plt.legend()

plt.subplot(1,2,2)
plt.plot(history.history['loss'],'r',label='loss')
plt.plot(history.history['val_loss'],'b',label='val_loss')
plt.legend()

#%%  Fault-level (severity) regression model
ytrainl=ytrainl.reshape(-1,1)
modell=tf.keras.Sequential()
modell.add(tf.keras.layers.Dense(35,input_shape=xtrain.shape[1:],
                                  kernel_regularizer=tf.keras.regularizers.l2(70),
                                 activation='relu'))
modell.add(tf.keras.layers.Dropout(rate=0.029))
modell.add(tf.keras.layers.Dense(1))
modell.summary()

# Halve the learning rate when val_loss has not improved for 30 epochs
lr_reduce =tf.keras.callbacks.ReduceLROnPlateau('val_loss', patience=30,factor=0.5)

modell.compile(optimizer='adam',
               loss='mse',
               metrics=['mae','mse'])
history=modell.fit(xtrain,ytrainl,
                   epochs=1500,
                   callbacks=[lr_reduce],
                   validation_data=(xtest,ytestl))
pre=modell.predict(xtest)

# BUG FIX: figure 1 plots the MAE learning curves, but the legend labels
# said 'real'/'predict'; label them as the train/validation MAE they are.
plt.figure(1)
plt.plot(history.history['mae'],'r',label='mae')
plt.plot(history.history['val_mae'],'b',label='val_mae')
plt.legend()
# Figure 2: predicted vs. actual fault level on the test set
plt.figure(2)
plt.plot(ytestl,'r',label='real')
plt.plot(pre,'b',label='predict')
plt.legend()
print('mae:',history.history['val_mae'][-1])
print('mse:',history.history['val_mse'][-1])

# MSE learning curve, skipping the noisy first 100 epochs
# (same label fix as above: these are mse/val_mse, not real/predict).
plt.figure(figsize=(8,2))
plt.plot(history.history['mse'][100:1600],'r',label='mse')
plt.plot(history.history['val_mse'][100:1600],'b',label='val_mse')
plt.legend()
ax1=plt.gca()
ax1.set_title('MSE learning curve')
# (removed: unused `mae=[];mse=[]` accumulators, dead commented-out layers,
# and a stray debug `plt.plot(xtrain[0])` that drew onto this figure)

    




#%% Save the trained regression model to disk (HDF5 format)
modell.save('nn_reg_model.h5')
# Reload it -- round-trip check and starting point for later cells
modell=tf.keras.models.load_model('nn_reg_model.h5')


#%%   Classic ML for the fault-level regression task
# Decision-tree regressor with an exhaustive grid search over its
# hyper-parameters (searched on the combined train+test set, as the
# original did).
x=np.vstack((xtrain,xtest))
y=np.vstack((ytrainl.reshape(-1,1),ytestl.reshape(-1,1)))

from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV
reg=DecisionTreeRegressor(random_state=1)
# NOTE(review): 'mse'/'mae' criteria were renamed 'squared_error' /
# 'absolute_error' and max_features='auto' was removed in newer
# scikit-learn; kept to match the version this script targets.
para_dict={'criterion':['mse','friedman_mse','mae'],
           'max_depth':np.arange(3,11,1),
           'max_features':["auto", "sqrt", "log2"]}
# FIX: use a distinct name for the fitted search object instead of
# shadowing the GridSearchCV class itself, and ravel() the column-vector
# target to avoid sklearn's DataConversionWarning.
grid = GridSearchCV(reg, para_dict, cv=10)
grid = grid.fit(x, y.ravel())
print(grid.best_params_);print(grid.best_score_)

# Refit on the training split only, with the best hyper-parameters
reg=DecisionTreeRegressor(random_state=1,criterion=grid.best_params_['criterion'],
        max_depth=grid.best_params_['max_depth'],
        max_features=grid.best_params_['max_features']).fit(xtrain,ytrainl)
pre=reg.predict(xtest)
plt.figure()
plt.plot(ytestl,'r',label='real')
plt.plot(pre,'b',label='predict')
plt.legend()
#%%   Linear regression (works better on the PCA-reduced features)
from sklearn import metrics

from sklearn.linear_model import LinearRegression as LR
regLR = LR(fit_intercept=True)
regLR.fit(xtrain, ytrainl)
pre = regLR.predict(xtest)

# Predicted vs. actual fault level on the test set
plt.figure()
plt.plot(ytestl, 'r', label='real')
plt.plot(pre, 'b', label='predict')
plt.legend()
print('mae:', metrics.mean_absolute_error(ytestl, pre))
print('mse:', metrics.mean_squared_error(ytestl, pre))

#%%   Ridge regression with grid search
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV
reg=Ridge()
x=np.vstack((xtrain,xtest))
y=np.vstack((ytrainl.reshape(-1,1),ytestl.reshape(-1,1)))

para_dict={'alpha':np.arange(0.1,1,0.1),
           'solver':['auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'],
           }
# FIX: don't shadow the GridSearchCV class with its own instance;
# ravel() avoids the column-vector target warning.
gscv = GridSearchCV(reg, para_dict, cv=10)
gscv.fit(x, y.ravel())
print(gscv.best_params_)
print(gscv.best_score_)

# BUG FIX: the original refit with only the best solver (silently
# dropping the best alpha) and then printed metrics computed from `pre`,
# which still held the *linear-regression* predictions from the previous
# cell.  Use both tuned parameters and this model's own predictions.
reg=Ridge(alpha=gscv.best_params_['alpha'],
          solver=gscv.best_params_['solver']).fit(xtrain,ytrainl)
pre=reg.predict(xtest)
plt.plot(ytestl,'r',label='real')
plt.plot(pre,'b',label='predict')
plt.legend()
print('mae:',metrics.mean_absolute_error(ytestl,pre))
print('mse:',metrics.mean_squared_error(ytestl,pre))



# 10-fold cross-validation of a plain linear regression
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
x=np.vstack((xtrain,xtest))
y=np.vstack((ytrainl.reshape(-1,1),ytestl.reshape(-1,1)))

# BUG FIX: the original did `regLR = regLR().fit(...)` -- but regLR was
# already a *fitted instance* at this point, so calling it raised
# TypeError; it then rebound regLR to the score array, which would have
# broken the VotingRegressor cell below that expects regLR to be an
# estimator.  Use a fresh estimator and a separate name for the scores.
cv_scores = cross_val_score(LinearRegression(), x, y.ravel(), cv=10)
plt.plot(cv_scores)









#%%  Lasso
#   Lasso regression (the defaults already work well)
from sklearn.linear_model import Lasso
from sklearn.model_selection import RandomizedSearchCV

regLA=Lasso(alpha=1).fit(xtrain,ytrainl)
pre=regLA.predict(xtest)
plt.plot(ytestl,'r',label='real')
plt.plot(regLA.predict(xtest),'b',label='predict')
plt.legend()
print('mae:',metrics.mean_absolute_error(ytestl,pre))
print('mse:',metrics.mean_squared_error(ytestl,pre))

#  Randomized hyper-parameter search
regLA=Lasso()
x=np.vstack((xtrain,xtest))
y=np.vstack((ytrainl.reshape(-1,1),ytestl.reshape(-1,1)))
para_dict={'alpha':np.arange(0.1,1.5,0.1),
           'random_state':np.arange(1,80)
           }
# FIX: name the search object instead of shadowing the class alias;
# ravel() avoids the column-vector target warning.
search = RandomizedSearchCV(regLA, para_dict, cv=10)
search.fit(x, y.ravel())
print(search.best_params_)
print(search.best_score_)
# BUG FIX: the original read best_params_['normalize'], but 'normalize'
# was never part of the search space, so that line raised KeyError (and
# the `normalize` argument itself was removed from Lasso in sklearn 1.2).
regLA=Lasso(alpha=search.best_params_['alpha'],
            random_state=search.best_params_['random_state']).fit(xtrain,ytrainl)
plt.plot(ytestl,'r',label='real')
plt.plot(regLA.predict(xtest),'b',label='predict')
plt.legend()
print('mae:',metrics.mean_absolute_error(ytestl,regLA.predict(xtest)))
print('mse:',metrics.mean_squared_error(ytestl,regLA.predict(xtest)))



#%%
#   Random-forest regression for the fault level
from sklearn.ensemble import RandomForestRegressor
regRF = RandomForestRegressor(n_estimators=111)
regRF.fit(xtrain, ytrainl)
pre = regRF.predict(xtest)

# Predicted vs. actual fault level on the test set
plt.figure()
plt.plot(ytestl, 'r', label='real')
plt.plot(pre, 'b', label='predict')
plt.legend()
print('mae:', metrics.mean_absolute_error(ytestl, pre))
print('mse:', metrics.mean_squared_error(ytestl, pre))

#   XGBoost
# NOTE(review): XGBRFRegressor is xgboost's *random-forest* variant, not
# the gradient-boosted XGBRegressor -- confirm which was intended.
from xgboost import XGBRFRegressor as XGBR
regXG=XGBR(n_estimators=300).fit(xtrain,ytrainl)
pre=regXG.predict(xtest)
plt.figure()
plt.plot(ytestl,'r',label='real')
plt.plot(pre,'b',label='predict')
plt.legend()
print('mae:',metrics.mean_absolute_error(ytestl,pre))
print('mse:',metrics.mean_squared_error(ytestl,pre))

#%%    Bagging (good classification accuracy: 93.3%)
from sklearn.ensemble import BaggingClassifier
# NOTE(review): ytraint is one-hot encoded (2-D) by the first cell, while
# sklearn classifiers expect 1-D class labels -- confirm this cell was
# run before the one-hot encoding step.
clf=BaggingClassifier().fit(xtrain,ytraint)
plt.plot(ytestt,'r',label='real')
plt.plot(clf.predict(xtest),'b',label='predict')
plt.legend()

# Bagging regressor for the fault level
from sklearn.ensemble import BaggingRegressor
regBA=BaggingRegressor().fit(xtrain,ytrainl)
pre=regBA.predict(xtest)
plt.figure()
plt.plot(ytestl,'r',label='real')
plt.plot(pre,'b',label='predict')
plt.legend()
print('mae:',metrics.mean_absolute_error(ytestl,pre))
print('mse:',metrics.mean_squared_error(ytestl,pre))

#%%   Extra-trees
from sklearn.ensemble import ExtraTreesClassifier
# NOTE(review): ytraint is one-hot encoded (2-D) here; sklearn
# classifiers expect 1-D class labels -- confirm intended label format.
clf=ExtraTreesClassifier().fit(xtrain,ytraint)
plt.plot(ytestt,'r',label='real')
plt.plot(clf.predict(xtest),'b',label='predict')
plt.legend()

# Extra-trees regressor for the fault level
from sklearn.ensemble import ExtraTreesRegressor
# FIX: pass n_estimators by keyword -- positional estimator arguments
# are deprecated (and now rejected) in recent scikit-learn releases.
regEX=ExtraTreesRegressor(n_estimators=700).fit(xtrain,ytrainl)
pre=regEX.predict(xtest)
plt.figure()
plt.plot(ytestl,'r',label='real')
plt.plot(pre,'b',label='predict')
plt.legend()
print('mae:',metrics.mean_absolute_error(ytestl,pre))
print('mse:',metrics.mean_squared_error(ytestl,pre))


#%%   GradientBoostingClassifier (good classification accuracy: 93.3%)
from sklearn.ensemble import GradientBoostingClassifier
# NOTE(review): ytraint is one-hot encoded (2-D) at this point; sklearn
# classifiers expect 1-D class labels -- confirm this cell predates the
# one-hot encoding step.
clf=GradientBoostingClassifier().fit(xtrain,ytraint)
plt.plot(ytestt,'r',label='real')
plt.plot(clf.predict(xtest),'b',label='predict')
plt.legend()

# Gradient-boosting regressor for the fault level
from sklearn.ensemble import GradientBoostingRegressor
regGB=GradientBoostingRegressor().fit(xtrain,ytrainl)
pre=regGB.predict(xtest)
plt.figure()
plt.plot(ytestl,'r',label='real')
plt.plot(pre,'b',label='predict')
plt.legend()
print('mae:',metrics.mean_absolute_error(ytestl,pre))
print('mse:',metrics.mean_squared_error(ytestl,pre))

#%%    Voting ensemble (cf. the sklearn ensemble docs)
from sklearn.ensemble import VotingRegressor
# Average the predictions of the previously tuned regressors
base_models = [('Bagging', regBA), ('LR', regLR), ('Lasso', regLA),
               ('RF', regRF), ('GBDT', regGB)]
vote = VotingRegressor(estimators=base_models)
vote.fit(xtrain, ytrainl)
pre = vote.predict(xtest)
plt.figure()
plt.plot(ytestl, 'r', label='real')
plt.plot(pre, 'b', label='predict')
plt.legend()
print('mae:', metrics.mean_absolute_error(ytestl, pre))
print('mse:', metrics.mean_squared_error(ytestl, pre))

#%%    Stacking (cf. the sklearn ensemble docs)
# BUG FIX: this cell was labelled "stacking" but was a verbatim copy of
# the voting cell above.  Use an actual StackingRegressor over the same
# base estimators (the final estimator defaults to RidgeCV).
from sklearn.ensemble import StackingRegressor
stack=StackingRegressor(estimators=[('Bagging',regBA),('LR',regLR),('Lasso',regLA),('RF',regRF),('GBDT',regGB)])
stack.fit(xtrain,ytrainl)
pre=stack.predict(xtest)
plt.figure()
plt.plot(ytestl,'r',label='real')
plt.plot(pre,'b',label='predict')
plt.legend()
print('mae:',metrics.mean_absolute_error(ytestl,pre))
print('mse:',metrics.mean_squared_error(ytestl,pre))


#%%   Logistic regression for the fault type
from sklearn.linear_model import LogisticRegression as LR
# NOTE(review): this rebinds LR from the class to a fitted instance, and
# ytraint is one-hot (2-D) here while LogisticRegression expects 1-D
# class labels -- verify against the intended run order.
LR=LR().fit(xtrain,ytraint)
plt.plot(ytestt,'r',label='real')
plt.plot(LR.predict(xtest),'b',label='predict')
plt.legend()
#%%  LightGBM regressor for the fault level
import lightgbm as lgbm
reg = lgbm.LGBMRegressor()
reg.fit(xtrain, ytrainl)
pre = reg.predict(xtest)

# Predicted vs. actual fault level on the test set
plt.figure()
plt.plot(ytestl, 'r', label='real')
plt.plot(pre, 'b', label='predict')
plt.legend()
print('mae:', metrics.mean_absolute_error(ytestl, pre))
print('mse:', metrics.mean_squared_error(ytestl, pre))


#%%  Decision-tree classifier for the fault type
from sklearn import tree
# NOTE(review): ytraint is one-hot encoded at this point, so stacking the
# flattened columns looks inconsistent -- and x/y are never used below.
x=np.vstack((xtrain,xtest))
y=np.vstack((ytraint.reshape(-1,1),ytestt.reshape(-1,1)))

# BUG FIX: the original instantiated DecisionTreeClassifier twice; the
# first (un-seeded) instance was immediately discarded.  Keep only the
# seeded one.
clf=tree.DecisionTreeClassifier(random_state=11,
                                # criterion=,
                                # max_depth=,
                                # max_features=
                                )
clf.fit(xtrain,ytraint)
pre=clf.predict(xtest)
plt.plot(ytestt,'r*',label='real')
plt.plot(pre,'b^',label='predict')
plt.legend()








import graphviz
# BUG FIX: export_graphviz expects feature names as strings; the original
# passed a numpy integer array, which raises in current scikit-learn.
feature_names=[str(i) for i in range(1,1993)]
dot_data=tree.export_graphviz(clf,
                              feature_names=feature_names,
                              class_names=['正常','匝间短路','线圈断股','金属异物','轴向扭曲','径向内凹','径向外凹'],
                              filled=True,         # colour nodes by class
                              rounded=True,        # rounded node corners
                              )
graph=graphviz.Source(dot_data)

#%%   Validation set: predicted vs. actual fault type
plt.plot(ytestt, 'r*', label='real')
plt.plot(pre, 'b^', label='predict')
plt.legend()
fault_labels = ['Short circuit between turns', 'Broken coil',
                'Metal foreign body', 'Axial displacement',
                'Radial inward twist', 'Radial outward twist']
plt.yticks([1, 2, 3, 4, 5, 6], fault_labels)
plt.xlabel('Sample')
plt.ylabel('Fault type')
# Hide the top and right spines
ax = plt.gca()
for side in ('right', 'top'):
    ax.spines[side].set_color('none')







#%%  Decision-tree feature contributions
# Selected spectral elements (0-based indices) and their importances
top_idx = [17, 25, 700, 1278, 1394]
imp = [clf.feature_importances_[j] for j in top_idx]
# Bar labels are the 1-based element numbers
plt.bar([str(j + 1) for j in top_idx], imp)

plt.xlabel('Element')
plt.ylabel('contribution')




# Train vs. validation predictions of the decision tree
plt.subplot(211)
plt.plot(ytraint, 'r^', label='Training data')
plt.plot(clf.predict(xtrain), 'bv', label='Training data predict')
plt.title('Training data')

plt.subplot(212)
plt.plot(ytestt, 'r^', label='real')
plt.plot(clf.predict(xtest), 'bv', label='predict')
plt.title('Validation data')
fault_labels = ['Short circuit between turns', 'Broken coil',
                'Metal foreign body', 'Axial displacement',
                'Radial inward twist', 'Radial outward twist']
plt.yticks([1, 2, 3, 4, 5, 6], fault_labels)
plt.legend()
plt.xlabel('Sample')
plt.ylabel('Fault type')
# Hide the top and right spines
ax = plt.gca()
for side in ('right', 'top'):
    ax.spines[side].set_color('none')




#%%  Plot one spectrum per sample, coloured by fault type
plt.figure(figsize=(8, 5))
colors = ['b', 'g', 'r', 'k', 'c', 'y']
for i in range(1, 55):
    # Multiples of 9 are the group boundaries; they are drawn afterwards
    # so each fault type contributes exactly one labelled legend entry.
    if i % 9 != 0:
        plt.plot(xtrain[i], colors[i // 9], lw=0.7, alpha=0.5)
labels = ['Short circuit between turns', 'Broken coil',
          'Metal foreign body', 'Axial displacement',
          'Radial inward twist', 'Radial outward twist']
for k, (color, label) in enumerate(zip(colors, labels), start=1):
    plt.plot(xtrain[9 * k], color, lw=0.7, alpha=0.5, label=label)
plt.legend(loc='lower left')
plt.ylabel('dB')
plt.xlabel('Frequency')


# Colour the three severity levels (blue/green/red) within each group
for i in range(1, 17):
    plt.plot(xtrain[3 * i + 3], 'b', lw=0.7, alpha=0.5)
    plt.plot(xtrain[3 * i + 2], 'g', lw=0.7, alpha=0.5)
    plt.plot(xtrain[3 * i + 1], 'r', lw=0.7, alpha=0.5)

# One labelled line per severity level for the legend
for idx, color, label in ((54, 'b', '30%'), (53, 'g', '20%'), (52, 'r', '10%')):
    plt.plot(xtrain[idx], color, alpha=0.5, lw=0.7, label=label)
plt.legend()
plt.ylabel('dB')
plt.xlabel('Frequency')
