
import numpy as np
import pandas as pd

from sklearn.neural_network import MLPClassifier

from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils,plot_model
from sklearn.model_selection import cross_val_score,train_test_split,KFold
from sklearn.preprocessing import LabelEncoder
from keras.layers import Dense,Dropout,Flatten,Activation,BatchNormalization,AveragePooling1D,GlobalAveragePooling1D,Conv1D,MaxPooling1D,Bidirectional,LSTM,TimeDistributed
from keras.models import model_from_json
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import seaborn as sns
from keras import optimizers
import tensorflow as tf
from sklearn.preprocessing import Normalizer
# ---- Load data ----
# Expects a CSV whose first 190 columns are features and column 190 is the label.
df = pd.read_csv(r"C:\Users\finally\2.csv")

# Shuffle rows so the train/validation/test splits are not ordered by class.
df = df.sample(frac=1).reset_index(drop=True)
a = df.values[:, 0:190]   # raw feature matrix
Y = df.values[:, 190]     # class labels

# Scale each sample to unit norm.
scaler = Normalizer()
scaler.fit(a)
b = scaler.transform(a)

# BUG FIX: the original expanded the *raw* features `a`, silently discarding
# the normalized array `b`. Feed the normalized data to the network instead.
# Conv1D expects (samples, timesteps, channels), so add a channel axis.
X = np.expand_dims(b.astype(float), axis=2)

# Encode labels as integers, then one-hot for categorical_crossentropy.
encoder = LabelEncoder()
Y_encoded = encoder.fit_transform(Y)
Y_onehot = np_utils.to_categorical(Y_encoded)

# Split: 60% train / 20% validation / 20% test
# (0.25 of the remaining 80% equals 20% of the whole set).
X_train, X_test, Y_train, Y_test = train_test_split(X, Y_onehot, test_size=0.2, random_state=0)
X_train, X_vali, Y_train, Y_vali = train_test_split(X_train, Y_train, test_size=0.25, random_state=0)

# ---- Build the 1-D CNN classifier ----
model = Sequential()
# Single conv block: 8 filters, kernel size 3; 'same' padding keeps length 190.
model.add(Conv1D(8, 3, input_shape=(190, 1), activation='relu', padding='same'))
model.add(MaxPooling1D(3))
model.add(Dropout(0.2))

model.add(Flatten())
# 30 output classes; softmax pairs with categorical_crossentropy below.
model.add(Dense(30, activation='softmax'))

# Save architecture diagrams (requires pydot + graphviz).
# The original re-imported plot_model from the private tensorflow.python.keras
# path here, shadowing the keras.utils import from the file header; the
# redundant import was removed (same function, same output files).
plot_model(model, to_file='./model_classifier.png', show_shapes=True)
plot_model(model, to_file='A1model.png')

# BUG FIX: model.summary() prints the table itself and returns None, so the
# original print(model.summary()) additionally printed a stray "None" line.
model.summary()



# ---- Compile, train, evaluate ----

# Optional early stopping, kept for reference:
# early_stopping = tf.keras.callbacks.EarlyStopping(
#     monitor='val_loss', min_delta=0, patience=10, verbose=1)

# Use the non-deprecated `learning_rate` keyword (`lr` was removed from the
# TF 2.x Keras optimizers); `decay=0.0` was the default and is dropped.
opt = tf.optimizers.Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.99, epsilon=1e-08)
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

history = model.fit(X_train, Y_train,
                    batch_size=25, epochs=100,
                    verbose=1,
                    validation_data=(X_vali, Y_vali))

# ---- Model evaluation ----
# evaluate() returns [loss, accuracy] given the metrics above.
score1 = model.evaluate(X_test, Y_test, verbose=1)
score2 = model.evaluate(X_vali, Y_vali, verbose=1)
print('Test accuracy:', score1[1])
print('Vali accuracy:', score2[1])
# ---- Visualize training curves (accuracy and loss per epoch) ----
epochs = range(len(history.history['accuracy']))

# Accuracy curves. (Typo "Traing" in the original titles fixed to "Training".)
plt.figure()
plt.plot(epochs, history.history['accuracy'], 'b-', label='Training acc')
plt.plot(epochs, history.history['val_accuracy'], 'r-', label='Validation acc')
plt.title('All Training and Validation accuracy')
plt.legend()

# Loss curves.
plt.figure()
plt.plot(epochs, history.history['loss'], 'b-', label='Training loss')
plt.plot(epochs, history.history['val_loss'], 'r-', label='Validation loss')
plt.title('All Training and Validation loss')
plt.legend()
import itertools

 # Display the confusion matrix
from pylab import mpl

def plot_confuse(model, x_val, y_val):
    """Plot and save a confusion-matrix heatmap for `model` on (x_val, y_val).

    Args:
        model: trained Keras classifier with a softmax output.
        x_val: input samples, shape matching the model's input.
        y_val: one-hot encoded ground-truth labels.

    Side effects: saves the figure to 'test_xx.png' and shows it.
    """
    # BUG FIX: Sequential.predict_classes was removed in TF 2.6+; taking the
    # argmax over the softmax probabilities is the portable equivalent.
    predictions = np.argmax(model.predict(x_val), axis=-1)
    truelabel = y_val.argmax(axis=-1)  # one-hot -> integer labels
    conf_mat = confusion_matrix(y_true=truelabel, y_pred=predictions)
    df = pd.DataFrame(conf_mat)

    mpl.rcParams['font.family'] = 'sans-serif'
    mpl.rcParams['font.sans-serif'] = 'NSimSun,Times New Roman'

    f, ax = plt.subplots(figsize=(8, 6))
    cmap = sns.cm.rocket_r  # reversed colormap: larger counts render darker
    ax = sns.heatmap(df, annot=False, ax=ax, fmt='.0f', cmap=cmap)

    # BUG FIX: sklearn's confusion_matrix puts true labels on the rows
    # (heatmap y-axis) and predictions on the columns (x-axis); the original
    # had the two axis labels swapped.
    plt.xlabel('predict_label', fontsize=15, color='k')
    plt.ylabel('real_label', fontsize=15, color='k')
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)

    # Shrink the colorbar tick labels to match the axis tick size.
    cax = plt.gcf().axes[-1]
    cax.tick_params(labelsize=10)

    plt.savefig('test_xx.png', dpi=500, bbox_inches='tight', transparent=False)
    plt.show()

# A confusion matrix is commonly used to show per-class classification quality;
# render it for the held-out test set.
plot_confuse(model, X_test, Y_test)  
  