'''
Binary-classification MLP model.
Evaluates the predictions by plotting ROC and PR curves.
'''

import os
import time
import shutil
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from tensorflow.keras.models import Model, load_model, Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, BatchNormalization
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from tensorflow.keras.initializers import glorot_uniform, random_normal
from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score
import matplotlib.pyplot as plt

# --- Training configuration and output locations ---
Epoch = 150  # total number of training epochs
Batch_size = 128  # mini-batch size passed to model.fit
model_name = "two_MLP"
# Timestamp suffix keeps log/work dirs from different runs separate.
time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
log_dir = "/home/zmy/workspace2021/two_logs/" + model_name + time_str  # TensorBoard logs (per run)
work_dir = "/home/zmy/workspace2021/two_workdirs/" + model_name + time_str  # checkpoints (per run)
model_save_path = "/home/zmy/workspace2021/two_models/" + model_name  # shared best-model dir (no timestamp)

# Binary classification: plot the ROC curve of a single model
def auc_curve(y, prob):
    '''
    Plot the ROC curve of the model predictions and save it as a PNG.

    :param y: ground-truth binary labels
    :param prob: predicted probabilities for the positive class
    :return: None; the figure is written to disk
    '''
    fpr, tpr, _thresholds = roc_curve(y, prob)
    roc_auc = auc(fpr, tpr)

    line_width = 2
    plt.figure(figsize=(10, 10))
    plt.plot(fpr, tpr, color='darkorange', lw=line_width,
             label='ROC curve (area=%0.3f)' % roc_auc)
    # Chance-level diagonal for reference.
    plt.plot([0, 1], [0, 1], color='navy', lw=line_width, linestyle='--')
    # Reference point: physician diagnostic performance
    plt.plot([0.217], [0.686], marker='o', color='red')
    # Reference point: binary decision-tree performance
    plt.plot([0.358], [0.686], marker='o', color='green')

    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('AUC')
    plt.legend(loc='lower right')
    plt.savefig('/home/zmy/workspace2021/two_MLP_ROC.png')
    plt.close()
    # plt.show()


# Binary classification: plot the P-R curve of a single model
def pr_curve(y, prob):
    '''
    Plot the precision-recall curve of the model predictions and save it
    as a PNG.

    :param y: ground-truth binary labels
    :param prob: predicted probabilities for the positive class
    :return: None; the figure is written to disk
    '''
    ap = average_precision_score(y, prob)
    precision, recall, _thresholds = precision_recall_curve(y, prob)

    plt.step(recall, precision, color='b', alpha=0.2, where='post')
    plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')
    # Reference point: physician diagnostic performance
    plt.plot([0.686], [0.48], marker='o', color='red')
    # Reference point: binary decision-tree performance
    plt.plot([0.686], [0.36], marker='o', color='green')

    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(ap))
    plt.savefig('/home/zmy/workspace2021/two_MLP_PR.png')
    plt.close()
    # plt.show()


# Use the train/test split created at training time
def read_data():
    """
    Load the pre-split train/test CSV files and return them as arrays.

    The training set is shuffled (features and labels with the same
    permutation); the test set keeps its file order.

    :return: (x_train, y_train, x_test, y_test) — float feature matrices
             of shape (n, 15) and 0/1 int label vectors.
    """
    # The 15 feature columns, in the order the model expects.
    feature_cols = ['z', 'x', 'y', 'r',
                    'patientWeight', 'patientSex', 'patientAge',
                    'patientSize', 'local_suvmax', 'local_suvmin',
                    'local_suvavg', 'local_suvstd', 'local_suvvar',
                    'lungW', 'lungH']

    test_data = pd.read_csv('/data1/zmy/data/divide_csv/two/test.csv')
    train_data = pd.read_csv('/data1/zmy/data/divide_csv/two/train.csv')

    # BUG FIX: np.float / np.int were deprecated in NumPy 1.20 and removed
    # in 1.24 (AttributeError); the builtin float/int are the equivalent
    # dtypes. Vectorized column selection also replaces the former
    # per-row Python loop.
    x_train = train_data[feature_cols].to_numpy(dtype=float)
    x_test = test_data[feature_cols].to_numpy(dtype=float)
    # Labels in the CSV are 1/2; shift them to 0/1 for binary cross-entropy.
    y_train = (train_data['cancer_type'] - 1).to_numpy(dtype=int)
    y_test = (test_data['cancer_type'] - 1).to_numpy(dtype=int)

    # Shuffle the training set with a single shared permutation.
    m = x_train.shape[0]
    permutation = np.random.permutation(m)
    x_train = x_train[permutation, :]
    y_train = y_train[permutation]

    return x_train, y_train, x_test, y_test

def standard_scaler(features):
    """
    Fit a min-max scaler on *features* and return the scaled matrix.

    NOTE(review): despite the name, this applies MinMaxScaler (range
    [0, 1]), not StandardScaler — presumably intentional; confirm with
    the author.

    :param features: 2-D array-like of raw feature values
    :return: the scaled feature matrix
    """
    normalizer = MinMaxScaler()
    # normalizer = StandardScaler()
    return normalizer.fit_transform(features)

def create_model():
    """
    Build and compile the binary-classification MLP.

    Architecture: four 64-unit hidden layers, each followed by batch
    normalization and ReLU, then a single sigmoid output unit.

    :return: a compiled Keras Sequential model (binary cross-entropy,
             SGD with momentum).
    """
    model = Sequential()

    # First hidden layer declares the 15-feature input shape and keeps
    # the default kernel initializer.
    model.add(Dense(64, input_shape=(15,)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    # Three more identical hidden layers with normal-distribution
    # kernel initialization.
    for _ in range(3):
        model.add(Dense(64, kernel_initializer=random_normal(mean=0.0, stddev=0.05, seed=None)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # Single sigmoid unit -> probability of the positive class.
    model.add(Dense(1, activation='sigmoid'))

    optimizer = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])

    return model

def train():
    """
    Train the binary MLP end to end: load the data, scale the features,
    fit the model with checkpointing and TensorBoard logging, then
    reload the best checkpoint and run inference on the test set.
    """
    # Load data
    x_train, y_train, x_test, y_test = read_data()

    # Normalize features. BUG FIX: fit the scaler on the training data
    # only and reuse its statistics for the test set — previously the
    # scaler was re-fitted on the test data, leaking test statistics and
    # scaling train/test inconsistently.
    scaler = MinMaxScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Build and display the network.
    model = create_model()
    model.summary()

    tensorboard_callback = TensorBoard(log_dir=log_dir)

    if not os.path.exists(work_dir):
        os.makedirs(work_dir)

    # Snapshot every 5 epochs regardless of validation performance.
    checkpoint = ModelCheckpoint(filepath=work_dir + "/" + model_name + "_" + "_e" + "{epoch:02d}-{val_loss:.4f}.hd5",
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=False,
                                 save_weights_only=False,
                                 mode='auto',
                                 period=5)

    # Keep a single "best" checkpoint, overwritten whenever val_loss improves.
    best_model_path = work_dir + "/" + model_name + "_best.hd5"
    checkpoint_fixed_name = ModelCheckpoint(filepath=best_model_path,
                                            monitor='val_loss',
                                            verbose=1,
                                            save_best_only=True,
                                            save_weights_only=False,
                                            mode='auto',
                                            period=1)

    model.fit(x_train, y_train,
              epochs=Epoch,  # CONSISTENCY FIX: use the module constant (was hard-coded 150)
              batch_size=Batch_size,
              validation_data=(x_test, y_test),
              verbose=1,
              shuffle=True,
              callbacks=[checkpoint, checkpoint_fixed_name, tensorboard_callback])

    # Copy the best checkpoint into the shared model directory, then
    # reload it from there. CONSISTENCY FIX: derive the load path from
    # model_save_path instead of a duplicated hard-coded string.
    shutil.copy(best_model_path, model_save_path)
    model_path = os.path.join(model_save_path, model_name + "_best.hd5")
    model = load_model(model_path)

    # Test-set inference (probability outputs).
    result = model.predict(x_test)

    # Evaluation is kept disabled, as in the original workflow.
    # auc_curve(y_test, result)
    # pr_curve(y_test, result)

def predict():
    """
    Run the saved best model on the test set and dump the ground-truth
    labels and predicted probabilities to .npy files for later analysis.
    """
    # Load data
    x_train, y_train, x_test, y_test = read_data()

    # Normalize features. BUG FIX: fit the scaler on the training data
    # only and transform the test set with the same statistics, matching
    # how the model was trained — previously the scaler was re-fitted on
    # the test data, producing a different scaling than at training time.
    scaler = MinMaxScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # CONSISTENCY: derive the model path from the module-level config
    # instead of a duplicated hard-coded string.
    model_path = os.path.join(model_save_path, model_name + "_best.hd5")
    model = load_model(model_path)

    # Test-set inference (probability outputs).
    result = model.predict(x_test)

    np.save('/home/zmy/workspace2021/two_MLP_labels.npy', y_test)
    np.save('/home/zmy/workspace2021/two_MLP_preds.npy', result)


if __name__ == '__main__':
    # Pin the job to GPU 0.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # Make sure the shared best-model directory exists before training
    # copies the checkpoint into it.
    if not os.path.exists(model_save_path):
        os.makedirs(model_save_path)
    train()
    # predict()


