'''
三分类全连接模型
'''
import os
import time
import shutil
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Dropout, Activation, BatchNormalization
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from tensorflow.keras.initializers import glorot_uniform, random_normal

# Training hyper-parameters and output locations.
Epoch = 100
Batch_size = 128
model_name = "three_MLP"
# Timestamp suffix keeps every run's logs/checkpoints in a distinct directory.
time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
log_dir = f"/home/zmy/workspace2021/three_logs/{model_name}{time_str}"
work_dir = f"/home/zmy/workspace2021/three_workdirs/{model_name}{time_str}"
model_save_path = f"/home/zmy/workspace2021/three_models/{model_name}"

# Train/test splits were divided ahead of time (at training time).
def read_data(train_csv='/data1/zmy/data/divide_csv/three/train.csv',
              test_csv='/data1/zmy/data/divide_csv/three/test.csv'):
    """Load the pre-split train/test CSVs and return model-ready arrays.

    Args:
        train_csv: path of the training-split CSV file.
        test_csv: path of the test-split CSV file.

    Returns:
        (x_train, y_train, x_test, y_test) where the feature matrices are
        float arrays of shape (n, 13) and the labels are one-hot (n, 3).
        The training split is shuffled; the test split keeps CSV order.
    """
    feature_cols = ['z', 'x', 'y', 'r',
                    'patientWeight', 'patientSex', 'patientAge', 'patientSize',
                    'local_suvmax', 'local_suvmin', 'local_suvavg',
                    'local_suvstd', 'local_suvvar']

    def _features_and_labels(df):
        # Vectorized replacement for the original per-row Python loop.
        # Plain `float` / `np.int64` are used because np.float / np.int were
        # removed in NumPy >= 1.24.
        feats = df[feature_cols].to_numpy(dtype=float)
        # x/y are pixel coordinates in a 512x512 slice; scale them to [0, 1].
        feats[:, 1] /= 512.0
        feats[:, 2] /= 512.0
        # cancer_type is 1-based in the CSV; shift to 0-based class indices.
        labels = df['cancer_type'].to_numpy(dtype=np.int64) - 1
        return feats, labels

    x_train, y_train = _features_and_labels(pd.read_csv(train_csv))
    x_test, y_test = _features_and_labels(pd.read_csv(test_csv))

    # Shuffle only the training split.
    permutation = np.random.permutation(x_train.shape[0])
    x_train = x_train[permutation, :]
    y_train = y_train[permutation]

    # One-hot encode the 3-class labels for categorical_crossentropy.
    y_train = to_categorical(y_train, 3)
    y_test = to_categorical(y_test, 3)

    return x_train, y_train, x_test, y_test

def standard_scaler(features):
    """Scale every feature column of *features* into [0, 1] via MinMaxScaler.

    NOTE(review): each call fits a brand-new scaler on whatever it receives,
    so calling it separately on train and test applies different scalings —
    prefer fitting on the training split only and reusing that scaler.
    """
    # scaler = StandardScaler()  # alternative scaling strategy
    return MinMaxScaler().fit_transform(features)

def create_model(input_dim=13):
    """Build and compile a 4-hidden-layer MLP for 3-class classification.

    Args:
        input_dim: number of input features. Defaults to 13, the feature
            count produced by read_data() (the original hard-coded 15,
            which mismatched the data and would fail at fit time).

    Returns:
        A compiled tf.keras Sequential model (softmax over 3 classes).
    """
    model = Sequential()
    model.add(Dense(64, input_shape=(input_dim,)))
    # Dense output is 2-D (batch, units), so normalization must run over the
    # last axis; the original axis=2 is out of range and raises at build time.
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # Three identical hidden blocks: Dense -> BatchNorm -> ReLU.
    for _ in range(3):
        model.add(Dense(64, kernel_initializer=random_normal(mean=0.0,
                                                             stddev=0.05,
                                                             seed=None)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(Dense(3, activation='softmax'))

    # Alternative loss/optimizer candidates: rmsprop, categorical_crossentropy.
    # `lr` is the deprecated alias; `learning_rate` is the supported name.
    sgd = SGD(learning_rate=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd,
                  metrics=['accuracy'])

    return model

def train():
    """Train the 3-class MLP, keep the best checkpoint, then evaluate it.

    Side effects: writes TensorBoard logs to log_dir, epoch/best checkpoints
    to work_dir, copies the best model into model_save_path, and prints a
    classification report plus confusion matrix for the test split.
    """
    # Load the pre-divided splits (features + one-hot labels).
    x_train, y_train, x_test, y_test = read_data()

    # Normalize features. Fit the scaler on the training split only and
    # reuse it on the test split — fitting a separate scaler per split (as
    # the original did) maps train and test through different transforms.
    scaler = MinMaxScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Build and display the network.
    model = create_model()
    model.summary()

    tensorboard_callback = TensorBoard(log_dir=log_dir)

    if not os.path.exists(work_dir):
        os.makedirs(work_dir)
    # Periodic snapshot: save the full model every 5 epochs.
    checkpoint = ModelCheckpoint(
        filepath=work_dir + "/" + model_name + "_" + "_e" + "{epoch:02d}-{val_loss:.4f}.hd5",
        monitor='val_loss',
        verbose=1,
        save_best_only=False,
        save_weights_only=False,
        mode='auto',
        period=5)
    # Best-so-far snapshot: overwritten whenever val_loss improves.
    best_model_path = work_dir + "/" + model_name + "_best.hd5"
    checkpoint_fixed_name = ModelCheckpoint(
        filepath=best_model_path,
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        save_weights_only=False,
        mode='auto',
        period=1)

    model.fit(x_train, y_train,
              epochs=Epoch,
              batch_size=Batch_size,
              validation_data=(x_test, y_test),
              verbose=1,
              callbacks=[checkpoint, checkpoint_fixed_name, tensorboard_callback])

    # Publish the best checkpoint, then reload it for evaluation.
    # (Path built from the module constants instead of a hard-coded string.)
    shutil.copy(best_model_path, model_save_path)
    model = load_model(os.path.join(model_save_path, model_name + "_best.hd5"))

    # Test-set inference: class probabilities -> predicted class indices.
    probs = model.predict(x_test)
    preds = np.argmax(probs, axis=1)
    # y_test is one-hot encoded, but classification_report/confusion_matrix
    # expect integer class labels — decode it first (the original passed the
    # one-hot matrix directly, which raises a shape-mismatch error).
    y_true = np.argmax(y_test, axis=1)

    # Evaluation metrics.
    target_names = ['1', '2', '3']
    print(classification_report(y_true, preds, target_names=target_names))

    # Confusion matrix.
    print(confusion_matrix(y_true, preds))



if __name__ == '__main__':
    # Pin this run to GPU 3.
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"
    # Make sure the published-model directory exists before training starts.
    if not os.path.exists(model_save_path):
        os.makedirs(model_save_path)
    train()
