'''
Five-class fully-connected (MLP) classification model.
Uses five-fold cross-validation.
Uses a class-weighted loss function.
'''
import os
import time
import math
import shutil
import random
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Dropout, Activation, BatchNormalization
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from tensorflow.keras.initializers import glorot_uniform, random_normal
from tensorflow.keras.utils import to_categorical

# Training hyperparameters.
Epoch = 100  # number of training epochs
Batch_size = 128  # mini-batch size for model.fit
model_name = "five_MLP_4"
# Timestamp suffix keeps each run's log/work directories unique.
time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
log_dir = "/home/zmy/workspace2021/five_logs/" + model_name + time_str  # TensorBoard logs
work_dir = "/home/zmy/workspace2021/five_workdirs/" + model_name + time_str  # per-run checkpoints
model_save_path = "/home/zmy/workspace2021/five_models/" + model_name  # published best model



# Columns used as model input features, in this fixed order (15 features).
_FEATURE_COLUMNS = ['z', 'x', 'y', 'r',
                    'patientWeight', 'patientSex', 'patientAge',
                    'patientSize', 'local_suvmax', 'local_suvmin',
                    'local_suvavg', 'local_suvstd', 'local_suvvar',
                    'lungW', 'lungH']


def _extract_features_labels(df):
    """Split one fold's DataFrame into (features, labels) NumPy arrays.

    Labels in the CSV (`cancer_type`) are 1-based; shift to 0-based class ids.
    Note: np.float/np.int aliases were removed in NumPy 1.24, so explicit
    float64/int64 dtypes are used instead.
    """
    features = df[_FEATURE_COLUMNS].to_numpy(dtype=np.float64)
    labels = df['cancer_type'].to_numpy(dtype=np.int64) - 1
    return features, labels


# Read the five-fold cross-validation dataset (4 folds train, 1 fold test).
def read_data(sets_path='/data1/zmy/data/kfold_dataset/',
              train_sets=('dataset3.csv',
                          'dataset0.csv',
                          'dataset1.csv',
                          'dataset2.csv'),
              test_set='dataset4.csv'):
    """Load the train/test folds from CSV files.

    The parameters keep their historical defaults so existing callers are
    unaffected, but the fold paths can now be overridden for other splits.

    Args:
        sets_path: directory containing the fold CSV files (with trailing '/').
        train_sets: file names of the training folds, read in this order.
        test_set: file name of the held-out test fold.

    Returns:
        x_train: float64 array of shape (n_train, 15).
        y_train: int64 array of 0-based class labels, shape (n_train,).
        x_test:  float64 array of shape (n_test, 15).
        y_test:  int64 array of 0-based class labels, shape (n_test,).
    """
    train_frames = [pd.read_csv(sets_path + name) for name in train_sets]
    x_train, y_train = _extract_features_labels(
        pd.concat(train_frames, ignore_index=True))
    x_test, y_test = _extract_features_labels(pd.read_csv(sets_path + test_set))
    return x_train, y_train, x_test, y_test


# Derive per-class weights (from fixed training-set class counts) for the
# weighted loss function.
def create_class_weight(mu=0.15):
    """Return log-scaled class weights for the weighted loss.

    weight(c) = max(1.0, ln(mu * total / count_c)): rare classes get a
    weight above 1 while common classes are clamped to 1.

    Args:
        mu: smoothing factor controlling how aggressively rare classes
            are up-weighted.

    Returns:
        dict mapping class id (0-4) to its float weight.
    """
    class_counts = {0: 607, 1: 179, 2: 29, 3: 83, 4: 25}
    total = sum(class_counts.values())
    return {label: max(1.0, math.log(mu * total / count))
            for label, count in class_counts.items()}


def standard_scaler(features):
    """Fit a StandardScaler on *features* and return the transformed array.

    StandardScaler normalizes each column to zero mean / unit variance.
    Alternatives considered during development:
      MinMaxScaler -- linear scaling to [0, 1]
      MaxAbsScaler -- scaling by max absolute value to [-1, 1]

    NOTE(review): this fits on whatever array it is given; calling it
    separately on train and test sets leaks test statistics -- confirm
    callers intend that.
    """
    return StandardScaler().fit_transform(features)


# Architecture: [Dense(64) -> BN -> ReLU] x 4 -> Dense(5, softmax)
def create_model():
    """Build and compile the five-way MLP classifier.

    Input: 15 normalized features. Four hidden Dense(64) blocks, each
    followed by batch normalization and ReLU, then a softmax output over
    the 5 classes. Dropout(0.5) was tried after each block and disabled.

    Returns:
        A compiled tf.keras Sequential model.
    """
    model = Sequential()
    model.add(Dense(64, input_shape=(15,)))
    # BUGFIX: the activations here are rank-2 (batch, features), so axis=2
    # is out of range and raises at build time; normalize over the feature
    # (channel) axis, i.e. the default axis=-1.
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))
    # Three more identical hidden blocks with small random-normal init.
    for _ in range(3):
        model.add(Dense(64, kernel_initializer=random_normal(mean=0.0, stddev=0.05, seed=None)))
        model.add(BatchNormalization(axis=-1))
        model.add(Activation('relu'))
    model.add(Dense(5, activation='softmax'))

    # SGD with Nesterov momentum; categorical cross-entropy matches the
    # one-hot labels produced by to_categorical in the callers.
    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    return model


def train():
    """Train the MLP on four folds, checkpoint the best model, evaluate on fold 5.

    Side effects: writes the best (lowest val_loss) checkpoint under
    work_dir, copies it to model_save_path, writes TensorBoard logs to
    log_dir, and prints the test-fold classification report and
    confusion matrix.
    """
    # Load the five-fold data (4 train folds, 1 test fold).
    x_train, y_train, x_test, y_test = read_data()

    # BUGFIX: fit the scaler on the training data only and apply its
    # statistics to the test set. Fitting train and test independently
    # (the previous behavior) leaks test-set statistics and makes the
    # evaluation inconsistent with deployment.
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # One-hot encode the labels (5 classes).
    y_train = to_categorical(y_train, 5)
    y_test_onehot = to_categorical(y_test, 5)

    # Build and display the network.
    model = create_model()
    model.summary()

    if not os.path.exists(work_dir):
        os.makedirs(work_dir)

    # Save the model each epoch where val_loss improves.
    best_model_path = work_dir + "/" + model_name + "_best.hd5"
    checkpoint_fixed_name = ModelCheckpoint(filepath=best_model_path,
                                            monitor='val_loss',
                                            verbose=1,
                                            save_best_only=True,
                                            save_weights_only=False,
                                            mode='auto',
                                            period=1)

    # Class weights for the weighted loss: majority class 0 gets 1, the
    # others get round(majority_count / class_count).
    cw = {0: 1, 1: 4, 2: 21, 3: 8, 4: 25}
    # cw = create_class_weight()  # alternative: log-scaled weights
    model.fit(x_train, y_train,
              epochs=Epoch,
              batch_size=Batch_size,
              validation_data=(x_test, y_test_onehot),
              verbose=1,
              shuffle=True,
              class_weight=cw,
              callbacks=[checkpoint_fixed_name, TensorBoard(log_dir=log_dir)])

    # Keep a copy of the best checkpoint with the published models.
    shutil.copy(best_model_path, model_save_path)

    # BUGFIX: evaluate the checkpoint written by THIS run rather than a
    # hard-coded path left over from a previous experiment.
    model = load_model(best_model_path)

    # Test-fold class probabilities -> predicted class indices.
    result = model.predict(x_test)
    preds = np.argmax(result, axis=1)

    # Per-class precision/recall/F1 (names are the original 1-based types).
    target_names = ['1', '2', '3', '4', '5']
    result_statis = classification_report(y_test, preds, target_names=target_names)
    print(result_statis)

    # Confusion matrix over the 5 classes.
    confusion = confusion_matrix(y_test, preds)
    print(confusion)


def predict():
    """Evaluate a saved checkpoint on the held-out fold and print metrics.

    Side effects: prints raw probabilities, loss/accuracy, predicted and
    true labels, the classification report, and the confusion matrix.
    """
    # Load the folds; the training folds are needed only to refit the
    # scaler with the same statistics used at training time.
    x_train, y_train, x_test, y_test = read_data()

    # BUGFIX: fit the scaler on the training folds and transform the test
    # fold with those statistics, so the inputs match the preprocessing
    # the checkpoint was trained with (previously both sets were fit
    # independently, leaking test statistics).
    scaler = StandardScaler()
    scaler.fit(x_train)
    x_test = scaler.transform(x_test)

    # One-hot labels for model.evaluate.
    y_test_onehot = to_categorical(y_test, 5)

    # NOTE(review): this points at one specific earlier run's checkpoint;
    # update when evaluating a different model.
    model_path = "/home/zmy/workspace2021/five_workdirs/five_MLP_120210306_191656/five_MLP_1_1.1297.hd5"
    model = load_model(model_path)

    # Test-fold class probabilities over the 5 types.
    result = model.predict(x_test)
    print(result)
    test_loss, test_acc = model.evaluate(x_test, y_test_onehot, verbose=2)
    print('test loss: ', test_loss)
    print('test acc: ', test_acc)

    # Probabilities -> predicted class indices.
    preds = np.argmax(result, axis=1)
    print(preds)
    print(y_test)

    # Per-class precision/recall/F1 (names are the original 1-based types).
    target_names = ['1', '2', '3', '4', '5']
    result_statis = classification_report(y_test, preds, target_names=target_names)
    print(result_statis)

    # Confusion matrix over the 5 classes.
    confusion = confusion_matrix(y_test, preds)
    print(confusion)


if __name__ == '__main__':
    # Restrict TensorFlow to GPU index 2 on the shared machine.
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"
    # Ensure the published-model directory exists before training copies into it.
    if not os.path.exists(model_save_path):
        os.makedirs(model_save_path)
    train()
    # best_model_path = work_dir + "/" + model_name + "_best.hd5"
    # shutil.copy(best_model_path, model_save_path)

    # predict()