import os
import time
import shutil
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, BatchNormalization
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from tensorflow.keras.initializers import glorot_uniform, random_normal
from tensorflow.keras.utils import to_categorical

# Training hyper-parameters.
Epoch = 100
Batch_size = 128
# Experiment identifier; used to build the log / work / model directory names.
model_name = "five_MLP_1"
# Timestamp suffix so every run writes to its own log and work directory.
time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
log_dir = "/home/zmy/workspace2021/five_logs/" + model_name + time_str  # TensorBoard event files
work_dir = "/home/zmy/workspace2021/five_workdirs/" + model_name + time_str  # per-run checkpoints
model_save_path = "/home/zmy/workspace2021/five_models/" + model_name  # final best model is copied here


def read_data(sets_path='/data1/zmy/data/kfold_dataset/',
              train_sets=None,
              test_set='dataset4.csv'):
    """Load the 5-fold cross-validation CSV split (folds 0-3 train, fold 4 test).

    Args:
        sets_path: directory containing the fold CSV files (must end with '/'
            or be joinable by simple concatenation, matching the original
            call convention).
        train_sets: list of training-fold file names; defaults to
            dataset0..dataset3.
        test_set: test-fold file name.

    Returns:
        (x_train, y_train, x_test, y_test) where the x arrays are
        (n_samples, 13) float64 feature matrices and the y arrays are
        int64 0-based class labels (CSV's ``cancer_type`` is 1-based).
    """
    if train_sets is None:
        train_sets = ['dataset0.csv',
                      'dataset1.csv',
                      'dataset2.csv',
                      'dataset3.csv']

    # The 13 feature columns, in the same order the original row loop used.
    feature_cols = ['z', 'x', 'y', 'r',
                    'patientWeight', 'patientSex', 'patientAge',
                    'patientSize', 'local_suvmax', 'local_suvmin',
                    'local_suvavg', 'local_suvstd', 'local_suvvar']

    # Vectorized column selection replaces the original per-row Python loop.
    train_data = pd.concat(
        [pd.read_csv(sets_path + name) for name in train_sets],
        ignore_index=True)
    test_data = pd.read_csv(sets_path + test_set)

    # Note: np.float / np.int were removed from NumPy; use concrete dtypes.
    x_train = train_data[feature_cols].to_numpy(dtype=np.float64)
    x_test = test_data[feature_cols].to_numpy(dtype=np.float64)
    # cancer_type is 1-based in the CSVs; shift to 0-based class indices.
    y_train = (train_data['cancer_type'] - 1).to_numpy(dtype=np.int64)
    y_test = (test_data['cancer_type'] - 1).to_numpy(dtype=np.int64)

    return x_train, y_train, x_test, y_test


def standard_scaler(features):
    """Min-max scale each column of *features* into [0, 1].

    NOTE(review): despite the name, this applies MinMaxScaler, not
    StandardScaler, and it fits a fresh scaler on whatever array it is
    given — calling it separately on train and test data scales them with
    different statistics. Confirm that is intended at the call sites.
    """
    # StandardScaler (zero mean / unit variance) was the alternative tried.
    return MinMaxScaler().fit_transform(features)

# Layer order per block: Dense -> BatchNorm -> activation -> (Dropout disabled).
def create_model():
    """Build the 5-layer MLP classifier.

    Architecture: Dense(64) + ReLU, then three identical
    Dense(64) -> BatchNorm -> ReLU blocks, then a softmax head over the
    5 cancer-type classes. Input is a 13-dimensional feature vector.
    """
    model = Sequential()

    # First hidden layer (default initializer, no batch norm).
    model.add(Dense(64, input_shape=(13,)))
    model.add(Activation('relu'))

    # Three identical hidden blocks; a fresh unseeded normal initializer
    # per layer, as in the original layer-by-layer construction.
    for _ in range(3):
        model.add(Dense(64, kernel_initializer=random_normal(mean=0.0, stddev=0.05, seed=None)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # Softmax output over the 5 classes.
    model.add(Dense(5, activation='softmax'))

    # Alternatives considered: rmsprop optimizer, other losses.
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    return model


def train():
    """Train the MLP on folds 0-3 and validate on fold 4.

    Saves the best full model (lowest val_loss) to
    ``work_dir/<model_name>_best.hd5`` and writes TensorBoard logs to
    ``log_dir``. Returns nothing.
    """
    # Load the 5-fold data split.
    x_train, y_train, x_test, y_test = read_data()

    # Fit the scaler on the training folds only and reuse its statistics
    # for the test fold. (The original fit a separate scaler on each set,
    # which scales train and test inconsistently and leaks test statistics.)
    scaler = MinMaxScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # One-hot encode the 5-class labels for categorical_crossentropy.
    y_train = to_categorical(y_train, 5)
    y_test = to_categorical(y_test, 5)

    # Build and display the network.
    model = create_model()
    model.summary()

    if not os.path.exists(work_dir):
        os.makedirs(work_dir)

    # Save the full model every time val_loss improves.
    # NOTE(review): `period` is deprecated in newer TF/Keras
    # (`save_freq='epoch'` is the replacement) — kept for compatibility
    # with the environment this script targets.
    best_model_path = work_dir + "/" + model_name + "_best.hd5"
    checkpoint_fixed_name = ModelCheckpoint(filepath=best_model_path,
                                            monitor='val_loss',
                                            verbose=1,
                                            save_best_only=True,
                                            save_weights_only=False,
                                            mode='auto',
                                            period=1)

    model.fit(x_train, y_train,
              epochs=Epoch,
              batch_size=Batch_size,
              validation_data=(x_test, y_test),
              verbose=1,
              callbacks=[checkpoint_fixed_name, TensorBoard(log_dir=log_dir)])




def predict():
    """Evaluate a previously trained model on the held-out test fold.

    Loads weights from a hard-coded checkpoint path, then prints test
    loss and accuracy.
    """
    # Load the 5-fold data split.
    x_train, y_train, x_test, y_test = read_data()

    # Scale exactly as in train(): fit on the training folds, apply the
    # same statistics to the test fold. (The original fit a separate
    # scaler per set, so evaluation saw differently-scaled features
    # than training.)
    scaler = MinMaxScaler()
    scaler.fit(x_train)
    x_test = scaler.transform(x_test)

    # One-hot encode labels: model.evaluate with categorical_crossentropy
    # expects (n, 5) targets. The original skipped this conversion (unlike
    # train()) and would fail at evaluate time with integer labels.
    y_test = to_categorical(y_test, 5)

    # Rebuild the architecture and display it.
    model = create_model()
    model.summary()

    # NOTE(review): this path belongs to a different experiment
    # ("two_MLP_ct_pet_1") than model_name ("five_MLP_1") — confirm the
    # intended checkpoint before relying on these metrics.
    model_path = "/home/zmy/keras_test/models/two_MLP_ct_pet_1/two_MLP_ct_pet_1_best.hd5"
    model.load_weights(model_path, by_name=False)

    # Evaluate on the test fold.
    test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
    print('test loss: ', test_loss)
    print('test acc: ', test_acc)


if __name__ == '__main__':
    # Pin this run to GPU index 2 on the host.
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"
    if not os.path.exists(model_save_path):
        os.makedirs(model_save_path)
    train()
    # Copy the best checkpoint from this run's work dir into the shared
    # model directory (path must match the one built inside train()).
    best_model_path = work_dir + "/" + model_name + "_best.hd5"
    shutil.copy(best_model_path, model_save_path)

    # predict()