'''
# 使用在imagenet上预训练的ResNet50模型
# 预训练模型要求三通道：CT图，PET图，全零图组成三通道
'''


'''
# CT和PET组成双通道图片512x512输入训练
# 使用扩充数据集进行训练
# 使用权重损失函数
'''

import numpy as np
import os
import cv2
import time
import shutil
import random
import datetime
import pandas as pd
from glob import glob
from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler
from sklearn.metrics import classification_report, confusion_matrix
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.callbacks import TensorBoard, ReduceLROnPlateau, ModelCheckpoint, LearningRateScheduler, EarlyStopping
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.regularizers import l2
from tensorflow.keras.initializers import glorot_uniform, random_normal
from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from tensorflow.keras.layers import AveragePooling2D, MaxPooling2D, Concatenate, Dropout, GlobalAveragePooling2D
from tensorflow.keras.utils import multi_gpu_model
from tensorflow.keras.applications.resnet50 import ResNet50


# Run identification: each launch gets a unique timestamped suffix so that
# logs, checkpoints and exported models from different runs never collide.
model_name = "five_pretrain_FPM"
time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
log_dir = "/home/zmy/workspace2021/five_logs/{}{}".format(model_name, time_str)
work_dir = "/home/zmy/workspace2021/five_workdirs/{}{}".format(model_name, time_str)
model_save_path = "/home/zmy/workspace2021/five_models/{}".format(model_name)


# 读取文件列表
def read_csv():
    sets_path = '/data1/zmy/data_augmentation/kfold_dataset/'

    train_sets = ['1.csv',
                  '2.csv',
                  '3.csv',
                  '4.csv']

    test_set = '0.csv'

    # 读取数据集
    train_features = []
    test_features = []

    # 读取训练集
    for set in train_sets:
        train_data = pd.read_csv(sets_path+set)
        for j in range(len(train_data)):

            # 读取文件地址
            patientid = train_data['patientID'][j]

            ct_path = '/data1/zmy/data_augmentation/Slice/'+str(patientid)+'/CTSlice/'
            name_list = os.listdir(ct_path)

            pet_path = '/data1/zmy/data_augmentation/Slice/'+str(patientid)+'/PETSlice/'
            for it in name_list:

                one_feature = [patientid, int(train_data['cancer_type'][j])-1,
                               ct_path+it, pet_path+it]

                train_features.append(one_feature)

    # 读取测试集
    test_data = pd.read_csv(sets_path+test_set)
    for i in range(len(test_data)):
        # 读取文件地址
        patientid = test_data['patientID'][i]

        ct_path = '/data1/zmy/data_augmentation/Slice/' + str(patientid) + '/CTSlice/'
        name_list = os.listdir(ct_path)

        pet_path = '/data1/zmy/data_augmentation/Slice/' + str(patientid) + '/PETSlice/'
        for it in name_list:
            one_feature = [patientid, int(test_data['cancer_type'][i]) - 1,
                           ct_path + it, pet_path + it]

            test_features.append(one_feature)


    return train_features, test_features



def standard_scaler(features):
    """Normalise *features* column-wise to zero mean and unit variance.

    Alternatives considered: MinMaxScaler maps to [0, 1]; MaxAbsScaler maps
    to [-1, 1].  StandardScaler (subtract mean, divide by std) is used here.
    """
    return StandardScaler().fit_transform(features)



# Load a patient's 2-D slices from a directory of .npy files.
def read_img(path, label):
    """Return (images, labels) for every slice file under *path*.

    Files are read in sorted name order; each image is paired with the same
    integer class label, e.g. path = '/data1/.../3107/CTSlice/'.
    """
    file_names = sorted(os.listdir(path))
    imglist = [np.load(path + name) for name in file_names]
    labellist = [int(label)] * len(file_names)
    return imglist, labellist



# Training batch generator.
def data_generator(batch_size, data_list, istrain):
    """Yield batches forever as ({'input_1': imgs}, {'outclass': onehot}).

    Each image is a (512, 512, 3) stack of (CT, PET, zeros): the third
    all-zero channel pads the dual-modality data to the 3 channels the
    ImageNet-pretrained backbone expects.  A trailing partial batch at the
    end of a pass over *data_list* is dropped.

    Args:
        batch_size: number of slices per yielded batch.
        data_list: rows of [patientID, label, ct_path, pet_path].
        istrain: when True, reshuffle *data_list* at the start of every pass.

    Bug fixes vs. the original:
      * the batch counter was not reset when a pass ended with a partial
        batch, while the feature lists were — the first batch of the next
        pass then came out undersized and misaligned; counting via
        ``len(labels)`` removes the stale-counter state entirely;
      * shuffling happened only once instead of once per epoch;
      * deprecated ``np.int`` alias (removed in NumPy 1.24) replaced.
    """
    # Sample weights by class, kept for reference:
    # s_weights = [1, 4, 21, 8, 25]

    # Shared all-zero third channel; every slice is 512x512.
    zero_add = np.zeros((512, 512), dtype=int)

    while True:
        # Reshuffle every epoch so training batches differ between passes.
        if istrain:
            random.shuffle(data_list)

        ct_features = []
        pet_features = []
        zeros_features = []
        labels = []

        for item in data_list:
            # sample_weights.append(s_weights[item[15]])

            labels.append(int(item[1]))

            ct_features.append(np.load(str(item[2])))
            pet_features.append(np.load(str(item[3])))
            zeros_features.append(zero_add)

            # PET intensity normalisation, kept for reference:
            # max_pixel = np.max(pet)
            # min_pixel = np.min(pet)
            # pet = (pet - min_pixel) / float(max_pixel)

            if len(labels) >= batch_size:
                int_labels = np.asarray(labels, dtype=int)

                # Labels -> one-hot over the 5 cancer classes.
                onehot_labels = to_categorical(int_labels, 5)

                # Stack CT / PET / zeros into the channel axis -> (B, 512, 512, 3).
                img_feature = np.stack((np.asarray(ct_features),
                                        np.asarray(pet_features),
                                        np.asarray(zeros_features)), axis=3)

                yield {'input_1': img_feature}, {'outclass': onehot_labels}

                ct_features = []
                pet_features = []
                zeros_features = []
                labels = []


# Load the whole validation/test split into memory at once.
def read_testdata(test_features):
    """Return (img_feature, onehot_labels) for every row in *test_features*.

    *img_feature* has shape (N, 512, 512, 3) with channels (CT, PET, zeros)
    — the same layout produced by ``data_generator`` — and *onehot_labels*
    has shape (N, 5).

    Fix vs. original: the deprecated ``np.int`` alias (removed in NumPy
    1.24) is replaced with the builtin ``int``.
    """
    # Shared all-zero third channel; every slice is 512x512.
    zero_add = np.zeros((512, 512), dtype=int)

    # Rows are [patientID, label, ct_path, pet_path].
    labels = [int(item[1]) for item in test_features]
    ct_features = [np.load(str(item[2])) for item in test_features]
    pet_features = [np.load(str(item[3])) for item in test_features]
    zeros_features = [zero_add] * len(test_features)

    # PET intensity normalisation, kept for reference:
    # max_pixel = np.max(pet)
    # min_pixel = np.min(pet)
    # pet = (pet - min_pixel) / float(max_pixel)

    int_labels = np.asarray(labels, dtype=int)

    # Labels -> one-hot over the 5 cancer classes.
    onehot_labels = to_categorical(int_labels, 5)

    # Channel order (CT, PET, zeros) must match data_generator.
    img_feature = np.stack((np.asarray(ct_features),
                            np.asarray(pet_features),
                            np.asarray(zeros_features)), axis=3)

    return img_feature, onehot_labels



# LearningRateScheduler helper: compute a new learning rate from the epoch.
def step_decay(epoch):
    """Return the learning rate for *epoch*: start at 0.1, divide by 10 every 10 epochs.

    Bug fix: the original re-initialised ``res = 0.1`` on every call, so the
    rate never accumulated any decay — it returned a constant 0.1 (or 0.01
    whenever ``epoch % 10 == 0``, including epoch 0) instead of stepping
    down over training.
    """
    return 0.1 * (0.1 ** (epoch // 10))



def train():
    """Fine-tune an ImageNet-pretrained ResNet50 on (CT, PET, zeros) slices.

    Builds the backbone plus a new GAP -> Dense(1024) -> softmax(5) head,
    then trains for 100 epochs with SGD, class-imbalance loss weights, and
    periodic + best-val-loss checkpointing into ``work_dir``.
    """

    train_features, test_features = read_csv()
    batch_size = 8

    # Build the training and validation batch generators.
    train_gen = data_generator(batch_size, train_features, True)
    test_gen = data_generator(batch_size, test_features, False)

    # Alternative: load the whole validation split into memory at once.
    # test_img, test_onehot_labels = read_testdata(test_features)


    # Pretrained backbone without its ImageNet classification head.
    base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(512, 512, 3))

    # Collapse the backbone's feature maps with global average pooling.
    x = base_model.output
    x = GlobalAveragePooling2D()(x)

    # One fully-connected layer on top of the pooled features.
    x = Dense(1024, activation='relu')(x)

    # Classification head for the 5 cancer types.
    predictions = Dense(5, activation='softmax', name='outclass')(x)

    # Complete trainable model: backbone input -> new head output.
    model = Model(inputs=base_model.input, outputs=predictions)


    # Print the model architecture.
    model.summary()


    # Optionally train only the (randomly initialised) top layers first by
    # freezing every ResNet50 convolutional layer:
    # for layer in base_model.layers:
    #     layer.trainable = False

    # (Compiling must happen AFTER freezing layers for it to take effect.)
    # sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['categorical_accuracy'])

    if not os.path.exists(work_dir):
        os.makedirs(work_dir)

    # Periodic snapshot every 5 epochs, named with epoch number and val_loss.
    checkpoint = ModelCheckpoint(filepath=work_dir + "/" + model_name + "_" + "_e" + "{epoch:02d}-{val_loss:.4f}.hd5",
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=False,
                                 save_weights_only=False,
                                 mode='auto',
                                 period=5)

    # Checked every epoch; overwritten whenever val_loss improves.
    best_model_path = work_dir + "/" + model_name + "_best.hd5"
    checkpoint_fixed_name = ModelCheckpoint(filepath=best_model_path,
                                            monitor='val_loss',
                                            verbose=1,
                                            save_best_only=True,
                                            save_weights_only=False,
                                            mode='auto',
                                            period=1)
    # Per-class loss weights to counter the class imbalance in the dataset.
    cw = {0: 1, 1: 4, 2: 21, 3: 8, 4: 25}


    model.compile(optimizer=SGD(lr=0.001, momentum=0.9), loss='categorical_crossentropy', metrics=['categorical_accuracy'])

    model.fit_generator(generator=train_gen,
                        steps_per_epoch=len(train_features) // batch_size,
                        epochs=100,
                        validation_data=test_gen,
                        validation_steps=len(test_features) // batch_size,
                        class_weight=cw,
                        callbacks=[checkpoint, checkpoint_fixed_name, TensorBoard(log_dir=log_dir)])


def predict():
    """Evaluate the best saved model on the held-out fold and print metrics."""
    model_path = "/home/zmy/workspace2021/five_workdirs/five_resnet50_FPM20210312_153545/five_resnet50_FPM_best.hd5"
    model = load_model(model_path)

    # Only the held-out fold is needed here.
    train_features, test_features = read_csv()
    test_img, test_onehot_labels = read_testdata(test_features)

    # Per-slice 5-way class probabilities from the network.
    result = model.predict(test_img)

    # Convert probabilities / one-hot targets back to index labels.
    preds = np.argmax(result, axis=1)
    labels = np.argmax(test_onehot_labels, axis=1)
    print(preds)
    print(labels)

    # Per-class precision / recall / F1 report.
    target_names = ['1', '2', '3', '4', '5']
    print(classification_report(labels, preds, target_names=target_names))

    # Confusion matrix of predictions vs. ground truth.
    print(confusion_matrix(labels, preds))


if __name__ == "__main__":
    # Pin the run to the first GPU only.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # Ensure the export directory exists before training starts.
    os.makedirs(model_save_path, exist_ok=True)
    train()
    # best_model_path = work_dir + "/" + model_name + "_best.hd5"
    # shutil.copy(best_model_path, model_save_path)

    # predict()

