'''
Prediction script: merges the predictions of the under-sampled models.
1. FPM_undersampling_train.py: FPM_undersample_*

Result 1: majority vote over the eight models' predicted classes.
Result 2: sum the eight models' probability outputs and take the argmax class.
Compare results 1 and 2 and keep the better one as the final scheme.
'''

import os
import pandas as pd
import numpy as np

import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchvision.models import resnet50

from sklearn.metrics import classification_report, confusion_matrix



BatchSize = 16


# 自定义数据集和数据预处理
class MyDataset(Dataset):

    def __init__(self, datalist):
        self.data_info = datalist


    def __len__(self):
        return len(self.data_info)

    def __getitem__(self, item):
        patientID = self.data_info[item][0]
        label = self.data_info[item][1]
        ct_path = self.data_info[item][2]
        pet_path = self.data_info[item][3]
        pet_slope = self.data_info[item][4]
        pet_intercept = self.data_info[item][5]

        ct = np.load(ct_path)
        pet = np.load(pet_path)

        # pet图像转化HU值
        if pet_slope != 1:
            pet = pet * pet.astype(np.float64)
            pet = pet.astype(np.int32)
        pet += np.int32(pet_intercept)

        # pet图像归一化
        pet = MaxMinNormalizer(pet)

        # ct和pet进行合并
        img = merge_CT_PET(ct, pet)

        return {'image': torch.from_numpy(img), 'label': torch.tensor(label)}


# 读取文件列表
def read_csv(data_sets):
    sets_path = '/data1/zmy/data2021/origin_data/divide_csv/under_sampling_five/'

    # 读取数据集
    data_features = []

    for set in data_sets:
        train_data = pd.read_csv(sets_path+set)
        for j in range(len(train_data)):

            # 读取文件地址
            patientid = train_data['patientID'][j]

            ct_path = '/data1/zmy/data2021/origin_data/Slice/'+str(patientid)+'/CTSlice/'

            name_list = os.listdir(ct_path)

            pet_path = '/data1/zmy/data2021/origin_data/Slice/'+str(patientid)+'/PETSlice/'
            pet_slope = float(train_data['pet_slope'][j])
            pet_intercept = float(train_data['pet_intercept'][j])

            for it in name_list:

                one_feature = [patientid, int(train_data['cancer_type'][j])-1,
                               ct_path+it, pet_path+it, pet_slope, pet_intercept]

                data_features.append(one_feature)


    return data_features


# 图像归一化
def MaxMinNormalizer(data):
    data_max = np.max(data)
    data_min = np.min(data)
    data_normalize = 1 - (data - data_min) / (data_max - data_min)
    return data_normalize


# CT和PET融合
def merge_CT_PET(ct_array, pet_array):

    img = np.asarray([ct_array, pet_array], dtype=np.float)
    return img



# 创建网络
def resnet():
    # 创建resnet50网络
    net = resnet50(pretrained=False)
    net.conv1 = nn.Conv2d(2, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
    num_ftrs = net.fc.in_features
    net.fc = nn.Linear(in_features=num_ftrs, out_features=5, bias=True)



    # 打印网络结构和参数量
    # print(net)
    # print("Total number of paramerters in networks is {}  ".format(sum(x.numel() for x in net.parameters())))
    return net

# 单个模型结果预测
def predict(weight_path):

    # 加载模型权重
    net.load_state_dict(torch.load(weight_path))
    net.eval()

    # 返回的预测结果及对应标签
    y_preds = []  # 网络直接输出
    y_preds_maxclass = []
    y_true = []

    with torch.no_grad():
        for n_iter, data in enumerate(testloader):
            print('iteration:{}\ttotal {} iterations'.format(n_iter + 1, len(testloader)))

            images = data['image'].type(torch.FloatTensor).to(device)
            labels = data['label'].to(device)

            outputs = net(images)

            outputs = nn.Softmax(dim=1)(outputs)

            _, preds = outputs.max(1)



            y_preds.extend(outputs.tolist())
            y_preds_maxclass.extend(preds.tolist())
            y_true.extend(labels.tolist())

            print('outputs: ', outputs)
            print('preds: ', preds)
            print('labels: ', labels)
        y_preds = np.asarray(y_preds, dtype=np.float)
        y_preds_maxclass = np.asarray(y_preds_maxclass, dtype=np.int)
        y_true = np.asarray(y_true, dtype=np.int)
    return y_preds, y_preds_maxclass, y_true


# 得到8个模型的预测结果
def all_predict():

    save_path1 = '/home/zmy/pytorch_code/checkpoint/FPM_undersample_0_20210415_194156'
    weight_name1 = 'FPM_undersample_0-38-1.9969385912021-regular.pth'
    weights_path1 = save_path1 + '/' + weight_name1

    y_preds1, y_preds_maxclass1, y_true1 = predict(weights_path1)


    save_path2 = '/home/zmy/pytorch_code/checkpoint/FPM_undersample_1_20210415_194310'
    weight_name2 = 'FPM_undersample_1-79-5.34326308965683-regular.pth'
    weights_path2 = save_path2 + '/' + weight_name2

    y_preds2, y_preds_maxclass2, y_true2 = predict(weights_path2)

    save_path3 = '/home/zmy/pytorch_code/checkpoint/FPM_undersample_2_20210415_201816'
    weight_name3 = 'FPM_undersample_2-68-3.422107517719269-regular.pth'
    weights_path3 = save_path3 + '/' + weight_name3

    y_preds3, y_preds_maxclass3, y_true3 = predict(weights_path3)

    save_path4 = '/home/zmy/pytorch_code/checkpoint/FPM_undersample_3_20210415_201859'
    weight_name4 = 'FPM_undersample_3-90-4.270656704902649-regular.pth'
    weights_path4 = save_path4 + '/' + weight_name4

    y_preds4, y_preds_maxclass4, y_true4 = predict(weights_path4)

    save_path5 = '/home/zmy/pytorch_code/checkpoint/FPM_undersample_4_20210415_205450'
    weight_name5 = 'FPM_undersample_4-47-2.3367469906806946-regular.pth'
    weights_path5 = save_path5 + '/' + weight_name5

    y_preds5, y_preds_maxclass5, y_true5 = predict(weights_path5)

    save_path6 = '/home/zmy/pytorch_code/checkpoint/FPM_undersample_5_20210415_205545'
    weight_name6 = 'FPM_undersample_5-83-6.921171168486278-regular.pth'
    weights_path6 = save_path6 + '/' + weight_name6

    y_preds6, y_preds_maxclass6, y_true6 = predict(weights_path6)

    save_path7 = '/home/zmy/pytorch_code/checkpoint/FPM_undersample_6_20210416_101454'
    weight_name7 = 'FPM_undersample_6-98-4.482185631990433-regular.pth'
    weights_path7 = save_path7 + '/' + weight_name7

    y_preds7, y_preds_maxclass7, y_true7 = predict(weights_path7)

    save_path8 = '/home/zmy/pytorch_code/checkpoint/FPM_undersample_7_20210416_101757'
    weight_name8 = 'FPM_undersample_7-81-14.932958443959555-regular.pth'
    weights_path8 = save_path8 + '/' + weight_name8

    y_preds8, y_preds_maxclass8, y_true8 = predict(weights_path8)


    # 结果合并, 方法1：所有模型概率相加，再取最大
    probablity_result = y_preds1+y_preds2+y_preds3+y_preds4+y_preds5+y_preds6+y_preds7+y_preds8
    result = np.argmax(probablity_result, axis=1)
    print(result)
    print(y_true1.shape)

    # 结果合并，方法2：投票
    class_preds = [y_preds_maxclass1, y_preds_maxclass2, y_preds_maxclass3, y_preds_maxclass4,
                    y_preds_maxclass5, y_preds_maxclass6, y_preds_maxclass7, y_preds_maxclass8]

    class_preds = np.asarray(class_preds, dtype=np.int)

    class_result = []
    for i in range(class_preds.shape[1]):
        num_class = [0, 0, 0, 0, 0]
        for j in range(class_preds.shape[0]):
            cancer_type = class_preds[j, i]
            num_class[cancer_type] += 1
        print(num_class)
        r = np.argmax(num_class)
        class_result.append(r)

    return result, class_result, y_true1


# 结果评估
def evaluation(preds, labels):

    # 计算评估指标
    target_names = ['1', '2', '3', '4', '5']
    result_statis = classification_report(y_true=labels, y_pred=preds, target_names=target_names)
    print(result_statis)

    # 计算混淆矩阵
    confusion = confusion_matrix(y_true=labels, y_pred=preds)
    print(confusion)


if __name__ == '__main__':
    device = torch.device("cuda:2")

    # 创建网络结构
    net = resnet().to(device)

    # 加载测试数据
    test_data_sets = ['test.csv']
    test_list = read_csv(test_data_sets)
    test_dataset = MyDataset(test_list)
    testloader = DataLoader(test_dataset, batch_size=BatchSize, shuffle=False, num_workers=2)

    # 预测结果
    preds, class_preds, labels = all_predict()

    # 结果评估
    # 概率相加取最大
    print('结果：概率相加取最大')
    evaluation(preds, labels)

    # 投票
    print('结果：投票')
    evaluation(class_preds, labels)


