import os
import cv2
import time
import datetime
import pandas as pd
import numpy as np

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.models import resnet50


model_name = 'FPM_one_channel'
# Timestamp format used to tag this run's checkpoint and log directories.
DATA_FORMAT = '%Y%m%d_%H%M%S'
TIME_NOW = datetime.datetime.now().strftime(DATA_FORMAT)
# Per-run output locations: model checkpoints and TensorBoard event files.
checkpoint_path = '/home/zmy/pytorch_code/checkpoint/'+model_name+'_'+TIME_NOW
LOG_DIR = '/home/zmy/pytorch_code/logdir/'+model_name+'_'+TIME_NOW


# Mini-batch size shared by the train and test loaders.
BatchSize = 16


# Custom dataset and preprocessing: fused CT+PET slices with labels.
class MyDataset(Dataset):
    """Dataset over per-slice records produced by read_csv().

    Each record is [patientID, label, ct_path, pet_path, pet_slope,
    pet_intercept]. __getitem__ loads the two .npy slices, rescales the
    PET slice to HU, normalizes it, fuses it with the CT slice, and
    returns {'image': tensor (1, H, W), 'label': tensor}.
    """

    def __init__(self, datalist):
        # datalist: list of 6-element records, see read_csv().
        self.data_info = datalist

    def __len__(self):
        return len(self.data_info)

    def __getitem__(self, item):
        patientID = self.data_info[item][0]
        label = self.data_info[item][1]
        ct_path = self.data_info[item][2]
        pet_path = self.data_info[item][3]
        pet_slope = self.data_info[item][4]
        pet_intercept = self.data_info[item][5]

        ct = np.load(ct_path)
        pet = np.load(pet_path)

        # Convert PET pixel values to HU via the DICOM rescale equation:
        #   HU = pixel * RescaleSlope + RescaleIntercept.
        # BUG FIX: the original multiplied the image by itself
        # (pet * pet.astype(np.float64)) instead of by the slope.
        if pet_slope != 1:
            pet = pet.astype(np.float64) * pet_slope
            pet = pet.astype(np.int32)
        pet += np.int32(pet_intercept)

        # Normalize PET intensities (inverted min-max: min -> 1, max -> 0).
        pet = MaxMinNormalizer(pet)

        # Blend CT and PET into one single-channel image.
        img = merge_CT_PET(ct, pet)

        return {'image': torch.from_numpy(img), 'label': torch.tensor(label)}


# Build the per-slice sample list from one or more CSV split files.
def read_csv(data_sets):
    """Expand dataset-split CSVs into per-slice training records.

    Args:
        data_sets: list of CSV file names (e.g. ['train.csv']) relative to
            the fixed split directory below.

    Returns:
        list of records [patientID, label (0-based), ct_slice_path,
        pet_slice_path, pet_slope, pet_intercept], one per CT slice file.
    """
    sets_path = '/data1/zmy/data2021/origin_data/divide_csv/five/'

    data_features = []

    # Renamed loop variable from 'set' — it shadowed the builtin.
    for set_name in data_sets:
        train_data = pd.read_csv(sets_path+set_name)
        for j in range(len(train_data)):

            # Per-patient slice directories.
            patientid = train_data['patientID'][j]

            ct_path = '/data1/zmy/data2021/origin_data/Slice/'+str(patientid)+'/CTSlice/'
            pet_path = '/data1/zmy/data2021/origin_data/Slice/'+str(patientid)+'/PETSlice/'
            pet_slope = float(train_data['pet_slope'][j])
            pet_intercept = float(train_data['pet_intercept'][j])

            # NOTE(review): assumes the PET directory contains slices with the
            # same file names as the CT directory — confirm with data layout.
            for it in os.listdir(ct_path):
                one_feature = [patientid,
                               int(train_data['cancer_type'][j])-1,  # labels shifted to 0-based
                               ct_path+it, pet_path+it, pet_slope, pet_intercept]

                data_features.append(one_feature)

    return data_features


# Inverted min-max image normalization: min -> 1, max -> 0.
def MaxMinNormalizer(data):
    """Return *data* min-max scaled to [0, 1], inverted.

    The minimum value maps to 1 and the maximum to 0. A constant-valued
    input (max == min) returns an all-zeros float array instead of
    dividing by zero (the original emitted NaNs with a RuntimeWarning).
    """
    data_max = np.max(data)
    data_min = np.min(data)
    value_range = data_max - data_min
    if value_range == 0:
        # Degenerate (flat) image: avoid 0/0 -> NaN.
        return np.zeros_like(data, dtype=np.float64)
    return 1 - (data - data_min) / value_range


# Fuse a CT slice and a PET slice into a single one-channel image.
def merge_CT_PET(ct_array, pet_array):
    """Blend the CT and PET slices 50/50 and add a leading channel axis."""
    # Blending weight given to the CT slice; PET gets the complement.
    percent = 0.5
    blended = cv2.addWeighted(ct_array, percent, pet_array, 1 - percent, 0)

    # Prepend a channel dimension: (H, W) -> (1, H, W).
    return np.expand_dims(blended, 0)


# Build the classification network.
def resnet():
    """Create a ResNet-50 adapted to 2-channel input and 5 output classes.

    Returns the randomly-initialized network after printing its structure
    and total parameter count.
    """
    net = resnet50(pretrained=False)
    # First conv takes 2 channels (fused CT/PET input) instead of RGB's 3.
    net.conv1 = nn.Conv2d(2, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
    # Replace the 1000-class ImageNet head with a 5-class classifier.
    net.fc = nn.Linear(in_features=net.fc.in_features, out_features=5, bias=True)

    # Log architecture and parameter count for the training record.
    print(net)
    print("Total number of paramerters in networks is {}  ".format(sum(x.numel() for x in net.parameters())))

    return net


def train(epoch):
    """Run one training epoch over trainloader.

    Logs per-batch loss, epoch loss, overall accuracy, and per-class
    accuracy to stdout and TensorBoard. Uses module-level globals:
    net, device, trainloader, optimizer, loss_function, writer, BatchSize.

    Args:
        epoch: 1-based epoch index (used for the global iteration counter).
    """
    start = time.time()
    net.train()

    epoch_loss = 0.0

    # Per-class counts for this epoch: correct[c] / total[c] for class c.
    correct = torch.zeros(5, dtype=torch.float).to(device)
    total = torch.zeros(5, dtype=torch.float).to(device)

    for batch_index, data in enumerate(trainloader):
        images = data['image'].type(torch.FloatTensor).to(device)
        labels = data['label'].type(torch.LongTensor).to(device)

        optimizer.zero_grad()
        outputs = net(images)
        loss = loss_function(outputs, labels)
        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()

        _, preds = outputs.max(1)

        # Labels of the correctly classified samples in this batch.
        equal = labels[labels == preds]

        # Accumulate per-class correct/total counts (replaces ten
        # copy-pasted statements in the original).
        for c in range(5):
            correct[c] += (equal == c).sum()
            total[c] += (labels == c).sum()

        # Global batch index across the whole training run.
        n_iter = (epoch-1)*len(trainloader)+batch_index+1

        print('Training Epoch: {epoch}[{trained_samples}/{total_samples}]\tLR:{:0.6f}\tLoss:{:0.4f}'.format(
            optimizer.param_groups[0]['lr'],
            loss.item(),
            epoch=epoch,
            trained_samples=batch_index * BatchSize + len(images),
            total_samples=len(trainloader.dataset)
        ))

        # update training loss for each iteration
        writer.add_scalar('Train/batch_loss', loss.item(), n_iter)

    finish = time.time()

    # Per-class accuracy. NOTE(review): yields NaN for any class absent
    # from this epoch (total[c] == 0) — same as the original behavior.
    class_accuracy = correct / total

    print(
        'Train Epoch: {}, loss: {:.4f}, Total_Accuracy: {:.4f}, Average_Accuracy: {:.4f}, Time consumed:{:.2f}s'.format(
            epoch,
            epoch_loss / len(trainloader),
            correct.sum().float() / total.sum().float(),
            class_accuracy.mean(),
            finish - start
        ))

    writer.add_scalar('Train/epoch_loss', epoch_loss / len(trainloader), epoch)
    writer.add_scalar('Train/Total_Accuracy', correct.sum().float() / total.sum().float(), epoch)

    # Per-class and mean accuracy curves (tags identical to the original).
    for c, word in enumerate(('one', 'two', 'three', 'four', 'five')):
        writer.add_scalar('Train/class_{}_Accuracy'.format(word), class_accuracy[c], epoch)
    writer.add_scalar('Train/Average_Accuracy', class_accuracy.mean(), epoch)


@torch.no_grad()
def eval_training(epoch=0, tb=True):
    """Evaluate net on testloader for one epoch.

    Logs per-batch loss always, and epoch-level metrics to TensorBoard
    when *tb* is True. Uses module-level globals: net, device, testloader,
    loss_function, writer.

    Args:
        epoch: 1-based epoch index (used for the global iteration counter).
        tb: whether to write epoch-level scalars to TensorBoard.

    Returns:
        Mean test loss over the test batches (float).
    """
    start = time.time()
    net.eval()

    test_loss = 0.0
    # Per-class counts for this epoch: correct[c] / total[c] for class c.
    correct = torch.zeros(5, dtype=torch.float).to(device)
    total = torch.zeros(5, dtype=torch.float).to(device)

    for batch_index, data in enumerate(testloader):
        images = data['image'].type(torch.FloatTensor).to(device)
        labels = data['label'].type(torch.LongTensor).to(device)

        outputs = net(images)
        loss = loss_function(outputs, labels)

        test_loss += loss.item()
        _, preds = outputs.max(1)

        # Labels of the correctly classified samples in this batch.
        equal = labels[labels == preds]

        # Accumulate per-class correct/total counts (replaces ten
        # copy-pasted statements in the original).
        for c in range(5):
            correct[c] += (equal == c).sum()
            total[c] += (labels == c).sum()

        # Global batch index across the whole evaluation run.
        n_iter = (epoch - 1) * len(testloader) + batch_index + 1

        writer.add_scalar('Test/batch_loss', loss.item(), n_iter)

    finish = time.time()

    # Per-class accuracy. NOTE(review): yields NaN for any class absent
    # from the test set (total[c] == 0) — same as the original behavior.
    class_accuracy = correct / total

    print(
        'Test set: Epoch:{}, loss:{:.4f}, Total_Accuracy:{:.4f}, Average_Accuracy: {:.4f}, Time consumed:{:.2f}s'.format(
            epoch,
            test_loss / len(testloader),
            correct.sum().float() / total.sum().float(),
            class_accuracy.mean(),
            finish - start
        ))
    print()

    # add infomations to tensorboard
    if tb:
        writer.add_scalar('Test/epoch_loss', test_loss / len(testloader), epoch)
        writer.add_scalar('Test/Total_Accuracy', correct.sum().float() / total.sum().float(), epoch)

        # Per-class and mean accuracy curves (tags identical to the original).
        for c, word in enumerate(('one', 'two', 'three', 'four', 'five')):
            writer.add_scalar('Test/class_{}_Accuracy'.format(word), class_accuracy[c], epoch)
        writer.add_scalar('Test/Average_Accuracy', class_accuracy.mean(), epoch)

    return test_loss / len(testloader)



if __name__ == '__main__':

    # Select the GPU to run on (hardcoded to device id 2).
    device = torch.device("cuda:2")


    # Build the network and move it to the selected device.
    net = resnet().to(device)

    # CSV split files for training and validation.
    train_data_sets = ['train.csv']
    test_data_sets = ['test.csv']

    # Load training data.
    train_list = read_csv(train_data_sets)
    train_dataset = MyDataset(train_list)
    trainloader = DataLoader(train_dataset, batch_size=BatchSize, shuffle=True, num_workers=2)

    # Load validation data (no shuffling for evaluation).
    test_list = read_csv(test_data_sets)
    test_dataset = MyDataset(test_list)
    testloader = DataLoader(test_dataset, batch_size=BatchSize, shuffle=False, num_workers=2)

    # Loss function and optimizer.
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9, weight_decay=5e-4)

    # Learning-rate schedule (currently disabled).
    # scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[80], gamma=0.1, last_epoch=-1)

    # Create the TensorBoard log directory.
    if not os.path.exists(LOG_DIR):
        os.makedirs(LOG_DIR)

    # TensorBoard writer used by train() and eval_training().
    writer = SummaryWriter(log_dir=LOG_DIR)

    # Create the checkpoint directory. NOTE: checkpoint_path is rebound
    # below from a directory path to a filename template.
    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)
    checkpoint_path = os.path.join(checkpoint_path, '{net}-{epoch}-{loss}-{type}.pth')

    # Best validation loss so far; initialized to +inf so the first
    # epoch always saves a 'best' checkpoint.
    origin_loss = float('inf')

    # Train for 100 epochs.
    for epoch in range(1, 101):

        # scheduler.step(epoch=epoch)

        # One training pass and one validation pass per epoch.
        train(epoch)
        val_loss = eval_training(epoch)

        # Save a 'best' checkpoint whenever validation loss improves.
        if origin_loss > val_loss:
            weights_path = checkpoint_path.format(net=model_name, epoch=epoch, loss=val_loss, type='best')
            print('saving weights file to {}'.format(weights_path))
            torch.save(net.state_dict(), weights_path)
            origin_loss = val_loss
            # NOTE(review): this 'continue' means an improving epoch skips
            # the periodic save below even when epoch % 5 == 0.
            continue

        # Periodic 'regular' checkpoint every 5 epochs.
        if not epoch % 5:
            weights_path = checkpoint_path.format(net=model_name, epoch=epoch, loss=val_loss, type='regular')
            print('saving weights file to {}'.format(weights_path))
            torch.save(net.state_dict(), weights_path)

    writer.close()

