import os
import time
import datetime
import pandas as pd
import numpy as np

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
from two_triplet_resnet import resnet50
from sklearn.metrics import roc_curve, auc


model_name = 'two_tripletloss_oversample_0.1'
DATA_FORMAT = '%Y%m%d_%H%M%S'
TIME_NOW = datetime.datetime.now().strftime(DATA_FORMAT)
checkpoint_path = '/home/zmy/pytorch_code/checkpoint/'+model_name+'_'+TIME_NOW
LOG_DIR = '/home/zmy/pytorch_code/logdir/'+model_name+'_'+TIME_NOW


BatchSize = 16
tri_loss_weight = 0.1

# Custom dataset and preprocessing
class MyDataset(Dataset):
    """Dataset over rows of
    [patientID, label, ct_path, pet_path, pet_slope, pet_intercept]
    (as produced by read_csv). Each item loads the CT and PET .npy slices,
    rescales the PET values, normalizes them, and stacks CT+PET into one
    2-channel image.
    """

    def __init__(self, datalist):
        # datalist: list of per-slice records; see read_csv for the layout.
        self.data_info = datalist


    def __len__(self):
        return len(self.data_info)

    def __getitem__(self, item):
        patientID = self.data_info[item][0]
        label = self.data_info[item][1]
        ct_path = self.data_info[item][2]
        pet_path = self.data_info[item][3]
        pet_slope = self.data_info[item][4]
        pet_intercept = self.data_info[item][5]

        ct = np.load(ct_path)
        pet = np.load(pet_path)

        # Convert stored PET values to real units via the DICOM rescale
        # transform: value = stored * slope + intercept.
        # BUG FIX: the original multiplied the image by itself
        # (`pet * pet.astype(np.float64)`) instead of by pet_slope.
        if pet_slope != 1:
            pet = pet.astype(np.float64) * pet_slope
            pet = pet.astype(np.int32)
        pet += np.int32(pet_intercept)

        # Normalize the PET image (inverted min-max, see MaxMinNormalizer)
        pet = MaxMinNormalizer(pet)

        # Stack CT and PET into a single 2-channel image
        img = merge_CT_PET(ct, pet)

        return {'image': torch.from_numpy(img), 'label': torch.tensor(label)}


# Read the file list
def read_csv(data_sets, istrain=True):
    """Collect per-slice records from one or more CSV index files.

    :param data_sets: iterable of CSV filenames under the fixed sets_path
    :param istrain: True -> slice images live under the augmented data root,
                    False -> under the original data root
    :return: list of [patientID, cancer_type(0/1), ct_path, pet_path,
             pet_slope, pet_intercept]; rows whose cancer_type (1-based in
             the CSV) is not class 1 or 2 are dropped.
    """
    sets_path = '/data1/zmy/data2021/auge2_data/'

    # The two branches in the original differed only in the image root;
    # pick it once instead of duplicating all the column reads.
    img_root = ('/data1/zmy/data2021/auge2_data/' if istrain
                else '/data1/zmy/data2021/origin_data/')

    data_features = []

    for set_name in data_sets:  # renamed: `set` shadowed the builtin
        train_data = pd.read_csv(sets_path + set_name)
        for j in range(len(train_data)):
            # CSV stores 1-based class ids; shift to 0-based.
            cancer_type = int(train_data['cancer_type'][j]) - 1
            # Keep only the two classes this binary model trains on.
            if cancer_type not in (0, 1):
                continue

            data_features.append([
                train_data['patientID'][j],
                cancer_type,
                img_root + train_data['CTSlice_Path'][j],
                img_root + train_data['PETSlice_Path'][j],
                float(train_data['pet_slope'][j]),
                float(train_data['pet_intercept'][j]),
            ])

    return data_features



# Image normalization (inverted min-max)
def MaxMinNormalizer(data):
    """Rescale data to [0, 1] with min -> 1 and max -> 0 (inverted min-max).

    Guards against a constant image (max == min): the original divided by
    zero there and produced NaN/inf; such inputs now map to all ones, the
    limit of the formula as the value range shrinks to zero.
    """
    data_max = np.max(data)
    data_min = np.min(data)
    value_range = data_max - data_min
    if value_range == 0:
        return np.ones_like(data, dtype=np.float64)
    return 1 - (data - data_min) / value_range


# Fuse CT and PET into one 2-channel image
def merge_CT_PET(ct_array, pet_array):
    """Stack a CT slice and a PET slice into a single (2, H, W) float64 array."""
    # The `np.float` alias was removed in NumPy 1.24, which made this line
    # raise AttributeError; use the explicit float64 dtype it aliased.
    img = np.asarray([ct_array, pet_array], dtype=np.float64)
    return img


# L2-normalized output
def norml2_featuremap(features):
    """L2-normalize each sample of a batch along dim 1.

    Equivalent to the original per-sample Python loop
    (features[i] / norm[i]), but done with a single broadcast division.

    :param features: tensor of shape (batch, ...) with the norm taken
                     over dim 1
    :return: tensor of the same shape with each features[i] divided by
             its L2 norm (a zero row still yields NaN/inf, as before)
    """
    norm = torch.norm(features, p=2, dim=1)
    # unsqueeze re-inserts dim 1 so the division broadcasts exactly like
    # the original features[i] / norm[i] did for every sample.
    return features / norm.unsqueeze(1)

# Pick pos and neg companions for each anchor
def get_random_triplet(features, labels):
    '''
    Pair every usable sample with the first same-label sample (positive)
    and the first different-label sample (negative) found in the batch.

    :param features: batch x feature tensor
    :param labels: batch label tensor
    :return: (anchors, positives, negatives) tensors, or (None, None, None)
             when no sample in the batch admits a triplet
    '''
    n = labels.size(0)

    anchors, positives, negatives = [], [], []

    for i in range(n):
        same = [j for j in range(n) if j != i and labels[i] == labels[j]]
        diff = [j for j in range(n) if j != i and labels[i] != labels[j]]
        # A sample without both a positive and a negative candidate is skipped.
        if not same or not diff:
            continue
        anchors.append(features[i].unsqueeze(0))
        positives.append(features[same[0]].unsqueeze(0))
        negatives.append(features[diff[0]].unsqueeze(0))

    if not anchors:
        return None, None, None

    return torch.cat(anchors), torch.cat(positives), torch.cat(negatives)


def get_hard_triplet(features, labels):
    '''
    Build hard triplets: for every sample with at least one same-label and
    one different-label companion in the batch, pair the anchor with its
    farthest positive (get_hard_pos) and its closest negative (get_hard_neg).

    :param features: batch x feature tensor
    :param labels: batch label tensor
    :return: (anchors, positives, negatives) tensors, or (None, None, None)
             when no sample in the batch admits a triplet
    '''
    n = labels.size(0)

    anchors, positives, negatives = [], [], []

    for i in range(n):
        same = [j for j in range(n) if j != i and labels[i] == labels[j]]
        diff = [j for j in range(n) if j != i and labels[i] != labels[j]]
        # A sample without both a positive and a negative candidate is skipped.
        if not same or not diff:
            continue
        anchors.append(features[i].unsqueeze(0))
        positives.append(get_hard_pos(features[i], features[same]).unsqueeze(0))
        negatives.append(get_hard_neg(features[i], features[diff]).unsqueeze(0))

    if not anchors:
        return None, None, None

    return torch.cat(anchors), torch.cat(positives), torch.cat(negatives)


def get_hard_pos(anchor, pos_features):
    '''
    Return the positive candidate farthest (L2) from the anchor — the
    "hardest" positive.

    Fixes an off-by-one in the original: its scan started at index 1 with
    max_distance = 0, so the distance to pos_features[0] was never measured
    and the true farthest positive could be skipped (e.g. distances [5, 3]
    returned index 1).

    :param anchor: one anchor feature tensor
    :param pos_features: (k, *feature_shape) tensor of positive candidates
    :return: the candidate with the largest L2 distance to the anchor
    '''
    # Squared L2 distance is monotone in L2 distance, so the sqrt the
    # original took is unnecessary for picking the maximum.
    diff = pos_features - anchor  # anchor broadcasts over the candidate dim
    dists = diff.reshape(pos_features.size(0), -1).pow(2).sum(dim=1)
    return pos_features[torch.argmax(dists)]


def get_hard_neg(anchor, neg_features):
    '''
    Return the negative candidate closest (L2) to the anchor — the
    "hardest" negative.

    Fixes vs. the original:
    - off-by-one: the scan started at index 1, so the distance to
      neg_features[0] was never measured and the true closest negative
      could be missed (e.g. distances [1, 5] returned index 1);
    - the docstring described positives (copy-paste).

    :param anchor: one anchor feature tensor
    :param neg_features: (k, *feature_shape) tensor of negative candidates
    :return: the candidate with the smallest L2 distance to the anchor
    '''
    # Squared L2 distance is monotone in L2 distance, so sqrt is unnecessary
    # for picking the minimum.
    diff = neg_features - anchor  # anchor broadcasts over the candidate dim
    dists = diff.reshape(neg_features.size(0), -1).pow(2).sum(dim=1)
    return neg_features[torch.argmin(dists)]


def distance(a, b):
    '''
    Euclidean (L2) distance between two feature tensors.

    :param a: anchor
    :param b: pos or neg
    :return: scalar tensor holding the distance
    '''
    diff = a - b
    return diff.pow(2).sum().sqrt()


# Build the network
def resnet():
    """Create a ResNet-50 whose stem conv accepts 2 input channels.

    The first conv is replaced because the dataset images are 2-channel
    (CT + PET) rather than 3-channel RGB. Prints the architecture and the
    total parameter count as a side effect.
    """
    net = resnet50()
    net.conv1 = nn.Conv2d(2, 64, kernel_size=7, stride=2, padding=3, bias=False)

    # Log the architecture and parameter count for the training record.
    print(net)
    param_count = sum(x.numel() for x in net.parameters())
    print("Total number of paramerters in networks is {}  ".format(param_count))

    return net


def train(epoch):
    """Run one training epoch.

    Forward pass produces (out1, out2): out1 is the embedding used for the
    triplet loss (after L2 normalization + hard-triplet mining), out2 the
    classification logits for the softmax loss. Total loss is
    softmax + tri_loss_weight * triplet. Logs batch/epoch losses and
    per-class accuracies to TensorBoard.

    Relies on module-level globals: net, trainloader, optimizer, device,
    writer, softmax_loss, triplet_loss, tri_loss_weight, BatchSize.
    """
    start = time.time()
    net.train()

    epoch_loss = 0.0
    epoch_triplet_loss = 0.0
    epoch_softmax_loss = 0.0

    # Per-epoch counters: correct predictions and sample totals per class.
    correct = torch.zeros(2, dtype=torch.float).to(device)
    total = torch.zeros(2, dtype=torch.float).to(device)

    for batch_index, data in enumerate(trainloader):
        images = data['image'].type(torch.FloatTensor).to(device)
        labels = data['label'].type(torch.LongTensor).to(device)

        optimizer.zero_grad()
        # out1: embedding head (triplet loss); out2: classification logits.
        out1, out2 = net(images)

        norm_out1 = norml2_featuremap(out1)
        a, p, n = get_hard_triplet(norm_out1, labels)
        if a is None:
            # No valid triplet in this batch (some class missing or unique):
            # fall back to the classification loss alone.
            loss = softmax_loss(out2, labels)

            epoch_loss += loss.item()
            epoch_softmax_loss += loss.item()
            epoch_triplet_loss += 0
        else:
            loss1 = softmax_loss(out2, labels)
            loss2 = tri_loss_weight*triplet_loss(a, p, n)
            loss = loss1+loss2

            epoch_loss += loss.item()
            epoch_softmax_loss += loss1.item()
            epoch_triplet_loss += loss2.item()

        loss.backward()
        optimizer.step()


        _, preds = out2.max(1)

        # Labels of the correctly predicted samples in this batch.
        equal = labels[labels == preds]

        # Accumulate per-class correct/total counts.
        correct[0] += (equal == 0).sum()
        total[0] += (labels == 0).sum()

        correct[1] += (equal == 1).sum()
        total[1] += (labels == 1).sum()




        # Global batch index over the whole training run (for TensorBoard).
        n_iter = (epoch-1)*len(trainloader)+batch_index+1

        print('Training Epoch: {epoch}[{trained_samples}/{total_samples}]\tLR:{:0.6f}\tLoss:{:0.4f}'.format(
            optimizer.param_groups[0]['lr'],
            loss.item(),
            epoch=epoch,
            trained_samples=batch_index * BatchSize + len(images),
            total_samples=len(trainloader.dataset)
        ))

        # update training loss for each iteration
        writer.add_scalar('Train/batch_loss', loss.item(), n_iter)


    finish = time.time()

    # Per-class accuracy (correct / total for each of the two classes).
    class_accuracy = correct / total

    print('Train Epoch: {}, loss: {:.4f}, Total_Accuracy: {:.4f}, Average_Accuracy: {:.4f}, Time consumed:{:.2f}s'.format(
        epoch,
        epoch_loss / len(trainloader),
        correct.sum().float() / total.sum().float(),
        class_accuracy.mean(),
        finish-start
    ))

    writer.add_scalar('Train/epoch_loss', epoch_loss / len(trainloader), epoch)
    writer.add_scalar('Train/epoch_softmax_loss', epoch_softmax_loss / len(trainloader), epoch)
    writer.add_scalar('Train/epoch_triplet_loss', epoch_triplet_loss / len(trainloader), epoch)
    writer.add_scalar('Train/Total_Accuracy', correct.sum().float() / total.sum().float(), epoch)


    writer.add_scalar('Train/class_one_Accuracy', class_accuracy[0], epoch)
    writer.add_scalar('Train/class_two_Accuracy', class_accuracy[1], epoch)
    writer.add_scalar('Train/Average_Accuracy', class_accuracy.mean(), epoch)


@torch.no_grad()
def eval_training(epoch=0, tb=True):
    """Evaluate on the test set for one epoch.

    Computes the same combined softmax + triplet loss as train() (but with
    randomly-mined triplets), per-class accuracies, and the ROC AUC from the
    softmax probability of class 1. Logs to TensorBoard when tb is True.

    Relies on module-level globals: net, testloader, device, writer,
    softmax_loss, triplet_loss, tri_loss_weight.

    :return: mean test loss over all batches
    """
    start = time.time()
    net.eval()

    test_loss = 0.0
    epoch_triplet_loss = 0.0
    epoch_softmax_loss = 0.0

    # Per-epoch counters: correct predictions and sample totals per class.
    correct = torch.zeros(2, dtype=torch.float).to(device)
    total = torch.zeros(2, dtype=torch.float).to(device)

    y_preds_ones = []  # predicted probability of class 1, for the ROC curve
    y_true = []


    for batch_index, data in enumerate(testloader):
        images = data['image'].type(torch.FloatTensor).to(device)
        labels = data['label'].type(torch.LongTensor).to(device)

        out1, out2 = net(images)
        norm_out1 = norml2_featuremap(out1)
        # Random (not hard) triplet mining at eval time.
        a, p, n = get_random_triplet(norm_out1, labels)
        if a is None:
            # No valid triplet in this batch: classification loss alone.
            loss = softmax_loss(out2, labels)

            test_loss += loss.item()
            epoch_softmax_loss += loss.item()
            epoch_triplet_loss += 0

        else:
            loss1 = softmax_loss(out2, labels)
            loss2 = tri_loss_weight * triplet_loss(a, p, n)
            loss = loss1 + loss2

            test_loss += loss.item()
            epoch_softmax_loss += loss1.item()
            epoch_triplet_loss += loss2.item()

        # Collect class-1 probabilities and ground truth for the AUC.
        outputs = torch.softmax(out2, dim=1)
        result = outputs.tolist()
        for mm in range(len(result)):
            y_preds_ones.append(result[mm][1])
        y_true.extend(labels.tolist())

        _, preds = out2.max(1)

        # Labels of the correctly predicted samples in this batch.
        equal = labels[labels == preds]

        # Accumulate per-class correct/total counts.
        correct[0] += (equal == 0).sum()
        total[0] += (labels == 0).sum()

        correct[1] += (equal == 1).sum()
        total[1] += (labels == 1).sum()


        # Global batch index over the whole run (for TensorBoard).
        n_iter = (epoch - 1) * len(testloader) + batch_index + 1

        writer.add_scalar('Test/batch_loss', loss.item(), n_iter)


    finish = time.time()

    # Per-class accuracy (correct / total for each of the two classes).
    class_accuracy = correct / total

    # ROC AUC from the collected class-1 probabilities.
    fpr, tpr, threshold = roc_curve(y_true, y_preds_ones)
    roc_auc = auc(fpr, tpr)


    print('Test set: Epoch:{}, loss:{:.4f}, Total_Accuracy:{:.4f}, Average_Accuracy: {:.4f}, Time consumed:{:.2f}s'.format(
        epoch,
        test_loss / len(testloader),
        correct.sum().float() / total.sum().float(),
        class_accuracy.mean(),
        finish-start
    ))
    print('test auc: {:.4f}'.format(roc_auc))
    print()

    # add infomations to tensorboard
    if tb:
        writer.add_scalar('Test/epoch_loss', test_loss/len(testloader), epoch)
        writer.add_scalar('Test/auc', roc_auc, epoch)
        writer.add_scalar('Test/epoch_softmax_loss', epoch_softmax_loss / len(testloader), epoch)
        writer.add_scalar('Test/epoch_triplet_loss', epoch_triplet_loss / len(testloader), epoch)
        writer.add_scalar('Test/Total_Accuracy', correct.sum().float() / total.sum().float(), epoch)


        writer.add_scalar('Test/class_one_Accuracy', class_accuracy[0], epoch)
        writer.add_scalar('Test/class_two_Accuracy', class_accuracy[1], epoch)
        writer.add_scalar('Test/Average_Accuracy', class_accuracy.mean(), epoch)

    return test_loss / len(testloader)




if __name__ == '__main__':

    # Select the GPU to use
    device = torch.device("cuda:1")


    # Build the network
    net = resnet().to(device)

    # Train / validation CSV index files
    train_data_sets = ['compose_train.csv']  # trained on non-augmented adenocarcinoma plus the other four classes augmented
    test_data_sets = ['test.csv']

    # Load training data
    train_list = read_csv(train_data_sets)
    train_dataset = MyDataset(train_list)
    trainloader = DataLoader(train_dataset, batch_size=BatchSize, shuffle=True, num_workers=2)

    # Load validation data (istrain=False would point at the original data
    # root; note read_csv is called here with its default istrain=True)
    test_list = read_csv(test_data_sets)
    test_dataset = MyDataset(test_list)
    testloader = DataLoader(test_dataset, batch_size=BatchSize, shuffle=False, num_workers=2)

    # Optimizer and loss functions
    softmax_loss = nn.CrossEntropyLoss()
    triplet_loss = torch.nn.TripletMarginLoss(reduction='mean')
    # optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9, weight_decay=5e-4)
    optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)

    # Learning-rate schedule (disabled)
    # scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50, 80], gamma=0.1, last_epoch=-1)

    # Create the log directory
    if not os.path.exists(LOG_DIR):
        os.makedirs(LOG_DIR)

    # TensorBoard writer
    writer = SummaryWriter(log_dir=LOG_DIR)

    # Create the checkpoint directory
    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)
    # NOTE: checkpoint_path is rebound here from a directory to a filename
    # template that is .format()-ed when saving below.
    checkpoint_path = os.path.join(checkpoint_path, '{net}-{epoch}-{loss}-{type}.pth')

    # Initialize best loss to infinity (used by the save-best logic below)
    origin_loss = float('inf')

    # Training loop
    for epoch in range(1, 101):

        # scheduler.step(epoch=epoch)

        # Train then validate each epoch
        train(epoch)
        val_loss = eval_training(epoch)

        # Save the model with the lowest validation loss (disabled)
        # if origin_loss > val_loss:
        #     weights_path = checkpoint_path.format(net=model_name, epoch=epoch, loss=val_loss, type='best')
        #     print('saving weights file to {}'.format(weights_path))
        #     torch.save(net.state_dict(), weights_path)
        #     origin_loss = val_loss
        #     continue

        # Save a checkpoint every epoch (interval 1)
        if not epoch % 1:
            weights_path = checkpoint_path.format(net=model_name, epoch=epoch, loss=val_loss, type='regular')
            print('saving weights file to {}'.format(weights_path))
            torch.save(net.state_dict(), weights_path)

    writer.close()

