import os
import time
import scipy.ndimage
import datetime
import pandas as pd
import numpy as np

import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
from net_five_3d_manifold_remix_other import generate_model, device
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import MinMaxScaler

# Run identification: model name plus a start-time stamp, so every run gets
# its own checkpoint and tensorboard directories.
model_name = 'five_other_manifold_3d_0.2'
DATA_FORMAT = '%Y%m%d_%H%M%S'  # strftime pattern for the run timestamp
TIME_NOW = datetime.datetime.now().strftime(DATA_FORMAT)
checkpoint_path = '/home/zmy/pytorch_code/checkpoint/'+model_name+'_'+TIME_NOW
LOG_DIR = '/home/zmy/pytorch_code/logdir/'+model_name+'_'+TIME_NOW


BatchSize = 16  # samples per mini-batch (also used for progress reporting)
Alpha = 0.2  # mixup_alpha forwarded to the network's manifold-mixup

# Custom dataset: loads a pre-saved 3D image volume per patient plus
# normalized clinical covariates (weight, sex, age, size).
class MyDataset(Dataset):

    def __init__(self, datalist):
        """datalist: records of
        [patientID, label, img_path, weight, sex, age, size]
        as produced by read_csv()."""
        self.data_info = datalist

    def __len__(self):
        return len(self.data_info)

    def __getitem__(self, item):
        """Return one sample as {'image', 'other', 'label'} tensors."""
        patientID = self.data_info[item][0]
        label = self.data_info[item][1]
        img_path = self.data_info[item][2]

        img = np.load(img_path)

        # Normalize the covariates to roughly [0, 1].
        patientWeight = self.data_info[item][3] / 200.0
        patientSex = self.data_info[item][4]
        patientAge = self.data_info[item][5] / 100.0
        patientSize = self.data_info[item][6] / 200.0

        other = [patientWeight, patientSex, patientAge, patientSize]
        # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # use the explicit float64 dtype instead.
        other = np.asarray(other, dtype=np.float64)

        return {'image': torch.from_numpy(img),
                'other': torch.from_numpy(other),
                'label': torch.tensor(label)}


# Read the dataset file lists.
def read_csv(data_sets):
    """Read one or more split CSVs and build per-sample feature records.

    Args:
        data_sets: iterable of CSV file names under the fixed divide_csv dir.

    Returns:
        list of [patientID, label, img_path, weight, sex, age, size],
        where label is the 0-based cancer type (CSV stores it 1-based).
    """
    sets_path = '/data1/zmy/data2021/origin_data/divide_csv/five/'

    data_features = []

    # FIX: loop variable renamed from `set`, which shadowed the builtin.
    for csv_name in data_sets:
        frame = pd.read_csv(sets_path + csv_name)
        # itertuples avoids the per-column positional indexing of the
        # original range(len(...)) loop; column values are unchanged.
        for row in frame.itertuples(index=False):

            cancer_type = int(row.cancer_type) - 1  # to 0-based class index

            patientid = row.patientID
            img_path = '/data1/zmy/data2021/auge2_data/img_3D/' + str(patientid) + '/img3d.npy'

            one_feature = [patientid, cancer_type, img_path,
                           row.patientWeight, row.patientSex,
                           row.patientAge, row.patientSize]

            data_features.append(one_feature)

    return data_features



# Build the classification network.
def resnet():
    """Instantiate the 3D ResNet-50 backbone and report its size."""
    net = generate_model(50)

    # Print the architecture and total parameter count for sanity checking.
    print(net)
    param_count = sum(p.numel() for p in net.parameters())
    print("Total number of paramerters in networks is {}  ".format(param_count))

    return net

# Loss for mixup: convex combination of the losses against both targets.
def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Return lam-weighted sum of criterion(pred, y_a) and criterion(pred, y_b)."""
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b

def train(epoch):
    """Run one training epoch with manifold mixup and log to tensorboard.

    Uses the module-level net / trainloader / optimizer / loss_function /
    writer globals set up in __main__.
    """
    start = time.time()
    net.train()

    epoch_loss = 0.0
    correct = 0.0
    total = 0

    for step, batch in enumerate(trainloader):
        imgs = batch['image'].type(torch.FloatTensor).to(device)
        extras = batch['other'].type(torch.FloatTensor).to(device)
        targets = batch['label'].type(torch.LongTensor).to(device)

        # Forward pass with mixup enabled: the net returns the mixed feature,
        # logits, both target sets and the sampled mixing coefficient.
        feature, outputs, targets_a, targets_b, lam = net(
            imgs, extras, targets, use_mixup=True, mixup_alpha=Alpha,
            layer_mix=None, mix_type="manifold_mixup")

        # Debug statistics of the learned feature and the covariate inputs.
        print('feature mean:', feature.mean())
        print('feature std: ', feature.std())
        print('other mean: ', extras.mean())
        print('other std: ', extras.std())

        loss = mixup_criterion(loss_function, outputs, targets_a, targets_b, lam)
        epoch_loss += loss.item()

        _, preds = outputs.max(1)
        total += outputs.size(0)
        # Mixup accuracy: credit each prediction by its mixing weight.
        correct += (lam * preds.eq(targets_a).to(device).sum().float()
                    + (1 - lam) * preds.eq(targets_b).to(device).sum().float())

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # scheduler.step(epoch=epoch)

        # Global batch index across the whole training run.
        n_iter = (epoch - 1) * len(trainloader) + step + 1

        print('Training Epoch: {epoch}[{trained_samples}/{total_samples}]\tLR:{:0.6f}\tLoss:{:0.4f}'.format(
            optimizer.param_groups[0]['lr'],
            loss.item(),
            epoch=epoch,
            trained_samples=step * BatchSize + len(imgs),
            total_samples=len(trainloader.dataset)
        ))

        # Per-iteration training loss.
        writer.add_scalar('Train/batch_loss', loss.item(), n_iter)

    finish = time.time()
    print('Train Epoch: {}, loss: {:.4f}, Accuracy: {:.4f}, Time consumed:{:.2f}s'.format(
        epoch,
        epoch_loss / len(trainloader),
        correct.float() / total,
        finish - start
    ))

    writer.add_scalar('Train/epoch_loss', epoch_loss / len(trainloader), epoch)
    writer.add_scalar('Train/Accuracy', correct.float() / total, epoch)


@torch.no_grad()
def eval_training(epoch=0, tb=True):
    """Evaluate on the test set and return the mean batch loss.

    Logs per-batch loss, total/per-class/average accuracy to tensorboard
    (when tb is True). Uses the module-level net / testloader /
    loss_function / writer globals.
    """
    start = time.time()
    net.eval()

    test_loss = 0.0
    # Per-class correct-prediction and sample counts for the five classes.
    correct = torch.zeros(5, dtype=torch.float).to(device)
    total = torch.zeros(5, dtype=torch.float).to(device)

    y_preds_ones = []  # softmax probability of class index 1 per sample
    y_true = []

    for step, batch in enumerate(testloader):
        imgs = batch['image'].type(torch.FloatTensor).to(device)
        extras = batch['other'].type(torch.FloatTensor).to(device)
        targets = batch['label'].type(torch.LongTensor).to(device)

        feature, outputs = net(imgs, extras, targets, use_mixup=False,
                               mixup_alpha=Alpha, layer_mix=None)

        loss = loss_function(outputs, targets)
        test_loss += loss.item()

        outputs = torch.softmax(outputs, dim=1)
        for probs in outputs.tolist():
            y_preds_ones.append(probs[1])
        y_true.extend(targets.tolist())

        _, preds = outputs.max(1)
        # Labels of the correctly predicted samples in this batch.
        matched = targets[targets == preds]

        # Tally correct / total counts per class.
        for cls in range(5):
            correct[cls] += (matched == cls).sum()
            total[cls] += (targets == cls).sum()

        # Global batch index across the whole run.
        n_iter = (epoch - 1) * len(testloader) + step + 1

        writer.add_scalar('Test/batch_loss', loss.item(), n_iter)

    finish = time.time()

    # Per-class accuracy.
    class_accuracy = correct / total

    # AUC computation (disabled).
    # fpr, tpr, threshold = roc_curve(y_true, y_preds_ones)
    # roc_auc = auc(fpr, tpr)

    print(
        'Test set: Epoch:{}, loss:{:.4f}, Total_Accuracy:{:.4f}, Average_Accuracy: {:.4f}, Time consumed:{:.2f}s'.format(
            epoch,
            test_loss / len(testloader),
            correct.sum().float() / total.sum().float(),
            class_accuracy.mean(),
            finish - start
        ))

    # print('test auc: {:.4f}'.format(roc_auc))

    print()

    # Record epoch-level metrics to tensorboard.
    if tb:
        writer.add_scalar('Test/epoch_loss', test_loss / len(testloader), epoch)
        # writer.add_scalar('Test/auc', roc_auc, epoch)
        writer.add_scalar('Test/Total_Accuracy', correct.sum().float() / total.sum().float(), epoch)
        writer.add_scalar('Test/class_one_Accuracy', class_accuracy[0], epoch)
        writer.add_scalar('Test/class_two_Accuracy', class_accuracy[1], epoch)
        writer.add_scalar('Test/class_three_Accuracy', class_accuracy[2], epoch)
        writer.add_scalar('Test/class_four_Accuracy', class_accuracy[3], epoch)
        writer.add_scalar('Test/class_five_Accuracy', class_accuracy[4], epoch)
        writer.add_scalar('Test/Average_Accuracy', class_accuracy.mean(), epoch)

    return test_loss / len(testloader)

if __name__ == '__main__':

    # Select GPU id (device is imported from the network module).
    # device = torch.device("cuda:0")


    # Build the network.
    net = resnet().to(device)

    # Training / validation split file lists.
    train_data_sets = ['train.csv']
    test_data_sets = ['test.csv']

    # Load training data.
    train_list = read_csv(train_data_sets)
    train_dataset = MyDataset(train_list)
    trainloader = DataLoader(train_dataset, batch_size=BatchSize, shuffle=True, num_workers=2)

    # Load validation data.
    test_list = read_csv(test_data_sets)
    test_dataset = MyDataset(test_list)
    testloader = DataLoader(test_dataset, batch_size=BatchSize, shuffle=False, num_workers=2)

    # Loss function.
    loss_function = nn.CrossEntropyLoss()

    # Optimizer.
    # optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9, weight_decay=5e-4)
    optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)

    # Learning-rate schedule (currently disabled).
    # scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50], gamma=0.1, last_epoch=-1)

    # Create the log directory for this run.
    if not os.path.exists(LOG_DIR):
        os.makedirs(LOG_DIR)

    # Tensorboard writer.
    writer = SummaryWriter(log_dir=LOG_DIR)

    # Create the checkpoint directory; checkpoint_path is then rebound to a
    # per-file name template inside that directory.
    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)
    checkpoint_path = os.path.join(checkpoint_path, '{net}-{epoch}-{loss}-{type}.pth')

    # Initialize the best loss to +inf, used as the save-model criterion.
    origin_loss = float('inf')

    # Training loop: 200 epochs.
    for epoch in range(1, 201):

        # scheduler.step(epoch=epoch)

        # Train and validate each epoch.
        train(epoch)
        val_loss = eval_training(epoch)

        # # Save the model with the lowest validation loss.
        # if origin_loss > val_loss:
        #     weights_path = checkpoint_path.format(net=model_name, epoch=epoch, loss=val_loss, type='best')
        #     print('saving weights file to {}'.format(weights_path))
        #     torch.save(net.state_dict(), weights_path)
        #     origin_loss = val_loss
        #     continue

        # Save a checkpoint every epoch (interval is `epoch % 1`).
        if not epoch % 1:
            weights_path = checkpoint_path.format(net=model_name, epoch=epoch, loss=val_loss, type='regular')
            print('saving weights file to {}'.format(weights_path))
            torch.save(net.state_dict(), weights_path)

    writer.close()

