import argparse
# from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from dataset.PiFu import PiFu
import socket
from datetime import datetime
import os
from model.BaseNet_V5 import CPFNet, BaseNet_version
from model.unet import UNet
import torch
from torch.utils.tensorboard import SummaryWriter
import tqdm
import torch.nn as nn
from torch.nn import functional as F
import numpy as np
from PIL import Image
import logging
import utils.utils as u
import utils.loss as LS
from utils.config import DefaultConfig
import torch.backends.cudnn as cudnn


def train_info(args, name):
    """Append a short description of this training run to log_info.txt.

    Args:
        args: configuration carrying BaseNet_version, k_fold, lr and
            save_model_path.
        name: run/log directory name; its first 7 characters are stripped
            (presumably a fixed path prefix — TODO confirm against the
            log_dir layout used by the caller).
    """
    run_name = name[7:]
    info = (args.BaseNet_version + "___" + "K-fold:" + str(args.k_fold)
            + "___" + "lr:" + str(args.lr)
            + "___" + "save:" + args.save_model_path)

    # Free-form experiment note (kept verbatim — it is written to the log).
    note = "6个损失函数,3种损失,数据增强的方式为左右\上下翻转"

    # 'with' guarantees the handle is closed; the original leaked it.
    with open("log_info.txt", "a+") as log_file:
        log_file.write(run_name + "\n")
        log_file.write(info + "\n")
        log_file.write(note + "\n")
        log_file.write("\n\n")


def val_cpf(args, model, dataloader):
    """Validate a multi-class segmentation model volume by volume.

    Consecutive 2-D slices streamed from `dataloader` are buffered until a
    complete volume (`slice_num` slices, read from labels[1]) has been
    collected; the stacked volume is then scored with u.eval_multi_seg.
    Running per-class Dice averages (3 foreground classes) and accuracy
    are shown on the progress bar.

    Returns:
        (mean_dice, dice1, dice2, dice3, ACC) — running averages after the
        last completed volume.

    NOTE(review): `data` and `label` are only assigned inside the CUDA
    branch, so this raises NameError when CUDA is unavailable or
    args.use_gpu is False — confirm GPU-only usage is intended.
    NOTE(review): the final prints assume at least one volume completes;
    otherwise mean_dice/dice1/... are unbound at the prints below.
    """
    print('\n')
    print('Start Validation!')
    with torch.no_grad():
        model.eval()
        tbar = tqdm.tqdm(dataloader, desc='\r')

        # total_Dice[c] accumulates per-volume Dice for foreground class c.
        total_Dice = []
        total_Dice1 = []
        total_Dice2 = []
        total_Dice3 = []
        total_Dice.append(total_Dice1)
        total_Dice.append(total_Dice2)
        total_Dice.append(total_Dice3)
        Acc = []

        # Buffers used to assemble one volume out of streamed batches.
        cur_cube = []
        cur_label_cube = []
        next_cube = []  # (unused)
        counter = 0       # slices accumulated toward the current volume
        end_flag = False  # True once a full volume is ready to score

        for i, (data, labels) in enumerate(tbar):
            # tbar.update()
            if torch.cuda.is_available() and args.use_gpu:
                data = data.cuda()
                label = labels[0].cuda()
            # labels[1] carries the slice count of the current volume.
            slice_num = labels[1][0].long().item()

            # get RGB predict image

            aux_predict, predicts = model(data)
            # exp() suggests the model emits log-probabilities — TODO
            # confirm; argmax over the class dim yields per-pixel class ids.
            predict = torch.argmax(torch.exp(predicts), dim=1)
            batch_size = predict.size()[0]

            counter += batch_size
            if counter <= slice_num:
                # Batch fits entirely inside the current volume.
                cur_cube.append(predict)
                cur_label_cube.append(label)
                if counter == slice_num:
                    end_flag = True
                    counter = 0
            else:
                # Batch straddles a volume boundary: split it into the tail
                # of the current volume and the head of the next one.
                last = batch_size - (counter - slice_num)

                last_p = predict[0:last]
                last_l = label[0:last]

                first_p = predict[last:]
                first_l = label[last:]

                cur_cube.append(last_p)
                cur_label_cube.append(last_l)
                end_flag = True
                counter = counter - slice_num

            if end_flag:
                end_flag = False
                # Stack the buffered slices into full prediction/label volumes.
                predict_cube = torch.stack(cur_cube, dim=0).squeeze()
                label_cube = torch.stack(cur_label_cube, dim=0).squeeze()
                cur_cube = []
                cur_label_cube = []
                if counter != 0:
                    # Carry the overflow slices over into the next volume.
                    cur_cube.append(first_p)
                    cur_label_cube.append(first_l)

                assert predict_cube.size()[0] == slice_num
                Dice, true_label, acc = u.eval_multi_seg(predict_cube, label_cube, args.num_classes)

                # Only count a class's Dice when that class actually occurs
                # in this volume's ground truth.
                for class_id in range(args.num_classes - 1):
                    if true_label[class_id] != 0:
                        total_Dice[class_id].append(Dice[class_id])
                Acc.append(acc)
                # Guard the averages against division by zero for classes
                # that have not been seen in any volume yet.
                len0 = len(total_Dice[0]) if len(total_Dice[0]) != 0 else 1
                len1 = len(total_Dice[1]) if len(total_Dice[1]) != 0 else 1
                len2 = len(total_Dice[2]) if len(total_Dice[2]) != 0 else 1

                dice1 = sum(total_Dice[0]) / len0
                dice2 = sum(total_Dice[1]) / len1
                dice3 = sum(total_Dice[2]) / len2
                ACC = sum(Acc) / len(Acc)
                mean_dice = (dice1 + dice2 + dice3) / 3.0
                tbar.set_description('Mean_D: %3f, Dice1: %.3f, Dice2: %.3f, Dice3: %.3f, ACC: %.3f' % (
                mean_dice, dice1, dice2, dice3, ACC))
        print('Mean_Dice:', mean_dice)
        print('Dice1:', dice1)
        print('Dice2:', dice2)
        print('Dice3:', dice3)
        print('Acc:', ACC)

        return mean_dice, dice1, dice2, dice3, ACC


def val(args, epoch,  model, dataloader):
    """Validate the single-channel (skin-segmentation) model for one epoch.

    Runs the model over `dataloader` without gradients, scores the main
    output of each batch with u.eval_seg, and returns the metrics averaged
    over the number of batches.

    Returns:
        (dice, precision, jaccard) — each averaged over len(dataloader).
    """
    total_dice = 0.0
    total_precision = 0.0
    total_jaccard = 0.0
    batch_count = len(dataloader)  # number of validation batches
    with torch.no_grad():
        model.eval()
        pbar = tqdm.tqdm(total=len(dataloader) * args.batch_size_val)
        pbar.set_description('val %d' % epoch)
        for batch_idx, (data, labels) in enumerate(dataloader):
            if torch.cuda.is_available() and args.use_gpu:
                data = data.cuda()
                labels = labels.cuda()
            # The model returns six maps; only the main output is scored.
            mask_pred, _d1, _x2, _x3, _x4, _x5 = model(data)
            dice_b, precision_b, jaccard_b = u.eval_seg(mask_pred, labels)
            total_dice += dice_b
            total_precision += precision_b
            total_jaccard += jaccard_b

            pbar.update(args.batch_size_val)
            # Running mean Dice per batch.
            pbar.set_postfix(dice='%.6f' % (total_dice / (batch_idx + 1)))
        pbar.close()
        print()
    model.train()
    return total_dice / batch_count, total_precision / batch_count, total_jaccard / batch_count


def train(args, model, optimizer, criterion, dataloader_train, dataloader_val):
    """Run the full training loop with deep supervision.

    Each batch is expected to yield six prediction maps from the model
    (main output plus five side outputs d1..d5); every map is scored with
    BCE + Dice + IoU (criterion[0..2]; criterion[3], MS-SSIM, is currently
    unused) and the six per-head losses are summed.  Every
    `args.validation_step` epochs the model is validated, the best-metric
    trackers stored on `args` are updated, metrics are written to
    TensorBoard, and latest/best checkpoints are saved.

    Args:
        args: configuration (num_epochs, batch_size, lr, b_dice/b_jacd/
            b_accu trackers, validation_step, save_model_path, ...).
        model: network returning a 6-tuple of prediction maps.
        optimizer: optimizer over model.parameters().
        criterion: list [bce, dice, iou, msssim] of loss callables.
        dataloader_train: training loader.
        dataloader_val: validation loader.
    """
    current_time = datetime.now().strftime('%b%d_%H-%M-%S')
    print("当前时间", current_time)

    log_dir = os.path.join(
        args.log_dirs,
        args.BaseNet_version + "_" + str(args.k_fold) + '_' + str(args.lr)
        + '_' + current_time + '_' + socket.gethostname())
    writer = SummaryWriter(log_dir=log_dir)
    print("log_name: ", log_dir)

    train_info(args, log_dir)  # append the run configuration to log_info.txt

    step = 0
    best_pred = 0.0

    for epoch in range(args.num_epochs):
        lr = u.adjust_learning_rate(args, optimizer, epoch)
        model.train()

        tq = tqdm.tqdm(total=len(dataloader_train) * args.batch_size)
        tq.set_description('epoch %d, lr %f, b_dice %f' % (epoch, lr, args.b_dice[0]))
        loss_record = []
        train_loss = 0.0
        for i, (data, label) in enumerate(dataloader_train):
            if torch.cuda.is_available() and args.use_gpu:
                data = data.cuda()
                label = label.cuda().float()

            # Six supervision heads: (main_out, d1, d2, d3, d4, d5).
            outputs = model(data)

            # BCE + Dice + IoU summed over every head.  This replaces six
            # verbatim copies of the same three-loss computation.
            loss = sum(
                criterion[0](out, label) + criterion[1](out, label) + criterion[2](out, label)
                for out in outputs
            )

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            tq.update(args.batch_size)
            train_loss += loss.item()
            # Running mean loss per batch.
            tq.set_postfix(loss='%.6f' % (train_loss / (i + 1)))

            # Log the step loss every 10 batches (scalar, not a tensor).
            step += 1
            if step % 10 == 0:
                writer.add_scalar('Train/loss_step', loss.item(), step)
            loss_record.append(loss.item())

        tq.close()
        loss_train_mean = np.mean(loss_record)
        writer.add_scalar('Train/loss_epoch', float(loss_train_mean), epoch)

        if epoch % args.validation_step == 0:
            dice, precsion, jaccard = val(args, epoch, model, dataloader_val)

            # Best-epoch trackers, each shaped [dice, jaccard, precision, epoch].
            if dice > args.b_dice[0]:
                args.b_dice = [dice, jaccard, precsion, epoch]
            if jaccard > args.b_jacd[1]:
                args.b_jacd = [dice, jaccard, precsion, epoch]
            if precsion > args.b_accu[2]:
                args.b_accu = [dice, jaccard, precsion, epoch]

            writer.add_scalar('Valid/dice', dice, epoch)
            writer.add_scalar('Valid/precsion', precsion, epoch)
            writer.add_scalar('Valid/jaccard', jaccard, epoch)

            is_best = dice > best_pred
            best_pred = max(best_pred, dice)
            checkpoint_dir = args.save_model_path
            os.makedirs(checkpoint_dir, exist_ok=True)
            checkpoint_latest = os.path.join(checkpoint_dir, 'checkpoint_latest.pth')
            u.save_checkpoint({
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_dice': best_pred,
                    }, best_pred, epoch, is_best, checkpoint_dir, filename=checkpoint_latest)

    writer.close()  # flush TensorBoard events (previously never closed)
    print(args.b_dice)
    print(args.b_jacd)
    print(args.b_accu)
    current_time = datetime.now().strftime('%b%d_%H-%M-%S')
    print("当前时间", current_time)


def test(model, dataloader, args):
    """Run inference and save colorized prediction images.

    For each batch the per-pixel argmax class map is computed, mapped to
    RGB via PiFu.COLOR_DICT, and saved next to the original mask path with
    the 'test_mask' directory component replaced by '<cwd-name>_mask'.

    Args:
        model: network returning (aux_pred, predict); exp() below suggests
            `predict` holds log-probabilities — TODO confirm.
        dataloader: yields (data, label_path) where label_path is a
            sequence of target mask file paths, one per image in the batch.
        args: configuration (use_gpu).
    """
    print('start test!')
    with torch.no_grad():
        model.eval()
        tq = tqdm.tqdm(dataloader, desc='\r')
        tq.set_description('test')
        # Tag output dirs with the current directory's name.
        # os.path.basename is portable; split('/') broke on Windows paths.
        comments = os.path.basename(os.getcwd())
        for i, (data, label_path) in enumerate(tq):
            if torch.cuda.is_available() and args.use_gpu:
                data = data.cuda()
            aux_pred, predict = model(data)
            predict = torch.argmax(torch.exp(predict), dim=1)
            pred = predict.data.cpu().numpy()
            pred_RGB = PiFu.COLOR_DICT[pred.astype(np.uint8)]

            for index, item in enumerate(label_path):
                save_img_path = item.replace('test_mask', comments + '_mask')
                os.makedirs(os.path.dirname(save_img_path), exist_ok=True)
                img = Image.fromarray(pred_RGB[index].squeeze().astype(np.uint8))
                img.save(save_img_path)
                tq.set_postfix(str=str(save_img_path))
        tq.close()


def main(mode='train', args=None):
    """Assemble datasets, model, optimizer and losses, then run train/test.

    Args:
        mode: 'train', 'test' or 'train_test'.
        args: configuration object (a DefaultConfig instance).
    """
    # --- datasets and dataloaders ------------------------------------
    dataset_path = os.path.join(args.data, args.dataset)
    print("dataset_path :", dataset_path)
    dataset_train = PiFu(dataset_path, scale=(args.crop_height, args.crop_width), k_fold_test=args.k_fold, mode='train')
    print("训练集数量大小：", len(dataset_train))

    dataloader_train = DataLoader(
        dataset_train,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True
    )
    print("训练集一个epoch的batch数量：", len(dataloader_train))

    dataset_val = PiFu(dataset_path, scale=(args.crop_height, args.crop_width), k_fold_test=args.k_fold, mode='val')
    # NOTE(review): drop_last=True on the val/test loaders silently skips
    # the final partial batch, so some samples are never evaluated —
    # confirm this is intended.
    dataloader_val = DataLoader(
        dataset_val,
        batch_size=args.batch_size_val,
        shuffle=False,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True
    )
    print("验证集数量大小：", len(dataset_val))
    print("验证集一个epoch的batch数量：", len(dataloader_val))

    dataset_test = PiFu(dataset_path, scale=(args.crop_height, args.crop_width), k_fold_test=args.test_fold, mode='test')
    dataloader_test = DataLoader(
        dataset_test,
        batch_size=args.batch_size_test,
        shuffle=False,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True
    )

    # --- model -------------------------------------------------------
    # NOTE(review): setting CUDA_VISIBLE_DEVICES has no effect if CUDA was
    # already initialized earlier in the process — consider setting it
    # before any torch.cuda call.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda

    # Build only the selected network (the previous dict eagerly
    # instantiated both); an unknown args.net_work still raises KeyError.
    model_factories = {
        'BaseNet': lambda: CPFNet(out_planes=args.num_classes),
        'UNet': lambda: UNet(in_channels=1, n_classes=args.num_classes),
    }
    model = model_factories[args.net_work]()
    cudnn.benchmark = True

    print("torch.cuda.is_available() :", torch.cuda.is_available(), " Use :", args.cuda)
    if torch.cuda.is_available() and args.use_gpu:
        model = torch.nn.DataParallel(model).cuda()

    # Load pretrained weights only when testing.
    if args.pretrained_model_path and mode == 'test':
        print("=> loading pretrained model '{}'".format(args.pretrained_model_path))
        checkpoint = torch.load(args.pretrained_model_path)
        model.load_state_dict(checkpoint['state_dict'])
        print('Done!')

    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # NOTE(review): original note asks whether sigmoid belongs in the net
    # or in the loss; nn.BCELoss assumes probabilities, so the network is
    # presumably sigmoid-terminated — confirm before switching losses.
    criterion_aux = nn.BCELoss()
    criterion_main = LS.DiceLoss()
    criterion_iou = LS.IOU()
    criterion_msssim = LS.MSSSIM()  # currently unused by train()
    criterion = [criterion_aux, criterion_main, criterion_iou, criterion_msssim]

    if mode == 'train':
        train(args, model, optimizer, criterion, dataloader_train, dataloader_val)
    if mode == 'test':
        test(model, dataloader_test, args)
    if mode == 'train_test':
        train(args, model, optimizer, criterion, dataloader_train, dataloader_val)
        test(model, dataloader_test, args)


if __name__ == '__main__':
    # Fix RNG seeds so runs are repeatable.
    seed = 1234
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    args = DefaultConfig()
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    logging.info(f'''Starting training:
         NetWork:         {args.net_work}
         Epochs:          {args.num_epochs}
         Batch size:      {args.batch_size}
         Batch size_val:  {args.batch_size_val}
         Learning rate:   {args.lr}
         Val_k_fold:      {args.k_fold}
         Checkpoints:     {args.save_model_path}
         Device cuda:     {args.cuda}
         ''')

    modes = args.mode
    args.BaseNet_version = BaseNet_version

    # Single dispatch on the configured mode; the old if/elif chain exited
    # silently on an unrecognized mode, hiding config typos.
    if modes in ('train', 'test', 'train_test'):
        main(mode=modes, args=args)
    else:
        logging.warning("Unknown mode %r; expected 'train', 'test' or 'train_test'.", modes)

