import os.path
import numpy as np
import torch
from copy import deepcopy
import wandb
from medpy import metric
from tqdm import tqdm
import torch.nn as nn
import random
from scipy.ndimage import zoom
from utils import loadyaml, _get_logger, mk_path, get_current_consistency_weight, DiceLoss, update_ema_variables
from utils import build_lr_scheduler, build_optimizer,DiceLoss_Ignore
from model import build_model
from datasets import build_loader



def main():
    """Entry point: load the YAML config, set up paths/seeds/logging/wandb,
    build the data loaders and model, then run UniMatch training."""
    # path = r"config/unimatch_unet_30k_256x256_ACDC.yaml"
    path = r"config/unimatch_segformer_30k_256x256_ACDC.yaml"
    root = os.path.dirname(os.path.realpath(__file__))  # absolute directory of this file
    args = loadyaml(os.path.join(root, path))  # load the yaml config
    if args.cuda:
        args.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    else:
        args.device = torch.device("cpu")

    # (Previously `root` was recomputed here a second time; removed as redundant.)
    args.save_path = os.path.join(root, args.save_path)
    mk_path(args.save_path)  # create the output directory
    # Directory used both for tensorboard-style logs and as the wandb run dir.
    mk_path(os.path.join(args.save_path, "tensorboardX"))
    mk_path(os.path.join(args.save_path, "model"))  # checkpoint directory
    args.model_save_path = os.path.join(args.save_path, "model", "model.pth")
    args.ema_model_save_path = os.path.join(args.save_path, "model", "ema_model_model.pth")

    wandb.init(
        entity="jokerak777",  # wandb team/user name
        project="semi_unimatch",  # wandb project name
        name="unimatch_segformer",  # experiment name (wandb auto-generates one if omitted)
        config=args,
        dir=os.path.join(args.save_path, "tensorboardX")
    )

    args.logger = _get_logger(os.path.join(args.save_path, "log.log"), "info")
    args.tqdm = os.path.join(args.save_path, "tqdm.log")
    # Seed every RNG we use for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    torch.backends.cudnn.deterministic = False  # single-GPU run; strict determinism not required
    torch.backends.cudnn.benchmark = True  # let cudnn autotune the fastest kernels

    label_loader, unlabel_loader1, unlabel_loader2, test_loader = build_loader(args)  # build the datasets
    args.epochs = args.total_itrs // args.step_size  # number of evaluation rounds
    args.logger.info("==========> label_loader length:{}".format(len(label_loader.dataset)))
    # BUG FIX: this message previously said "label_loader" while logging the unlabeled set.
    args.logger.info("==========> unlabel_loader length:{}".format(len(unlabel_loader1.dataset)))
    args.logger.info("==========> test_dataloader length:{}".format(len(test_loader.dataset)))
    args.logger.info("==========> epochs length:{}".format(args.epochs))

    # step 1: build the model on the chosen device
    model = build_model(args=args).to(device=args.device)

    wandb.watch(model, log='all', log_freq=1000)

    # step 2: train
    UniMatch(model, label_loader, unlabel_loader1, unlabel_loader2, test_loader, args)


def UniMatch(model, label_loader, unlabel_loader1, unlabel_loader2, test_loader, args):
    """Train `model` with the UniMatch semi-supervised scheme.

    Each iteration consumes one labeled batch and two unlabeled batches: the
    weakly augmented view produces pseudo-labels that supervise two strongly
    augmented (CutMixed) views plus a feature-perturbed view. The model is
    evaluated every ``args.step_size`` iterations and the best checkpoint
    (by Dice) is saved to ``args.model_save_path``.

    Args:
        model: segmentation network already placed on the training device.
        label_loader: loader yielding (image, mask) labeled pairs.
        unlabel_loader1: loader yielding (weak, strong1, strong2, box1, box2) unlabeled views.
        unlabel_loader2: second unlabeled loader used as the CutMix "mix" source.
        test_loader: evaluation loader over whole volumes.
        args: config namespace (lr, total_itrs, step_size, conf_thresh, num_classes, logger, ...).
    """
    optimizer = build_optimizer(args=args, model=model)
    # NOTE(review): lr_scheduler is only stored in checkpoints; the effective
    # LR is set manually by the poly schedule inside the loop below.
    lr_scheduler = build_lr_scheduler(args=args, optimizer=optimizer)
    # Supervised CE plus Dice losses that can mask out low-confidence pixels.
    criterion_ce = nn.CrossEntropyLoss(ignore_index=255)
    criterion_dice = DiceLoss_Ignore(args.num_classes)

    model.train()
    cur_itrs = 0
    best_dice = 0.0
    # (Removed unused locals `train_loss` and `best_ema_dice`.)

    max_epoch = args.total_itrs // len(label_loader)
    pbar = tqdm(total=args.total_itrs)
    args.logger.info("==========> max_epoch:{}".format(max_epoch))

    while True:
        for ((img_x, mask_x),
             (img_u_w, img_u_s1, img_u_s2, cutmix_box1, cutmix_box2),
             (img_u_w_mix, img_u_s1_mix, img_u_s2_mix, _, _)) in zip(label_loader, unlabel_loader1, unlabel_loader2):

            pbar.update(1)
            cur_itrs += 1
            img_x, mask_x = img_x.cuda(), mask_x.cuda().long()  # labeled data
            img_u_w = img_u_w.cuda()  # weakly augmented unlabeled data
            img_u_s1, img_u_s2 = img_u_s1.cuda(), img_u_s2.cuda()  # two strongly augmented views
            cutmix_box1, cutmix_box2 = cutmix_box1.cuda(), cutmix_box2.cuda()  # CutMix masks
            img_u_w_mix = img_u_w_mix.cuda()  # mix-source batch, weak view
            img_u_s1_mix, img_u_s2_mix = img_u_s1_mix.cuda(), img_u_s2_mix.cuda()  # mix-source strong views

            with torch.no_grad():
                model.eval()
                # Pseudo-labels and confidences for the mix-source weak view.
                pred_u_w_mix = model(img_u_w_mix).detach()
                conf_u_w_mix = pred_u_w_mix.softmax(dim=1).max(dim=1)[0]
                mask_u_w_mix = pred_u_w_mix.argmax(dim=1)

            # CutMix: paste mix-source pixels into the strong views.
            img_u_s1[cutmix_box1.unsqueeze(1).expand(img_u_s1.shape) == 1] = img_u_s1_mix[cutmix_box1.unsqueeze(1).expand(img_u_s1.shape) == 1]
            img_u_s2[cutmix_box2.unsqueeze(1).expand(img_u_s2.shape) == 1] = img_u_s2_mix[cutmix_box2.unsqueeze(1).expand(img_u_s2.shape) == 1]

            model.train()

            num_lb, num_ulb = img_x.shape[0], img_u_w.shape[0]  # labeled / unlabeled batch sizes

            # Joint forward pass; the second output is the feature-perturbed branch.
            preds, preds_fp = model(torch.cat((img_x, img_u_w)), True)
            pred_x, pred_u_w = preds.split([num_lb, num_ulb])
            pred_u_w_fp = preds_fp[num_lb:]

            pred_u_s1, pred_u_s2 = model(torch.cat((img_u_s1, img_u_s2))).chunk(2)

            # Pseudo-labels from the weak view (no gradient through them).
            pred_u_w = pred_u_w.detach()
            conf_u_w = pred_u_w.softmax(dim=1).max(dim=1)[0]
            mask_u_w = pred_u_w.argmax(dim=1)

            # Apply the same CutMix pasting to the pseudo-labels/confidences.
            mask_u_w_cutmixed1, conf_u_w_cutmixed1 = mask_u_w.clone(), conf_u_w.clone()
            mask_u_w_cutmixed2, conf_u_w_cutmixed2 = mask_u_w.clone(), conf_u_w.clone()

            mask_u_w_cutmixed1[cutmix_box1 == 1] = mask_u_w_mix[cutmix_box1 == 1]
            conf_u_w_cutmixed1[cutmix_box1 == 1] = conf_u_w_mix[cutmix_box1 == 1]

            mask_u_w_cutmixed2[cutmix_box2 == 1] = mask_u_w_mix[cutmix_box2 == 1]
            conf_u_w_cutmixed2[cutmix_box2 == 1] = conf_u_w_mix[cutmix_box2 == 1]

            # Supervised loss: average of CE and Dice on the labeled batch.
            loss_x = (criterion_ce(pred_x, mask_x) + criterion_dice(pred_x.softmax(dim=1), mask_x.unsqueeze(1).float())) / 2.0

            # Unsupervised losses: pixels below the confidence threshold are ignored.
            loss_u_s1 = criterion_dice(pred_u_s1.softmax(dim=1), mask_u_w_cutmixed1.unsqueeze(1).float(), ignore=(conf_u_w_cutmixed1 < args.conf_thresh).float())

            loss_u_s2 = criterion_dice(pred_u_s2.softmax(dim=1), mask_u_w_cutmixed2.unsqueeze(1).float(), ignore=(conf_u_w_cutmixed2 < args.conf_thresh).float())

            loss_u_w_fp = criterion_dice(pred_u_w_fp.softmax(dim=1), mask_u_w.unsqueeze(1).float(), ignore=(conf_u_w < args.conf_thresh).float())

            loss = (loss_x + loss_u_s1 * 0.25 + loss_u_s2 * 0.25 + loss_u_w_fp * 0.5) / 2.0

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Poly learning-rate decay applied manually every iteration.
            lr = args.lr * (1.0 - cur_itrs / args.total_itrs) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

            wandb.log({
                "UniMatch/loss": loss.item(),
                "UniMatch/lr": lr,
            })
            if cur_itrs % args.step_size == 0:
                # Periodic evaluation; test_acdc switches the model to eval mode.
                dice, hd95 = test_acdc(model=model, test_loader=test_loader, args=args, name="test_model")

                wandb.log({
                    'model_dice': dice,
                    'model_hd95': hd95,
                })
                args.logger.info("val [{}/{} ({:.0f}%)]\t model dice:{:.4f} \t hd95:{:.4f} ".format(cur_itrs, args.total_itrs,
                                                                                                    100. * cur_itrs / args.total_itrs,
                                                                                                    dice, hd95))

                if dice > best_dice:
                    best_dice = dice
                    torch.save({
                        "cur_itrs": cur_itrs,
                        "best_dice": best_dice,
                        "model": model.state_dict(),
                        "optimizer": optimizer.state_dict(),
                        "lr_scheduler": lr_scheduler.state_dict(),
                    }, args.model_save_path)

                args.logger.info("best model dice:{:.4f}".format(best_dice))

                model.train()

            if cur_itrs > args.total_itrs:
                # BUG FIX: wandb.finish() previously sat after this `return`
                # (dead code) and never ran; close the run before leaving.
                pbar.close()
                wandb.finish()
                return


def test_acdc(model, test_loader, args, name):
    """Evaluate `model` on the ACDC test set.

    Runs volume-wise inference via `test_single_volume`, averages the per-class
    metrics over the dataset, and logs one qualitative example (input slice,
    prediction, ground truth) to wandb under the ``name`` namespace.

    Args:
        model: segmentation network (switched to eval mode here; caller is
            responsible for restoring train mode).
        test_loader: loader yielding (image, label) volume batches of size 1.
        args: config namespace (device, num_classes, test_crop_size).
        name: wandb logging prefix for the example images.

    Returns:
        (mean_dice, mean_hd95) averaged over foreground classes.
    """
    model.eval()
    metric_list = 0.0
    for i_batch, sampled_batch in enumerate(test_loader):
        image = sampled_batch[0].to(args.device)
        label = sampled_batch[1].to(args.device)
        metric_i = test_single_volume(image, label, model, classes=args.num_classes, patch_size=args.test_crop_size)
        metric_list += np.array(metric_i)

        if i_batch == 0:
            # Log one qualitative example: first slice of the first volume.
            img_slice = image[0, 0, :, :].cpu().detach().numpy()
            x, y = img_slice.shape[0], img_slice.shape[1]
            # Nearest-neighbour resize to the network's input size.
            img_slice = zoom(img_slice, (args.test_crop_size[0] / x, args.test_crop_size[1] / y), order=0)
            # BUG FIX: was hard-coded .cuda(); use args.device so CPU-only
            # evaluation works and stays consistent with the loop above.
            img = torch.from_numpy(img_slice).unsqueeze(0).unsqueeze(0).float().to(args.device)
            label_pred = torch.argmax(torch.softmax(model(img), dim=1), dim=1, keepdim=False).squeeze(0)
            label_pred = label_pred.cpu().detach().numpy()
            # Resize the prediction back to the original slice resolution.
            label_pred = zoom(label_pred, (x / args.test_crop_size[0], y / args.test_crop_size[1]), order=0)
            label_pred = test_loader.dataset.label_to_img(label_pred)

            label_true = label[0, 0, :, :].squeeze().cpu().detach().numpy()
            label_true = test_loader.dataset.label_to_img(label_true)

            Img = wandb.Image(img[0])
            label_pred = wandb.Image(label_pred)
            label_true = wandb.Image(label_true)

            wandb.log({"{}/Image".format(name): Img, "{}/label_pred".format(name): label_pred, "{}/label_true".format(name): label_true})

    metric_list = metric_list / len(test_loader.dataset)
    performance2 = np.mean(metric_list, axis=0)[0]
    mean_hd952 = np.mean(metric_list, axis=0)[1]
    return performance2, mean_hd952


def test_single_volume(image, label, net, classes, patch_size=(256, 256)):
    """Run slice-wise 2D inference over one 3D volume and score each class.

    Args:
        image: tensor of shape (1, D, H, W) — a single volume; moved to numpy here.
        label: tensor of shape (1, D, H, W) with integer class labels.
        net: segmentation network; inference runs on the device of its parameters.
        classes: total number of classes including background (class 0 is skipped).
        patch_size: (H, W) input size the network expects; slices are resized to it.
            (Changed from a mutable list default to a tuple; indexing is unchanged.)

    Returns:
        List of (dice, hd95) tuples, one per foreground class 1..classes-1.
    """
    image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy()
    prediction = np.zeros_like(label)
    # BUG FIX: use the network's own device instead of hard-coded .cuda(),
    # so CPU-only evaluation does not crash. (Assumes `net` has parameters.)
    device = next(net.parameters()).device
    net.eval()  # hoisted out of the loop; it is loop-invariant
    for ind in range(image.shape[0]):
        img_slice = image[ind, :, :]
        x, y = img_slice.shape[0], img_slice.shape[1]
        # Nearest-neighbour resize to the network input size.
        img_slice = zoom(img_slice, (patch_size[0] / x, patch_size[1] / y), order=0)
        net_in = torch.from_numpy(img_slice).unsqueeze(0).unsqueeze(0).float().to(device)
        with torch.no_grad():
            out = torch.argmax(torch.softmax(net(net_in), dim=1), dim=1).squeeze(0)
            out = out.cpu().detach().numpy()
            # Resize the prediction back to the original slice resolution.
            pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
            prediction[ind] = pred
    metric_list = []
    for i in range(1, classes):
        metric_list.append(calculate_metric_percase(prediction == i, label == i))
    return metric_list


def calculate_metric_percase(pred, gt):
    """Return (dice, hd95) for one binary class mask pair.

    Args:
        pred: predicted binary mask (numpy array; modified in place — values > 0
            are clamped to 1).
        gt: ground-truth binary mask (numpy array; also clamped in place).

    Returns:
        Tuple (dice, hd95):
        - both masks non-empty: medpy Dice and 95th-percentile Hausdorff distance;
        - prediction non-empty but ground truth empty: (1, 0)
          (NOTE(review): scoring a false positive as perfect Dice looks odd but
          matches the author's written intent in the original elif branch);
        - prediction empty: (0, 0).
    """
    pred[pred > 0] = 1
    gt[gt > 0] = 1
    # BUG FIX: the first condition was `pred.sum() > 0`, which subsumed the
    # elif below (making it unreachable) and crashed in metric.binary.hd95
    # whenever gt was empty — hd95 requires both masks to be non-empty.
    if pred.sum() > 0 and gt.sum() > 0:
        dice = metric.binary.dc(pred, gt)
        hd95 = metric.binary.hd95(pred, gt)
        return dice, hd95
    elif pred.sum() > 0 and gt.sum() == 0:
        return 1, 0
    else:
        return 0, 0


# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
