import numpy as np
import torch
from medpy import metric
from scipy.ndimage import zoom
from utils import PolyLR, Medical_Metric, DiceLoss
import torch.nn as nn
import os
from tqdm import tqdm
from utils import get_lr_scheduler,get_optimizer

def supervise(model: nn.Module, train_loader, test_loader, args):
    """Fully supervised training loop.

    Trains ``model`` on ``train_loader`` with a weighted CE + Dice loss,
    periodically evaluates on ``test_loader`` and checkpoints whenever the
    mean Dice improves.

    :param model: segmentation network, trained in place
    :param train_loader: yields (image, label) batches
    :param test_loader: evaluation loader, forwarded to ``test``
    :param args: experiment namespace (device, total_itrs, step_size,
        num_classes, logger, writer, save_path, ckpt, name, ...)
    """
    optimizer = get_optimizer(args=args, model=model)

    lr_scheduler = get_lr_scheduler(args=args, optimizer=optimizer)

    max_epoch = args.total_itrs // len(train_loader) + 1
    args.logger.info("==============> max_epoch :{}".format(max_epoch))

    # config network and criterion
    criterion = nn.CrossEntropyLoss(ignore_index=255)
    dice_loss = DiceLoss(args.num_classes)

    cur_itrs = 0
    train_loss = 0.0
    best_dice = 0.0

    # Resume from a checkpoint. Checkpoints written below store state_dicts,
    # so they must be restored via load_state_dict; assigning the raw dicts
    # to `model`/`optimizer` (as the original code did) breaks resuming.
    if args.ckpt is not None and os.path.isfile(args.ckpt):
        state_dict = torch.load(args.ckpt)
        cur_itrs = state_dict["cur_itrs"]
        model.load_state_dict(state_dict["model"])
        optimizer.load_state_dict(state_dict["optimizer"])
        if "lr_scheduler" in state_dict:
            lr_scheduler.load_state_dict(state_dict["lr_scheduler"])
        best_dice = state_dict["best_score"]

    model.train()

    pbar = tqdm(total=args.total_itrs)

    while True:
        for (img_labeled, target_label) in train_loader:
            cur_itrs += 1
            pbar.update(1)
            img_labeled = img_labeled.to(args.device).float()
            target_label = target_label.to(args.device).long()
            pseudo_labeled = model(img_labeled)
            # Weighted combination of cross-entropy and Dice losses.
            loss_ce = criterion(pseudo_labeled, target_label)
            loss_dice = dice_loss(pseudo_labeled, target_label.unsqueeze(1), softmax=True)
            loss = 0.4 * loss_ce + 0.6 * loss_dice

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            lr_scheduler.step()
            lr = optimizer.param_groups[0]["lr"]

            train_loss += loss.item()
            args.writer.add_scalar('supervise/loss', loss.item(), cur_itrs)
            args.writer.add_scalar('supervise/lr', lr, cur_itrs)

            if cur_itrs % args.step_size == 0:
                dice, hd95 = test(model=model, test_loader=test_loader, args=args, cur_itrs=cur_itrs)
                args.writer.add_scalar('supervise/{}_dice'.format(args.name), dice, cur_itrs)
                args.writer.add_scalar('supervise/{}_hd95'.format(args.name), hd95, cur_itrs)
                args.logger.info("dice:{:.5f} \t hd95:{:.5f} ".format(dice, hd95))

                if dice > best_dice:
                    best_dice = dice
                    # Checkpoint the new best model (state_dicts; see resume above).
                    torch.save({
                        "cur_itrs": cur_itrs,
                        "model": model.state_dict(),
                        "optimizer": optimizer.state_dict(),
                        "lr_scheduler": lr_scheduler.state_dict(),
                        "best_score": best_dice,
                    }, os.path.join(args.save_path, "model", "{}.pth".format(dice)))

                # test() switched the model to eval mode; restore training mode.
                model.train()

            if cur_itrs > args.total_itrs:
                pbar.close()  # release the progress bar before returning
                return

        train_loss = 0


def test(model, test_loader, args, cur_itrs, name="test"):
    """Evaluate ``model`` volume-by-volume on ``test_loader``.

    Logs a sample image / prediction / ground-truth triple to TensorBoard on
    the first batch, then returns metrics averaged over the dataset.

    :param model: segmentation network (switched to eval mode here)
    :param test_loader: yields (volume, label) pairs — presumably batch size 1; verify against caller
    :param args: experiment namespace (device, num_classes, test_crop_size, writer)
    :param cur_itrs: global step, used as the TensorBoard x-axis
    :param name: TensorBoard tag prefix
    :return: (mean_dice, mean_hd95) averaged over classes and volumes
    """
    model.eval()
    metric_list = 0.0
    for i_batch, sampled_batch in enumerate(test_loader):
        image = sampled_batch[0].to(args.device)
        label = sampled_batch[1].to(args.device)
        metric_i = test_single_volume(image, label, model, classes=args.num_classes, patch_size=args.test_crop_size)
        metric_list += np.array(metric_i)

        if i_batch == 0:
            # Visualize the first slice of the first volume.
            first_slice = image[0, 0, :, :].cpu().detach().numpy()
            x, y = first_slice.shape[0], first_slice.shape[1]
            first_slice = zoom(first_slice, (args.test_crop_size[0] / x, args.test_crop_size[1] / y), order=0)
            # Use args.device instead of the hard-coded .cuda() so evaluation
            # also works on CPU-only setups (consistent with the rest of this function).
            img = torch.from_numpy(first_slice).unsqueeze(0).unsqueeze(0).float().to(args.device)

            with torch.no_grad():  # preview only; no gradients needed
                label_pred = torch.argmax(torch.softmax(model(img), dim=1), dim=1, keepdim=False).squeeze(0)
            label_pred = label_pred.cpu().detach().numpy()
            # Resize the prediction back to the original slice resolution.
            label_pred = zoom(
                label_pred, (x / args.test_crop_size[0], y / args.test_crop_size[1]), order=0)
            label_pred = test_loader.dataset.label_to_img(label_pred)

            label_true = label[0, 0, :, :].squeeze().cpu().detach().numpy()
            label_true = test_loader.dataset.label_to_img(label_true)

            args.writer.add_image('{}/Image'.format(name), img[0], cur_itrs)
            args.writer.add_image('{}/label_pred'.format(name), label_pred, cur_itrs, dataformats='HWC')
            args.writer.add_image('{}/label_true'.format(name), label_true, cur_itrs, dataformats='HWC')

    metric_list = metric_list / len(test_loader.dataset)
    mean_dice = np.mean(metric_list, axis=0)[0]
    mean_hd95 = np.mean(metric_list, axis=0)[1]
    return mean_dice, mean_hd95


def test_single_volume(image, label, net, classes, patch_size=(256, 256)):
    """Predict a 3-D volume slice-by-slice and score it per class.

    Each 2-D slice is resized to ``patch_size`` for inference and the
    prediction is resized back to the original slice resolution.

    :param image: volume tensor — assumed shape (1, D, H, W); TODO confirm against caller
    :param label: matching label tensor
    :param net: segmentation network (set to eval mode here)
    :param classes: number of classes incl. background; classes 1..classes-1 are scored
    :param patch_size: (H, W) inference resolution; tuple default avoids the
        mutable-default-argument pitfall of the original ``[256, 256]``
    :return: list of (dice, hd95) tuples for classes 1..classes-1
    """
    image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy()
    prediction = np.zeros_like(label)
    net.eval()  # hoisted out of the per-slice loop; calling it once is enough
    for ind in range(image.shape[0]):
        img_slice = image[ind, :, :]
        x, y = img_slice.shape[0], img_slice.shape[1]
        img_slice = zoom(img_slice, (patch_size[0] / x, patch_size[1] / y), order=0)
        net_input = torch.from_numpy(img_slice).unsqueeze(0).unsqueeze(0).float().cuda()
        with torch.no_grad():
            out = torch.argmax(torch.softmax(net(net_input), dim=1), dim=1).squeeze(0)
            out = out.cpu().detach().numpy()
            pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
            prediction[ind] = pred
    metric_list = []
    for i in range(1, classes):
        metric_list.append(calculate_metric_percase(prediction == i, label == i))
    return metric_list


def test_single_volume_ds(image, label, net, classes, patch_size=(256, 256)):
    """Like ``test_single_volume`` but for deep-supervision networks.

    ``net`` is expected to return a 4-tuple of outputs; only the main head
    (first element) is used for prediction.

    :param image: volume tensor — assumed shape (1, D, H, W); TODO confirm against caller
    :param label: matching label tensor
    :param net: deep-supervision network returning (main, aux1, aux2, aux3)
    :param classes: number of classes incl. background; classes 1..classes-1 are scored
    :param patch_size: (H, W) inference resolution; tuple default avoids the
        mutable-default-argument pitfall of the original ``[256, 256]``
    :return: list of (dice, hd95) tuples for classes 1..classes-1
    """
    image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy()
    prediction = np.zeros_like(label)
    net.eval()  # hoisted out of the per-slice loop; calling it once is enough
    for ind in range(image.shape[0]):
        img_slice = image[ind, :, :]
        x, y = img_slice.shape[0], img_slice.shape[1]
        img_slice = zoom(img_slice, (patch_size[0] / x, patch_size[1] / y), order=0)
        net_input = torch.from_numpy(img_slice).unsqueeze(0).unsqueeze(0).float().cuda()
        with torch.no_grad():
            output_main, _, _, _ = net(net_input)
            out = torch.argmax(torch.softmax(output_main, dim=1), dim=1).squeeze(0)
            out = out.cpu().detach().numpy()
            pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
            prediction[ind] = pred
    metric_list = []
    for i in range(1, classes):
        metric_list.append(calculate_metric_percase(prediction == i, label == i))
    return metric_list


def calculate_metric_percase(pred, gt):
    """Return (dice, hd95) for one binary mask pair, or (0, 0) if ``pred`` is empty.

    Both arrays are binarized in place (values > 0 become 1) before scoring;
    callers here pass fresh ``== class`` boolean arrays, so that is safe.
    """
    pred[pred > 0] = 1
    gt[gt > 0] = 1
    if not pred.sum() > 0:
        return 0, 0
    return metric.binary.dc(pred, gt), metric.binary.hd95(pred, gt)


def update(score, epoch, model, optimizer, scheduler, best_score, path):
    """Checkpoint the model when ``score`` improves; return the running best.

    NOTE(review): ``path`` is forwarded into ``save_ckpt``'s ``args`` slot,
    which reads ``args.model_save_path`` — confirm callers pass a namespace,
    not a plain path string.
    """
    if score <= best_score:
        return best_score
    save_ckpt(path, epoch, model, optimizer, scheduler, score)
    return score


def get_current_consistency_weight(epoch, args):
    """Consistency-loss weight with sigmoid ramp-up (https://arxiv.org/abs/1610.02242)."""
    ramp = sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * ramp


def sigmoid_rampup(current, rampup_length):
    """Exponential (sigmoid-shaped) ramp-up from https://arxiv.org/abs/1610.02242.

    Returns exp(-5 * (1 - t)^2) where t = clip(current, 0, rampup_length) /
    rampup_length; a zero ``rampup_length`` means no ramp (constant 1.0).
    """
    if rampup_length == 0:
        return 1.0
    clipped = np.clip(current, 0.0, rampup_length)
    phase = 1.0 - clipped / rampup_length
    return float(np.exp(-5.0 * phase ** 2))


def update_ema_variables(model, ema_model, alpha, global_step):
    """In-place EMA update: ema = alpha * ema + (1 - alpha) * param.

    Uses the true average early on (alpha ramps up from 0 with the step
    count) until the exponential average is more reliable.

    :param model: source network providing current parameters
    :param ema_model: target network whose parameters are updated in place
    :param alpha: EMA decay ceiling in [0, 1]
    :param global_step: current iteration, used to ramp up alpha
    """
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # add_(other, alpha=...) replaces the deprecated add_(Number, Tensor)
        # overload, which is removed in recent PyTorch releases.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)


def save_ckpt(args, cur_itrs, model, optimizer, scheduler, best_score):
    """Serialize the current training state to ``args.model_save_path``.

    Stores the ``model`` and ``optimizer`` objects as passed in (not their
    state_dicts); the scheduler is intentionally not persisted.
    """
    checkpoint = {
        "cur_itrs": cur_itrs,
        "model": model,
        "optimizer": optimizer,
        # "scheduler": scheduler,
        "best_score": best_score,
    }
    torch.save(checkpoint, args.model_save_path)
    print("Model saved as %s" % args.model_save_path)


def save_ema_ckpt(args, cur_itrs, model, ema_model, optimizer, scheduler, best_score):
    """Serialize student and EMA teacher training state to ``args.model_save_path``.

    Stores the ``model``/``ema_model``/``optimizer`` objects as passed in
    (not their state_dicts); the scheduler is intentionally not persisted.
    """
    checkpoint = {
        "cur_itrs": cur_itrs,
        "model": model,
        "ema_model": ema_model,
        "optimizer": optimizer,
        # "scheduler": scheduler,
        "best_score": best_score,
    }
    torch.save(checkpoint, args.model_save_path)
    print("Model saved as %s" % args.model_save_path)
