import os.path
import numpy as np
import torch
from medpy import metric
from scipy.ndimage import zoom
from tqdm import tqdm
import torch.nn as nn
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter

from utils import loadyaml, _get_logger, mk_path,DiceLoss,get_lr_scheduler,get_optimizer
from model import get_model
from datasets import get_loader

def main():
    """Entry point: load the YAML config, set seeds and output/log paths,
    build the data loaders and model, then run supervised training."""
    # Alternative experiment configs (swap the active `path` as needed):
    # path=r"config/unet_30k_224x224_acdc.yaml"
    path = r"config/unet_30k_64x64_lidc.yaml"
    # path = r"config/swinunet_30k_224x224_acdc.yaml"
    # path = r"config/segformer_30k_256x256_acdc.yaml"
    # path=r"config/transunet_30k_224x224_acdc.yaml"
    root = os.path.dirname(os.path.realpath(__file__))  # absolute directory of this file
    args = loadyaml(os.path.join(root, path))  # load the YAML config

    # Resolve the compute device; fall back to CPU when CUDA is unavailable.
    if args.cuda:
        args.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    else:
        args.device = torch.device("cpu")

    # Seed all RNGs for reproducibility. cudnn.benchmark trades strict
    # determinism for faster kernel selection (single-GPU run).
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True

    # Create output directories for checkpoints and TensorBoard logs.
    args.save_path = os.path.join(root, args.save_path)
    mk_path(args.save_path)
    mk_path(os.path.join(args.save_path, "tensorboardX"))  # TensorBoard log dir
    mk_path(os.path.join(args.save_path, "model"))  # model checkpoint dir
    args.finetune_save_path = os.path.join(args.save_path, "model", "finetune_model.pth")
    args.pretrain_save_path = os.path.join(args.save_path, "model", "pretrain_model.pth")
    args.supervise_save_path = os.path.join(args.save_path, "model", "supervise_model.pth")

    args.writer = SummaryWriter(os.path.join(args.save_path, "tensorboardX"))
    args.logger = _get_logger(os.path.join(args.save_path, "log.log"), "info")
    args.tqdm = os.path.join(args.save_path, "tqdm.log")

    # Step 1: build the datasets.
    train_loader, test_loader = get_loader(args)
    args.epochs = args.total_itrs // len(train_loader) + 1
    args.logger.info("==========> train_loader length:{}".format(len(train_loader.dataset)))
    args.logger.info("==========> test_dataloader length:{}".format(len(test_loader.dataset)))
    args.logger.info("==========> epochs length:{}".format(args.epochs))

    # Step 2: build the model.
    model = get_model(args=args).to(device=args.device)

    # Step 3: train it.
    supervise(model=model, train_loader=train_loader, test_loader=test_loader, args=args)



def supervise(model: nn.Module, train_loader, test_loader, args):
    """Supervised training loop.

    Trains `model` on `train_loader` with a weighted CE + Dice loss,
    evaluates on `test_loader` every `args.step_size` iterations, and
    checkpoints whenever the test Dice improves.

    Args:
        model: segmentation network producing (N, num_classes, H, W) logits.
        train_loader: loader yielding (image, label) batches.
        test_loader: loader passed to `test_lidc` for periodic evaluation.
        args: namespace carrying optimizer/scheduler settings, `total_itrs`,
            `step_size`, `num_classes`, `device`, `ckpt`, `save_path`,
            `logger`, `writer` and `name`.
    """
    optimizer = get_optimizer(args=args, model=model)
    lr_scheduler = get_lr_scheduler(args=args, optimizer=optimizer)

    max_epoch = args.total_itrs // len(train_loader) + 1
    args.logger.info("==============> max_epoch :{}".format(max_epoch))

    # Loss: pixel-wise cross-entropy (255 marks ignored pixels) plus Dice.
    criterion = nn.CrossEntropyLoss(ignore_index=255)
    dice_loss = DiceLoss(args.num_classes)

    cur_itrs = 0
    best_dice = 0.0

    # Resume from a checkpoint if one is provided.
    # BUG FIX: checkpoints are saved below with state_dict()s, so they must be
    # restored via load_state_dict — the old code assigned the raw dicts to
    # `model`/`optimizer`, which would break on the next forward pass.
    if args.ckpt is not None and os.path.isfile(args.ckpt):
        state_dict = torch.load(args.ckpt)
        cur_itrs = state_dict["cur_itrs"]
        model.load_state_dict(state_dict["model"])
        optimizer.load_state_dict(state_dict["optimizer"])
        best_dice = state_dict["best_score"]

    model.train()

    pbar = tqdm(total=args.total_itrs)

    while True:
        for (img_labeled, target_label) in train_loader:
            cur_itrs += 1
            pbar.update(1)
            img_labeled = img_labeled.to(args.device).float()
            target_label = target_label.to(args.device).long()
            pseudo_labeled = model(img_labeled)

            # Combined loss: 0.4 * cross-entropy + 0.6 * Dice.
            loss_ce = criterion(pseudo_labeled, target_label)
            loss_dice = dice_loss(pseudo_labeled, target_label.unsqueeze(1), softmax=True)
            loss = 0.4 * loss_ce + 0.6 * loss_dice

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            lr_scheduler.step()
            lr = optimizer.param_groups[0]["lr"]

            args.writer.add_scalar('supervise/loss', loss.item(), cur_itrs)
            args.writer.add_scalar('supervise/lr', lr, cur_itrs)

            # Periodic evaluation + best-model checkpointing.
            if cur_itrs % args.step_size == 0:
                dice, hd95 = test_lidc(model=model, test_loader=test_loader, args=args, cur_itrs=cur_itrs)
                args.writer.add_scalar('supervise/{}_dice'.format(args.name), dice, cur_itrs)
                args.writer.add_scalar('supervise/{}_hd95'.format(args.name), hd95, cur_itrs)
                args.logger.info("dice:{:.5f} \t hd95:{:.5f} ".format(dice, hd95))

                if dice > best_dice:
                    best_dice = dice
                    # Save the best checkpoint, named after its Dice score.
                    torch.save({
                        "cur_itrs": cur_itrs,
                        "model": model.state_dict(),
                        "optimizer": optimizer.state_dict(),
                        "lr_scheduler": lr_scheduler.state_dict(),
                        "best_score": best_dice,
                    }, os.path.join(args.save_path, "model", "{}.pth".format(dice)))

                model.train()  # test_lidc switches to eval(); restore training mode

            if cur_itrs > args.total_itrs:
                pbar.close()
                return


def test_lidc(model, test_loader, args, cur_itrs, name="test"):
    """Evaluate `model` on `test_loader` and return dataset-averaged
    (dice, hd95) for the foreground class (label == 1).

    The first batch's input images, predictions and ground truth are also
    written to TensorBoard under `name`. Leaves the model in eval mode;
    the caller is responsible for switching back to train().
    """
    model.eval()
    dice_sum = 0.0
    hd95_sum = 0.0
    with torch.no_grad():
        for batch_idx, (img, label_true) in enumerate(test_loader):
            img = img.to(args.device).float()
            label_true = label_true.to(args.device).long()
            probs = torch.softmax(model(img), dim=1)
            label_pred = torch.argmax(probs, dim=1, keepdim=False)

            # Per-batch metrics on the binary foreground masks, weighted by
            # batch size so the final mean is a true per-sample average.
            batch_size = img.shape[0]
            d, h = calculate_metric_percase(
                label_pred.data.cpu().numpy() == 1,
                label_true.data.cpu().numpy() == 1,
            )
            dice_sum += d * batch_size
            hd95_sum += h * batch_size

            # Log a visual sanity check for the first batch only.
            if batch_idx == 0:
                grid = make_grid(img, normalize=True, scale_each=True, nrow=8)
                grid = grid.permute(1, 2, 0).data.cpu().numpy()
                args.writer.add_image('{}/Image'.format(name), grid, cur_itrs, dataformats='HWC')
                args.writer.add_image('{}/label_pred'.format(name), test_loader.dataset.label_to_img(label_pred), cur_itrs, dataformats='HWC')
                args.writer.add_image('{}/label_true'.format(name), test_loader.dataset.label_to_img(label_true), cur_itrs, dataformats='HWC')

    num_samples = len(test_loader.dataset)
    return dice_sum / num_samples, hd95_sum / num_samples


def calculate_metric_percase(pred, gt):
    """Compute Dice and 95th-percentile Hausdorff distance for one binary case.

    Args:
        pred: predicted mask (numpy array); any positive value counts as
            foreground. Mutated in place (binarized).
        gt: ground-truth mask, same shape; also binarized in place.

    Returns:
        (dice, hd95). Degenerate-case conventions:
          * prediction non-empty, ground truth empty -> (1, 0) (hd95 undefined)
          * prediction empty -> (0, 0)
    """
    pred[pred > 0] = 1
    gt[gt > 0] = 1
    # BUG FIX: the first branch previously tested only `pred.sum() > 0`,
    # which made the `elif` below unreachable and let hd95 crash when the
    # ground truth was empty but the prediction was not. Both masks must be
    # non-empty for the medpy metrics to be well-defined.
    if pred.sum() > 0 and gt.sum() > 0:
        dice = metric.binary.dc(pred, gt)
        hd95 = metric.binary.hd95(pred, gt)
        return dice, hd95
    elif pred.sum() > 0 and gt.sum() == 0:
        return 1, 0
    else:
        return 0, 0
    

# Script entry point: run supervised training when executed directly.
if __name__=="__main__":
    main()