import numpy as np
import os
import torch
from tqdm import tqdm
import torch.nn as nn
from tensorboardX import SummaryWriter
from torchvision.utils import make_grid
from torch.utils.data import DataLoader
import torch.nn.functional as F
from utils import mk_path, _get_logger, loadyaml, SegMetrics, build_lr_scheduler
from model import build_model
from datasets import build_loader


def main():
    """Entry point: load the YAML config, set up logging/seeding, build the
    data loaders and model, freeze the backbone, and launch PFENet training.

    Paper: "Prior Guided Feature Enrichment Network for Few-Shot Segmentation"
    https://arxiv.org/abs/2008.01449
    """
    path = r"config/PFENet_pascal_split0_resnet50.yaml"
    root = os.path.dirname(os.path.realpath(__file__))  # absolute path of this file's directory
    args = loadyaml(os.path.join(root, path))  # load the yaml config

    # Resolve the compute device; fall back to CPU when CUDA is unavailable.
    if args.cuda:
        args.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    else:
        args.device = torch.device("cpu")

    # Output directories for checkpoints / TensorBoard events / logs.
    args.save_path = os.path.join(root, args.save_path)
    mk_path(args.save_path)
    mk_path(os.path.join(args.save_path, "tensorboardX"))
    mk_path(os.path.join(args.save_path, "model"))
    args.supervise_save_path = os.path.join(args.save_path, "model", "supervise_model.pth")

    args.writer = SummaryWriter(os.path.join(args.save_path, "tensorboardX"))
    args.logger = _get_logger(os.path.join(args.save_path, "log.log"), "info")
    args.tqdm = os.path.join(args.save_path, "tqdm.log")

    # Seed every RNG in use for (best-effort) reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    # Single-GPU training: favor speed over strict determinism.
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True  # let cudnn pick the fastest kernels

    train_loader, test_loader = build_loader(args)  # build the datasets/loaders
    args.epochs = args.total_itrs // len(train_loader) + 1
    args.logger.info("==========> train_loader length:{}".format(len(train_loader.dataset)))
    args.logger.info("==========> test_dataloader length:{}".format(len(test_loader.dataset)))
    args.logger.info("==========> epochs length:{}".format(args.epochs))
    args.logger.info("==========> args:{}".format(args))

    model = build_model(args=args).to(device=args.device)

    # Freeze the backbone (layer0-layer4) so only the PFENet-specific heads
    # stay trainable, matching the original paper's setup.
    for layer in (model.layer0, model.layer1, model.layer2, model.layer3, model.layer4):
        for param in layer.parameters():
            param.requires_grad = False

    PFENet(model=model, train_loader=train_loader, test_loader=test_loader, args=args)


def PFENet(model: nn.Module,
           train_loader: DataLoader,
           test_loader: DataLoader,
           args):
    """Train PFENet with SGD and a manually applied poly learning-rate decay.

    Runs until ``args.total_itrs`` iterations have been consumed, validating
    every ``args.step_size`` iterations via :func:`evaluate` and saving the
    best-mIoU checkpoint (model + optimizer + counters) to
    ``args.supervise_save_path``.
    """
    # Optimize only the parameters left trainable (the backbone is frozen by the caller).
    optimizer = torch.optim.SGD(
        [param for param in model.parameters() if param.requires_grad],
        lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # NOTE(review): this scheduler is created but never stepped — the learning
    # rate is driven manually by poly_learning_rate() below.  Kept because
    # build_lr_scheduler is project code whose side effects are not visible
    # here; confirm it is side-effect free before deleting this line.
    lr_scheduler = build_lr_scheduler(args=args, optimizer=optimizer)

    max_epoch = args.total_itrs // len(train_loader) + 1
    args.logger.info("==============> max_epoch :{}".format(max_epoch))

    # Kept for reference; the model computes its own main/aux losses in train mode.
    criterion = nn.CrossEntropyLoss(ignore_index=255)
    best_miou = 0.0
    cur_itrs = 0
    train_loss = 0.0

    pbar = tqdm(total=args.total_itrs)
    try:
        for epoch in range(max_epoch):
            model.train()

            for batch in train_loader:
                cur_itrs += 1

                img_q = batch['query_img'].to(args.device).float()
                mask_q = batch['query_mask'].to(args.device).long()
                img_s_list = batch['support_imgs'].to(args.device).float()
                mask_s_list = batch["support_masks"].to(args.device).long()

                # In train mode the model returns (output, main_loss, aux_loss).
                output, main_loss, aux_loss = model(img_s_list, mask_s_list, img_q, mask_q)
                loss = main_loss + args.aux_weight * aux_loss

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                # Poly decay applied in place on the optimizer every iteration.
                poly_learning_rate(optimizer=optimizer,
                                   base_lr=args.lr,
                                   curr_iter=cur_itrs,
                                   max_iter=args.total_itrs,
                                   power=args.power,
                                   )

                lr = optimizer.param_groups[0]["lr"]

                train_loss += loss.item()
                args.writer.add_scalar('PFENet/loss', loss.item(), cur_itrs)
                args.writer.add_scalar('PFENet/lr', lr, cur_itrs)

                if cur_itrs % args.step_size == 0:
                    # Periodic validation.
                    model.eval()
                    # Reference mIoU numbers for sanity-checking results.
                    # From the original paper:
                    #          split0 split1 split2 split3 Mean
                    # 1-shot : 56.9   68.2   54.4   52.4   58.0
                    # 5-shot : 59.0   69.1   54.8   52.9   59.0
                    # PFENet numbers as reported by the SSP paper:
                    #          split0 split1 split2 split3 Mean
                    # 1-shot : 61.7   69.5   55.4   56.3   60.8
                    # 5-shot : 63.1   70.7   55.8   57.9   61.9
                    miou = evaluate(cur_itrs, model, test_loader, args)
                    args.writer.add_scalar('PFENet/miou', miou, cur_itrs)

                    if miou > best_miou:
                        best_miou = miou
                        torch.save({
                            "model": model.state_dict(),
                            "optimizer": optimizer.state_dict(),
                            "cur_itrs": cur_itrs,
                            "best_miou": best_miou
                        }, args.supervise_save_path)

                    args.logger.info("PFENet Miou: {:.4f},best Miou: {:.4f}".format(miou, best_miou))
                    model.train()

                if cur_itrs > args.total_itrs:
                    return

                pbar.update(1)

            # Fix: average over the number of batches accumulated this epoch
            # (train_loss sums one value per batch), not the dataset size.
            args.logger.info("Train [{}/{} ({:.0f}%)] loss: {:.5f} ".format(
                cur_itrs, args.total_itrs,
                100. * cur_itrs / args.total_itrs,
                train_loss / len(train_loader)))

            train_loss = 0
    finally:
        # Fix: the progress bar was previously leaked on the early return.
        pbar.close()


def poly_learning_rate(optimizer, base_lr, curr_iter, max_iter, power=0.9, index_split=-1, scale_lr=10., warmup=False, warmup_step=500):
    """Apply the polynomial ("poly") learning-rate schedule in place.

    Param groups with index <= ``index_split`` get the base poly lr; every
    later group gets ``scale_lr`` times that value (with the defaults,
    ``index_split=-1`` means all groups are scaled by 10x).
    """
    in_warmup = warmup and curr_iter < warmup_step
    if in_warmup:
        # Linear ramp from 10% of base_lr up to base_lr over warmup_step iters.
        lr = base_lr * (0.1 + 0.9 * (curr_iter / warmup_step))
    else:
        lr = base_lr * (1 - float(curr_iter) / max_iter) ** power

    if curr_iter % 50 == 0:
        print('Base LR: {:.4f}, Curr LR: {:.4f}, Warmup: {}.'.format(base_lr, lr, in_warmup))

    for index, param_group in enumerate(optimizer.param_groups):
        param_group['lr'] = lr if index <= index_split else lr * scale_lr


# Two-color RGB palette for rendering binary masks in TensorBoard:
# index 0 (background) -> black, index 1 (foreground) -> white.
PALETTE = np.array([
    [0, 0, 0],
    [255, 255, 255],
])


class mIOU:
    """Accumulates a confusion matrix and reports mean IoU over foreground classes."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        # Confusion matrix: rows = ground truth class, cols = predicted class.
        self.hist = np.zeros((num_classes, num_classes))

    def _fast_hist(self, label_pred, label_true):
        """Confusion-matrix counts for one flattened prediction/label pair."""
        # Drop positions whose label is outside [0, num_classes) — e.g. the
        # 255 "ignore" value used by the datasets.
        valid = (label_true >= 0) & (label_true < self.num_classes)
        combined = self.num_classes * label_true[valid].astype(int) + label_pred[valid]
        counts = np.bincount(combined, minlength=self.num_classes ** 2)
        return counts.reshape(self.num_classes, self.num_classes)

    def add_batch(self, predictions, gts):
        """Accumulate a batch of prediction/ground-truth pairs."""
        for pred, gt in zip(predictions, gts):
            self.hist += self._fast_hist(pred.flatten(), gt.flatten())

    def evaluate(self):
        """Return mean IoU, skipping class 0 (background) and NaN classes."""
        # Per-class IoU = TP / (TP + FP + FN).
        tp = np.diag(self.hist)
        union = self.hist.sum(axis=1) + self.hist.sum(axis=0) - tp
        return np.nanmean((tp / union)[1:])


def evaluate(cur_itrs, model, dataloader, args):
    """Run few-shot evaluation over ``dataloader`` and return mIoU as a percentage.

    At batch index 10 the query image, predicted mask, and ground-truth mask
    are also logged to TensorBoard for visual inspection.
    """
    progress = tqdm(dataloader)

    # PASCAL-5i: 20 foreground classes + background; otherwise COCO-20i: 80 + background.
    num_classes = 21 if args.datasets == 'pfenet_pascal' else 81
    metric = mIOU(num_classes)

    for step, batch in enumerate(progress):
        img_q = batch['query_img'].to(args.device).float()
        mask_q = batch['query_mask'].to(args.device).long()
        img_s_list = batch['support_imgs'].to(args.device).float()
        mask_s_list = batch["support_masks"].to(args.device).long()

        # NOTE(review): reads only element 0 — assumes one episode class per
        # batch (batch size 1); confirm against the test loader.
        cls = batch["class_id"][0].item()

        with torch.no_grad():
            out_ls = model(img_s_list, mask_s_list, img_q, mask_q)
            pred = torch.argmax(out_ls, dim=1)

        if step == 10:
            # Log one qualitative example to TensorBoard.
            grid = make_grid(img_q.detach(), nrow=1, normalize=True, scale_each=True)
            args.writer.add_image('PFENet_Image/image_q', grid.squeeze(), cur_itrs)

            pred_np = pred.detach().squeeze().cpu().numpy().astype(np.uint8)
            args.writer.add_image('PFENet_Image/label_pred',
                                  PALETTE[pred_np].astype(np.uint8),
                                  cur_itrs, dataformats='HWC')

            true_np = mask_q.detach().squeeze().cpu().numpy().astype(np.uint8)
            true_np[true_np == 255] = 0  # render ignore pixels as background
            args.writer.add_image('PFENet_Image/label_true',
                                  PALETTE[true_np].astype(np.uint8),
                                  cur_itrs, dataformats='HWC')

        # Map the binary foreground back to the episode's real class id so the
        # confusion matrix accumulates per-class statistics.
        pred[pred == 1] = cls
        mask_q[mask_q == 1] = cls

        metric.add_batch(pred.cpu().numpy(), mask_q.cpu().numpy())
        progress.set_description("Testing mIOU: %.2f" % (metric.evaluate() * 100.0))

    return metric.evaluate() * 100.0


# Script entry point: only run training when executed directly, not on import.
if __name__ == '__main__':
    main()
