import os

import numpy as np
import torch
from utils import PolyLR, SegMetrics, create_lr_scheduler, mk_path, _get_logger, Cosine_LR_Scheduler
import torch.nn as nn
from tqdm import tqdm
from torchvision.utils import make_grid
from torch.utils.data import Dataset, DataLoader


def save_ckpt(path, cur_itrs, model, optimizer, scheduler, best_miou):
    """Serialize the current training state to ``path`` via ``torch.save``.

    :param path: destination file for the checkpoint
    :param cur_itrs: global iteration counter at save time
    :param model: model object to store (stored whole, not as a state_dict)
    :param optimizer: optimizer object to store
    :param scheduler: accepted for interface symmetry but intentionally NOT
        written into the checkpoint
    :param best_miou: best mean IoU observed so far
    """
    state = {
        "cur_itrs": cur_itrs,
        "model": model,
        "optimizer": optimizer,
        "best_miou": best_miou,
    }
    torch.save(state, path)


def update(val_score, epoch, model, optimizer, scheduler, best_miou, path):
    """Checkpoint the model when the current mean IoU beats the best seen.

    :param val_score: metric dict containing a 'Mean IoU' entry
    :param epoch: value stored as the checkpoint's iteration counter
    :param best_miou: best mean IoU observed so far
    :param path: checkpoint destination
    :return: the (possibly updated) best mean IoU
    """
    current = val_score['Mean IoU']
    if current <= best_miou:
        return best_miou
    save_ckpt(path, epoch, model, optimizer, scheduler, current)
    return current


def get_optimizer(args, model):
    """Build the optimizer selected by ``args.opt``.

    :param args: namespace providing ``opt`` ('sgd' | 'adamW' | 'adam'),
        ``lr``, ``weight_decay`` and (for SGD) ``momentum``
    :param model: model whose parameters will be optimized
    :return: a configured ``torch.optim`` optimizer
    :raises ValueError: for an unknown ``args.opt`` value
    """
    if args.opt == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    elif args.opt == "adamW":
        optimizer = torch.optim.AdamW(
            model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    elif args.opt == "adam":
        optimizer = torch.optim.Adam(
            model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    else:
        # BUGFIX: `raise ("...")` raises a TypeError at runtime because a str
        # is not an exception; raise a proper, descriptive exception instead.
        raise ValueError("get_optimizer error: unknown optimizer '{}'".format(args.opt))

    return optimizer


def get_lr_scheduler(args, optimizer):
    """Build the learning-rate scheduler selected by ``args.sched``.

    :param args: namespace providing ``sched`` ('cosine' | 'poly' | ''),
        ``lr``, ``total_itrs``, ``step_size`` and ``epochs``
    :param optimizer: optimizer whose LR will be scheduled
    :return: the configured scheduler
    :raises ValueError: for an unknown ``args.sched`` value
    """
    if args.sched == "cosine":
        lr_scheduler = Cosine_LR_Scheduler(optimizer=optimizer, base_lr=args.lr, warmup_epochs=1,
                                           num_epochs=args.total_itrs // args.step_size,
                                           iter_per_epoch=args.step_size)
    elif args.sched == "poly":
        lr_scheduler = PolyLR(optimizer, max_iters=args.total_itrs)
    elif args.sched == "":
        # Empty string selects the default warmup scheduler.
        lr_scheduler = create_lr_scheduler(optimizer, num_step=args.step_size, epochs=args.epochs,
                                           warmup=True, warmup_epochs=3, warmup_factor=1e-4)
    else:
        # BUGFIX: `raise ("...")` raises a TypeError at runtime because a str
        # is not an exception; raise a proper, descriptive exception instead.
        raise ValueError("get_lr_scheduler error: unknown scheduler '{}'".format(args.sched))
    return lr_scheduler


def supervise(model: nn.Module, train_loader: DataLoader, test_loader: DataLoader, args):
    """Fully-supervised training loop.

    Trains ``model`` with cross-entropy (void label 255 ignored), evaluates on
    ``test_loader`` every ``args.step_size`` iterations, logs scalars/images to
    ``args.writer`` and keeps the checkpoint with the best mean IoU at
    ``args.supervise_save_path``.

    :param model: segmentation network; must expose ``unfreeze_backbone()``
        when ``args.freeze_train`` is set
    :param train_loader: yields (image, label) training batches
    :param test_loader: evaluation loader; its dataset must provide
        ``CLASSES`` and ``label_to_img`` (used by ``test``/``visual``)
    :param args: namespace with hyper-parameters plus ``device``, ``logger``,
        ``writer``, ``num_classes``, ``total_itrs``, ``epochs``, ...
    :return: the trained model
    """
    optimizer = get_optimizer(args=args, model=model)
    lr_scheduler = get_lr_scheduler(args=args, optimizer=optimizer)

    # 255 is the conventional "void"/ignore label for segmentation masks.
    criterion = nn.CrossEntropyLoss(ignore_index=255).to(device=args.device)
    metrics = SegMetrics(args.num_classes)  # evaluation metrics
    cur_itrs = 0
    best_miou = 0.0
    args.logger.info("=================================> supervise start")
    # BUGFIX: initialize before the try so the finally-close cannot NameError
    # if tqdm construction itself raises.
    pbar = None
    try:
        if args.process:
            pbar = tqdm(total=args.total_itrs)
        else:
            pbar = tqdm(total=args.total_itrs, file=args.tqdm)

        for epoch in range(args.epochs):
            run_loss = 0.0
            for idx, (img, label) in enumerate(train_loader):
                cur_itrs += 1
                img = img.to(args.device).float()
                label = label.to(args.device).long()
                out = model(img)
                loss = criterion(out, label)
                run_loss += loss.item()

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                lr_scheduler.step()
                lr = optimizer.param_groups[0]["lr"]

                args.writer.add_scalar('supervise/loss', loss.item(), cur_itrs)
                args.writer.add_scalar('supervise/lr', lr, cur_itrs)

                # Two-stage training: unfreeze the backbone once the warm-up
                # iteration budget is spent.
                if args.freeze_train and cur_itrs == args.unfreeze_itrs:
                    model.unfreeze_backbone()

                if cur_itrs % args.step_size == 0:
                    # Visualize training results, including backbone features
                    # and the ground-truth labels.
                    visual(args=args, model=model, img=img, label_pred=out,
                           label_true=label, cur_itrs=cur_itrs, loader=train_loader)
                    # Run evaluation.
                    val_score = test(cur_itrs=cur_itrs, model=model, optimizer=optimizer, test_loader=test_loader,
                                     metrics=metrics, args=args)
                    args.logger.info("{} metric :{} ".format(args.name, metrics.to_str(val_score, test_loader.dataset.CLASSES)))

                    # Keep only the checkpoint with the best mean IoU so far.
                    # CONSISTENCY: reuse save_ckpt instead of a duplicated
                    # inline torch.save (same keys were written by hand before).
                    if val_score['Mean IoU'] > best_miou:
                        best_miou = val_score['Mean IoU']
                        save_ckpt(args.supervise_save_path, cur_itrs, model,
                                  optimizer, lr_scheduler, best_miou)

                    args.writer.add_scalar(
                        'supervise/miou', val_score["Mean IoU"], cur_itrs)
                    args.writer.add_scalar(
                        'supervise/acc', val_score["Overall Acc"], cur_itrs)
                    model.train()  # test() left the model in eval mode

                if cur_itrs > args.total_itrs:
                    # BUGFIX: the bare `return` dropped the model (callers got
                    # None on the normal iteration-budget exit path).
                    return model

                pbar.update(1)

            args.logger.info(
                "Train  [{}/{} ({:.0f}%)]\t loss: {:.5f}".format(cur_itrs, args.total_itrs,
                                                                 100. * cur_itrs / args.total_itrs,
                                                                 run_loss / len(train_loader), ))
    except Exception as e:
        # Best-effort recovery: record the failure WITH its traceback
        # (logger.info(e) used to lose the stack trace entirely).
        args.logger.exception(e)
    finally:
        if pbar is not None:
            pbar.close()

    return model


def test(cur_itrs, model, optimizer, test_loader, metrics, args):
    """Evaluate ``model`` on ``test_loader`` and return the metric results.

    Switches the model to eval mode (the caller is responsible for restoring
    train mode), accumulates predictions into ``metrics``, and logs the first
    batch's images / predicted / true label maps to TensorBoard.

    :param cur_itrs: global iteration counter, used as the TensorBoard step
    :param model: segmentation network producing (N, C, H, W) logits
    :param optimizer: unused during evaluation; kept for interface
        compatibility with existing callers
    :param test_loader: loader whose dataset provides ``label_to_img``
    :param metrics: SegMetrics-style accumulator (reset/update/get_results)
    :param args: namespace providing ``device``, ``logger`` and ``writer``
    :return: result dict from ``metrics.get_results()`` (e.g. 'Mean IoU')
    """
    model.eval()
    metrics.reset()
    args.logger.info("=========> test cur_itrs:{}".format(cur_itrs))
    with torch.no_grad():
        for idx, (images, labels) in enumerate(tqdm(test_loader)):
            # NOTE: the needless optimizer.zero_grad() that used to run here
            # was removed — no gradients are produced under no_grad(), and the
            # training loop already zeroes grads before every backward().
            images = images.to(args.device).float()
            labels = labels.to(args.device).long()

            out = model(images)
            # argmax over the class dimension -> per-pixel predicted labels
            label_pred = out.detach().max(dim=1)[1].data.cpu().numpy()
            label_true = labels.data.cpu().numpy()
            metrics.update(label_true, label_pred)

            # Log the first batch for visual inspection.
            if idx == 0:
                img_grid = make_grid(tensor=images.data.cpu(), nrow=2, normalize=True, scale_each=True)
                args.writer.add_image('test/Image', img_grid, cur_itrs)
                args.writer.add_image(
                    'test/label_pred', test_loader.dataset.label_to_img(label_pred), cur_itrs, dataformats='HWC')
                args.writer.add_image(
                    'test/label_true', test_loader.dataset.label_to_img(label_true), cur_itrs, dataformats='HWC')

    score = metrics.get_results()

    return score


def visual(args, model, img, label_pred, label_true, cur_itrs, loader):
    """Log training images, predictions, labels and backbone feature maps.

    Leaves the model in eval mode; the caller restores train mode.

    :param args: namespace providing the TensorBoard ``writer``
    :param model: segmentation network (optionally wrapped in
        ``nn.DataParallel``); must expose a ``backbone`` attribute
    :param img: input image batch already on the training device
    :param label_pred: raw network output logits (N, C, H, W)
    :param label_true: ground-truth label batch
    :param cur_itrs: global iteration counter, used as the TensorBoard step
    :param loader: loader whose dataset provides ``label_to_img`` for
        colorizing label maps
    :raises ValueError: if the backbone returns an unexpected number of
        feature maps
    """
    model.eval()

    # Log the raw input images.
    img_grid = make_grid(tensor=img.data.cpu(), nrow=2,
                         normalize=True, scale_each=True, )
    args.writer.add_image("train/img", img_grid, cur_itrs)

    # Predicted label map: argmax over the class dimension.
    label_pred = label_pred.detach().max(dim=1)[1].data.cpu().numpy()
    args.writer.add_image("train/label_pred",
                          loader.dataset.label_to_img(label_pred), cur_itrs, dataformats='HWC')

    label_true = label_true.data.cpu().numpy()
    args.writer.add_image("train/label_true",
                          loader.dataset.label_to_img(label_true), cur_itrs, dataformats='HWC')

    # Re-run only the backbone (no grad) to capture intermediate features;
    # unwrap DataParallel to reach the underlying module.
    if isinstance(model, nn.DataParallel):
        with torch.no_grad():
            features = model.module.backbone(img)
    else:
        with torch.no_grad():
            features = model.backbone(img)

    if len(features) == 4:
        [_, low_level_features, _, x] = features
    elif len(features) == 2:
        low_level_features, x = features
    else:
        # BUGFIX: `raise ("...")` raises a TypeError at runtime because a str
        # is not an exception; raise a proper, descriptive exception instead.
        raise ValueError(
            "visual error: expected 2 or 4 backbone feature maps, got {}".format(len(features)))

    # Feature-map grids for sample 0: each channel rendered as one tile
    # (normalize=True rescales each tile for display).
    low_level_features = make_grid(low_level_features[0].detach().cpu().unsqueeze(dim=1),
                                   normalize=True,
                                   scale_each=True,
                                   nrow=8)
    args.writer.add_image("backbone/low_level_features",
                          low_level_features, cur_itrs)
    x = make_grid(x[0].detach().cpu().unsqueeze(dim=1),
                  normalize=True,
                  scale_each=True,
                  nrow=8)
    args.writer.add_image("backbone/high_level_features", x, cur_itrs)
