import logging
import os
import pickle
import random
import shutil
import sys
import time
from pathlib import Path

import click
import logzero
import numpy as np
import torch
import torch.nn as nn
import yaml
from logzero import logger, setup_logger
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from tqdm import tqdm

# Allow multiple OpenMP runtimes in one process; works around the Intel
# libiomp "already initialized" abort seen when torch and numpy ship their
# own copies (common on Windows/conda setups).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'

# Print tensors/arrays in full (no truncation) so debugging logs are complete.
torch.set_printoptions(precision=4, linewidth=400, threshold=sys.maxsize, sci_mode=False)
np.set_printoptions(precision=4, linewidth=400, threshold=sys.maxsize)

from vehicle_reid_pytorch.data import samplers
from vehicle_reid_pytorch.utils import load_checkpoint, save_checkpoint, merge_configs, get_host_ip
from vehicle_reid_pytorch import loss as vr_loss
from vehicle_reid_pytorch.data import make_basic_dataset

from models.configs.defaults import get_default_configs
from models.model import PVASFF
from models.losses.triplet import ParsingTripletLoss
from models.losses.id import ParsingIdLoss
from models.losses.circle import CircleLoss
from models.tools.math_tools import Clck_R1_mAP
from models.tools.train_tools import make_optimizer, make_scheduler, make_grid, complete_grid_config_dict

import os
import platform

# platform.platform() returns strings like "Windows-10-..." on native Windows,
# and the kernel string contains "WSL" under the Windows Subsystem for Linux.
_IS_WINDOWS = platform.platform().startswith("Windows")
_IS_WSL = ("WSL" in platform.platform())
# Count of visible CUDA devices (0 until detected).
_NUM_CUDA_DEVICES = 0


def _set_seed(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    if _NUM_CUDA_DEVICES > 1:
        torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True


@click.group()
def clk():
    """Root click command group for the train / scheduled_train / grid_search / eval subcommands."""
    pass


@clk.command()
@click.option("-c", "--config-file", type=str, default="")
@click.argument("cmd-configs", nargs=-1)
def train(config_file, cmd_configs):
    """
    Training models.

    Merges the default config with ``config_file`` and any ``cmd_configs``
    overrides, resumes from the latest checkpoint in ``cfg.output_dir`` when
    one exists, then runs the optimization loop with periodic logging,
    checkpointing and validation.
    """
    # The CUDA device count is read by module-level helpers and below even on
    # CPU runs.  Without this declaration the assignment in the cuda branch
    # would make the name function-local for the WHOLE function, leaving the
    # module global stuck at 0 and raising UnboundLocalError when
    # cfg.device != 'cuda'.
    global _NUM_CUDA_DEVICES

    cfg = get_default_configs()
    cfg = merge_configs(cfg, config_file, cmd_configs)
    cfg.freeze()

    os.makedirs(cfg.output_dir, exist_ok=True)
    logzero.logfile(f"{cfg.output_dir}/train.log")
    logzero.loglevel(getattr(logging, cfg.logging.level.upper()))
    logger.info(cfg)
    logger.info(f"worker ip is {get_host_ip()}")

    if cfg.device == 'cuda':
        if cfg.device_ids != "":
            os.environ['CUDA_VISIBLE_DEVICES'] = cfg.device_ids
        _NUM_CUDA_DEVICES = torch.cuda.device_count()
        if _NUM_CUDA_DEVICES < 1:
            raise ValueError(f"cannot perform cuda training due to insufficient cuda device")
        logger.info(f"{_NUM_CUDA_DEVICES} cuda device found!")

    # Quiet first dataset pass: only needed to know the number of identity
    # classes so the classifier head can be sized before loading checkpoints.
    # NOTE(review): the dataset is rebuilt after seeding below — confirm the
    # double construction is intentional.
    train_dataset, valid_dataset, meta_dataset = make_basic_dataset(cfg.data.pkl_path,
                                                                    cfg.data.train_size,
                                                                    cfg.data.valid_size,
                                                                    cfg.data.pad,
                                                                    test_ext=cfg.data.test_ext,
                                                                    re_prob=cfg.data.re_prob,
                                                                    with_mask=cfg.data.with_mask,
                                                                    verbose=False)
    num_class = meta_dataset.num_train_ids
    logger.info(f"Building {cfg.backbone.name} model, "
                f"num class is {num_class}")
    model = PVASFF(cfg, num_class)

    logger.info(f"Building {cfg.optim.name} optimizer...")
    optimizer = make_optimizer(model, cfg)

    logger.info(f"Loading checkpoint from {cfg.output_dir}")
    # load_checkpoint returns 1 when no checkpoint is found.
    start_epoch = load_checkpoint(cfg.output_dir, cfg.device, model=model)
    if start_epoch > cfg.train.epochs:
        logger.info(f"The training corresponding to this config file {Path(config_file).name} was over")
        return
    elif start_epoch > 1:
        logger.info(f"Loaded checkpoint successfully! Start epoch is {start_epoch}")
    else:
        logger.info(f"Cannot find pre-trained checkpoint. Start training from epoch 1")
        # Back up the model-defining source files so the run stays reproducible
        # even after later code edits.
        _SRC_DIR = Path('models')
        _BACKUP_DIR = Path(cfg.output_dir) / 'backup'
        if os.path.exists(_BACKUP_DIR):
            shutil.rmtree(_BACKUP_DIR)
        os.makedirs(_BACKUP_DIR, exist_ok=False)
        for name in (
                'blocks/pam.py', 'blocks/mASFF.py', 'blocks/mResNet.py', 'losses/id.py', 'model.py',
                'configs/defaults.py'):
            _src_file = _SRC_DIR / name
            _dst_file = _BACKUP_DIR / _src_file.name
            shutil.copyfile(_src_file, _dst_file)

    if _NUM_CUDA_DEVICES > 1:
        model = nn.DataParallel(model)
        logger.info(f"Parallelized model on {_NUM_CUDA_DEVICES} cuda devices")

    # set random seeds
    _set_seed(cfg.seed)

    logger.info(f"Loading {cfg.data.name} dataset")
    train_dataset, valid_dataset, meta_dataset = make_basic_dataset(cfg.data.pkl_path,
                                                                    cfg.data.train_size,
                                                                    cfg.data.valid_size,
                                                                    cfg.data.pad,
                                                                    test_ext=cfg.data.test_ext,
                                                                    re_prob=cfg.data.re_prob,
                                                                    with_mask=cfg.data.with_mask, )
    num_class = meta_dataset.num_train_ids
    sampler = getattr(samplers, cfg.data.sampler)(train_dataset.meta_dataset, cfg.data.batch_size,
                                                  cfg.data.num_instances)

    if _IS_WINDOWS:
        logger.warning(f"Cannot perform multi-worker DataLoader on Windows, "
                       f"ignored 'data.train_num_workers'")
        train_loader = DataLoader(train_dataset, sampler=sampler, batch_size=cfg.data.batch_size,
                                  num_workers=0,
                                  pin_memory=True)
        valid_loader = DataLoader(valid_dataset, batch_size=cfg.data.batch_size,
                                  num_workers=cfg.data.test_num_workers, pin_memory=True, shuffle=False)
    else:
        def _init_fn(worker_id):
            # Give each DataLoader worker a distinct but reproducible numpy seed.
            np.random.seed(int(cfg.seed) + worker_id)

        # pin_memory not supported on WSL: raise out of memory error
        train_loader = DataLoader(train_dataset, sampler=sampler, batch_size=cfg.data.batch_size,
                                  num_workers=cfg.data.train_num_workers,
                                  pin_memory=(not _IS_WSL), worker_init_fn=_init_fn)
        valid_loader = DataLoader(valid_dataset, batch_size=cfg.data.batch_size,
                                  num_workers=cfg.data.test_num_workers,
                                  pin_memory=(not _IS_WSL), shuffle=False, worker_init_fn=_init_fn)

    logger.info(f"Successfully load {cfg.data.name}!")

    writer = SummaryWriter(logdir=f"{cfg.output_dir}/tensorlog")

    logger.info(f"Building losses {cfg.loss.losses}")
    built_losses = []
    center_loss = None
    optimizer_center = None
    if 'local-triplet' in cfg.loss.losses:
        pt_loss = ParsingTripletLoss(margin=0.3)
        built_losses.append('local-triplet')
    if 'global-focal' in cfg.loss.losses:
        gf_loss = ParsingIdLoss(num_class, cfg.loss.pid_gamma)
        built_losses.append('global-focal')  # was mislabelled 'global-triplet'
    if 'local-focal' in cfg.loss.losses:
        lf_loss = ParsingIdLoss(num_class, cfg.loss.pid_gamma)
        built_losses.append('local-focal')
    if 'triplet' in cfg.loss.losses:
        triplet_loss = vr_loss.TripletLoss(margin=cfg.loss.triplet_margin)
        built_losses.append('triplet')
    if 'id' in cfg.loss.losses:
        id_loss = vr_loss.CrossEntropyLabelSmooth(num_class, cfg.loss.id_epsilon)
        built_losses.append('id')
    if 'circle' in cfg.loss.losses:
        circle_loss = CircleLoss(cfg.loss.circle_margin, cfg.loss.circle_gamma)
        built_losses.append('circle')
    if 'center' in cfg.loss.losses:
        # DataParallel wraps the model, so the backbone width lives on .module.
        if _NUM_CUDA_DEVICES > 1:
            center_loss = vr_loss.CenterLoss(num_class, feat_dim=model.module.backbone_out_channels).to(cfg.device)
        else:
            center_loss = vr_loss.CenterLoss(num_class, feat_dim=model.backbone_out_channels).to(cfg.device)
        if start_epoch > 1:
            assert load_checkpoint(cfg.output_dir, cfg.device, center_loss=center_loss) == start_epoch
            logger.info(f"Loaded center loss checkpoint successfully!")
        # The center loss keeps learnable class centers, optimized separately.
        optimizer_center = torch.optim.SGD(center_loss.parameters(), cfg.loss.center_lr)
        built_losses.append('center')
    if 'tuplet' in cfg.loss.losses:
        tuplet_loss = vr_loss.TupletLoss(cfg.data.num_instances, cfg.data.batch_size // cfg.data.num_instances,
                                         cfg.loss.tuplet_s, cfg.loss.tuplet_beta)
        built_losses.append('tuplet')

    logger.info(f"Successfully build {built_losses}")

    scheduler_params = dict(cfg.scheduler)
    # step_by_batch is consumed here, not by make_scheduler.
    del scheduler_params['step_by_batch']
    if cfg.scheduler.warmup_steps < 0 or cfg.scheduler.standup_steps < 0:
        raise ValueError(f"excepted scheduler.warmup_steps and scheduler.standup_steps must greater than zero, "
                         f"got {cfg.scheduler.warmup_steps} and {cfg.scheduler.standup_steps}")
    elif cfg.scheduler.step_by_batch and (
            cfg.scheduler.warmup_steps < len(train_loader) or
            cfg.scheduler.standup_steps < len(train_loader)):
        logger.warning(f"expected scheduler steps per batch, but got small step iteration number(s), "
                       f"check configuration file for 'scheduler.warmup_steps' and 'scheduler.standup_steps', "
                       f"ignore this message if you are confident with your configurations")
    if cfg.scheduler.step_by_batch:
        _STEPS = (start_epoch - 1) * len(train_loader)  # inaccurate due to flexible length of train_loader
    else:
        _STEPS = start_epoch - 1
    scheduler = make_scheduler(optimizer=optimizer,
                               last_epoch=_STEPS, **scheduler_params)

    logger.info("Start training!")
    for epoch in range(start_epoch, cfg.train.epochs + 1):
        t_begin = time.time()
        running_loss = 0
        running_acc = 0
        gpu_time = 0
        data_time = 0
        # Move once per epoch (idempotent); .to() also keeps CPU runs working,
        # where the previous unconditional .cuda() would fail.
        model = model.to(cfg.device)
        t0 = time.time()
        model.train()
        for batch_idx, batch in enumerate(train_loader):
            t1 = time.time()
            data_time += t1 - t0
            global_steps = (epoch - 1) * len(train_loader) + batch_idx
            optimizer.zero_grad()

            if 'center' in cfg.loss.losses:
                optimizer_center.zero_grad()

            for name, item in batch.items():
                if isinstance(item, torch.Tensor):
                    batch[name] = item.to(cfg.device)

            output = model(**batch)
            global_feat = output["global_feat"]
            global_score = output["global_cls_score"]
            local_feat = output["local_feat"]
            local_score = output["local_cls_score"]
            vis_score = output["part_weights"]
            lf_xent_score = output["lf_xent"]
            # losses
            loss = torch.tensor([0.], dtype=torch.float, device=cfg.device)
            if 'id' in cfg.loss.losses:
                # NOTE(review): lf_xent_score[0] is excluded from this sum (a
                # previously computed-and-discarded value suggests it may have
                # been meant to be included) — confirm the exclusion is intended.
                lf_xent_loss = torch.tensor([0.], dtype=torch.float, device=cfg.device)
                for i in range(1, len(lf_xent_score)):
                    lf_xent_loss += id_loss(lf_xent_score[i], batch["id"])
                loss = lf_xent_loss
                writer.add_scalar("lf_xent_loss/id_loss",
                                  lf_xent_loss.item(), global_steps)

            if 'triplet' in cfg.loss.losses:
                g_t_loss = 0
                for g_feat in global_feat:
                    t_loss, _, _ = triplet_loss(g_feat, batch["id"], normalize_feature=False)
                    g_t_loss += t_loss
                loss += g_t_loss / len(global_feat)
                writer.add_scalar("global_loss/triplet_loss", g_t_loss.item(), global_steps)

            if 'center' in cfg.loss.losses:
                g_center_loss = center_loss(global_feat[-1], batch["id"])
                loss += cfg.loss.center_weight * g_center_loss
                logger.debug(g_center_loss.item())
                writer.add_scalar("global_loss/center_loss", g_center_loss.item(), global_steps)

            if 'circle' in cfg.loss.losses:
                c_loss = 0
                for g_feat in global_feat:
                    c_loss += circle_loss(g_feat, batch["id"])
                loss += c_loss / len(global_feat) * cfg.loss.circle_weight
                writer.add_scalar("global_loss/circle_loss", c_loss.item(), global_steps)

            if 'tuplet' in cfg.loss.losses:
                g_tuplet_loss = 0
                for g_feat in global_feat:
                    g_tuplet_loss += tuplet_loss(g_feat)
                loss += g_tuplet_loss / len(global_feat)
                writer.add_scalar("global_loss/tuplet_loss", g_tuplet_loss.item(), global_steps)

            if 'local-triplet' in cfg.loss.losses:
                l_t_loss = 0
                for l_feat in local_feat:
                    l_triplet_loss, _, _ = pt_loss(
                        l_feat, vis_score, batch["id"], True)
                    l_t_loss += l_triplet_loss
                loss += l_t_loss / len(local_feat)
                writer.add_scalar("local_loss/triplet_loss", l_t_loss.item(), global_steps)

            if 'global-focal' in cfg.loss.losses:
                g_focal_loss = 0
                for g_score in global_score:
                    g_focal_loss += gf_loss(g_score.unsqueeze(2), batch["id"],
                                            torch.ones_like(batch["id"]) * cfg.loss.pid_alpha)
                loss += g_focal_loss / len(global_score)
                writer.add_scalar("global_loss/focal_loss", g_focal_loss.item(), global_steps)

            if 'local-focal' in cfg.loss.losses:
                l_focal_loss = 0
                for l_score in local_score:
                    l_focal_loss += lf_loss(l_score.unsqueeze(2), batch["id"],
                                            torch.ones_like(batch["id"]) * cfg.loss.pid_alpha)
                loss += l_focal_loss / len(local_score)
                writer.add_scalar("local_loss/focal_loss", l_focal_loss.item(), global_steps)

            loss.backward()
            optimizer.step()

            # The center loss is optimized with its own SGD optimizer; undo the
            # center_weight scaling on its gradients first.
            if 'center' in cfg.loss.losses:
                for param in center_loss.parameters():
                    param.grad.data *= (1. / cfg.loss.center_weight)
                optimizer_center.step()

            # A sample counts as correct only if EVERY global classifier head
            # predicts its id.
            acc = torch.ones_like(batch["id"], dtype=torch.bool)
            for g_score in global_score:
                acc = acc & (g_score.max(1)[1] == batch["id"])
            acc = acc.float().mean()

            # running mean (exponential moving average after the first batch)
            if batch_idx == 0:
                running_acc = acc.item()
                running_loss = loss.item()
            else:
                running_acc = 0.98 * running_acc + 0.02 * acc.item()
                running_loss = 0.98 * running_loss + 0.02 * loss.item()

            if batch_idx % cfg.logging.period == 0:
                logger.info(
                    f"Epoch[{epoch:3d}] Iteration[{batch_idx:4d}/{len(train_loader):4d}] "
                    f"Loss: {running_loss:.3f}, Acc: {running_acc:.3f}, Base lr: {cfg.optim.base_lr * scheduler.get_last_lr_factor():.3e}")
            t0 = time.time()
            gpu_time += t0 - t1
            if cfg.scheduler.step_by_batch:
                scheduler.step()

        t_end = time.time()
        if not cfg.scheduler.step_by_batch:
            scheduler.step()

        logger.info(
            f"Epoch {epoch} done. Time per epoch: {t_end - t_begin:.1f}[s] "
            f"Speed:{(t_end - t_begin) / len(train_loader.dataset):.1f}[samples/s] ")
        logger.info(f"GPU Time: {gpu_time:.2f}[s], Data Time: {data_time:.2f}[s]")
        logger.info('-' * 10)

        # save checkpoint (always near the end of training: last 10 epochs)
        if epoch % cfg.model.ckpt_period == 0 or epoch == 1 or epoch > cfg.train.epochs - 10:
            logger.info(f"Saving models in epoch {epoch}")
            save_checkpoint(epoch, cfg.output_dir, model=model)
            if 'center' in cfg.loss.losses:
                save_checkpoint(epoch, cfg.output_dir, center_loss=center_loss)

        if (epoch == 1 or epoch % cfg.test.period == 0 or epoch > cfg.train.epochs - 10) and \
                cfg.data.name.lower() != 'veriwild' and \
                cfg.data.name.lower() != 'vehicleid':
            query_length = meta_dataset.num_query_imgs
            if query_length != 0:  # some private datasets ship without a test split
                _eval_impl(model, cfg.device, cfg.data.name,
                           valid_loader, query_length,
                           feat_norm=cfg.test.feat_norm,
                           remove_junk=cfg.test.remove_junk,
                           lambda_=cfg.test.lambda_,
                           debug=cfg.test.debug,
                           output_dir="")

@clk.command()
@click.argument("schedule-config-file", type=str)
@click.pass_context
def scheduled_train(ctx, schedule_config_file):
    """Run train followed by eval for every config file listed in a schedule.

    The schedule file contains one config-file path per line; lines starting
    with '#' are annotations and blank lines are ignored.  All paths are
    validated up front so a typo late in the schedule cannot waste the
    earlier training runs.
    """

    def _skip(line):
        # '#' lines are annotations; blank/whitespace-only lines separate groups.
        return line.startswith('#') or line.strip() == ''

    # Read the schedule with a context manager so the handle is closed
    # promptly (the previous version leaked the open file).
    with open(schedule_config_file, 'r') as schedule:
        config_files = schedule.readlines()

    print("scheduled training config file(s):")
    for config_file in config_files:
        if _skip(config_file):
            continue
        config_filename = config_file.rstrip('\n')
        print("\t", config_filename)
        if not os.path.exists(config_filename):
            raise FileNotFoundError(f"cannot find specificed training config {config_filename} "
                                    f"make sure you provide valid path to config files\n"
                                    f"NOTE: relevent path is from [{os.getcwd()}] to [path-you-store-config-file]")

    for config_file in config_files:
        if _skip(config_file):
            continue
        config_filename = config_file.rstrip('\n')
        print(f"loading config file: {config_filename}")
        ctx.invoke(train, config_file=config_filename)
        ctx.invoke(eval, config_file=config_filename)

    print("done!")


@clk.command()
@click.argument("grid-config-file", type=str)
@click.option("-c", "--train-config-file", type=str, default="")
@click.option("--generate", "mode", flag_value="generate")
@click.option("--execute", "mode", flag_value="execute", default=True)
@click.option("--save-config-file", is_flag=True, default=False)
@click.pass_context
def grid_search(ctx, grid_config_file, train_config_file, save_config_file, mode):
    """Hyperparameter grid search driven by a YAML description.

    Every non-reserved key of the YAML ('save', 'repeat' and 'seed' are
    reserved) must be a '(node).(key)' config path mapped to a grid spec;
    each point of the Cartesian product is trained and evaluated once per
    repeat.  Progress is checkpointed to a pickle so an interrupted search
    resumes where it stopped.  With --generate, configs are only dumped
    instead of executed.
    """
    _RESERVED_KEY = ('save', 'repeat', 'seed')
    # Runs scoring below these baselines have their model checkpoints removed.
    _MAP_BASELINE = 0.74
    _CMC_BASELINE = 0.935

    cfg = get_default_configs()
    cfg = merge_configs(cfg, train_config_file, [])
    grid: dict = {}

    grid_config_file = Path(grid_config_file)
    with open(grid_config_file, 'r') as grid_config:
        grid_cfg_as_dict = yaml.safe_load(grid_config.read())
    assert isinstance(grid_cfg_as_dict, dict), "failed to load grid configs!"
    if 'save' in grid_cfg_as_dict and grid_cfg_as_dict['save'] != '':
        _SAVE_PATH = Path(grid_cfg_as_dict['save'])
        _CHECKPOINT_PATH = _SAVE_PATH
        _LOG_FILE_PATH = _SAVE_PATH.parent / (_SAVE_PATH.stem + '.log')
        if str(_SAVE_PATH.parent) != '.':
            os.makedirs(str(_SAVE_PATH.parent), exist_ok=True)
    else:
        # Default the checkpoint/log names to the grid config's own stem.
        _CHECKPOINT_PATH = grid_config_file.stem + ".pkl"
        _LOG_FILE_PATH = grid_config_file.stem + ".log"
        grid_cfg_as_dict['save'] = _CHECKPOINT_PATH
    _REPEAT = 1
    if 'repeat' in grid_cfg_as_dict:
        _REPEAT = int(grid_cfg_as_dict['repeat'])
        if _REPEAT <= 0:
            raise ValueError(f"argument 'repeat' must be greater than zero, got {_REPEAT}")
    else:
        grid_cfg_as_dict['repeat'] = _REPEAT
    _SEED = [cfg.seed]
    if 'seed' in grid_cfg_as_dict:
        _SEED = grid_cfg_as_dict['seed']
        # Accept a scalar seed (YAML may parse it as int or str) when there is
        # a single repeat; otherwise a list of seeds is required.
        if _REPEAT == 1 and isinstance(_SEED, (str, int)):
            _SEED = [int(_SEED)]
        elif not isinstance(_SEED, list):
            raise TypeError(f"argument 'seed' must be a list of integers, got type {type(_SEED)}")
    else:
        if _REPEAT > 1:
            raise ValueError("missing argument 'seed': you must specify a list of seeds "
                             "if you want to execute multiple experiments with one group of hyperparameters")
        grid_cfg_as_dict['seed'] = _SEED
    logfile = setup_logger(name="grid_search_log", logfile=_LOG_FILE_PATH, level=logging.INFO)
    logfile.info(f"loaded grid search config from {grid_config_file}")
    for key, config in grid_cfg_as_dict.items():
        if isinstance(config, dict):
            config = complete_grid_config_dict(config)
        logfile.info(f"{key}: {str(config)}")

    try:  # try resume progress from checkpoint
        with open(_CHECKPOINT_PATH, 'rb') as _PKL_FILE:
            checkpoint = pickle.load(_PKL_FILE)
        _LOGGED = True
        _LOGGED_GRID_CONFIG = checkpoint['GRID_CONFIG']
        _GRID_CONFIG = {}
        status = checkpoint['STATUS']
        logfile.info(f"loading checkpoint from {_CHECKPOINT_PATH}")
        if 'REPEAT' not in checkpoint:
            checkpoint['REPEAT'] = _REPEAT
        elif checkpoint['REPEAT'] != _REPEAT:
            raise ValueError(f"invalid checkpoint: key 'REPEAT' are not consistent with current configs, "
                             f"got {checkpoint['REPEAT']} and {_REPEAT}")
        if 'SEED' not in checkpoint:
            checkpoint['SEED'] = _SEED
        elif checkpoint['SEED'] != _SEED:
            raise ValueError(f"invalid checkpoint: key 'SEED' are not consistent with current configs, "
                             f"got {checkpoint['SEED']} and {_SEED}")
    except FileNotFoundError:
        checkpoint = {'GRID_CONFIG': {}, 'STATUS': [], 'METRICS': [], 'REPEAT': _REPEAT, 'SEED': _SEED}
        _LOGGED = False
        _GRID_CONFIG = {}
        status = []

    for key, key_config_dict in grid_cfg_as_dict.items():
        if key not in _RESERVED_KEY:
            if len(key.split('.')) != 2:
                raise ValueError(f"excepted input key as (node).(key), got '{key}'")
            node, leaf = key.split('.')
            if node in cfg and leaf in getattr(cfg, node):
                _GRID_CONFIG[key] = complete_grid_config_dict(key_config_dict)
                grid[key] = make_grid(key_config_dict)
            else:
                raise ValueError(f"cannot merge a non-exist key '{leaf}' into node '{node}'")

    if _LOGGED:  # check if checkpoint is consistent with current configs
        if _GRID_CONFIG != _LOGGED_GRID_CONFIG:
            raise ValueError(f"invalid checkpoint: grid configs are not consistent with current configs")
        logfile.info(f"resumed progress from checkpoint {_CHECKPOINT_PATH} successfully")
    else:  # create new checkpoint from dumping configs
        checkpoint['GRID_CONFIG'] = _GRID_CONFIG
        with open(_CHECKPOINT_PATH, "wb") as _PKL_FILE:
            pickle.dump(checkpoint, _PKL_FILE)
        logfile.info(f"grid config dumped to {_CHECKPOINT_PATH}")

    # depth[i] = number of candidate values for the i-th key; limit = total
    # number of runs; factor[i] = stride of key i in the flattened product.
    depth = [len(grid_v) for grid_v in grid.values()]
    limit = int(np.prod(depth)) * _REPEAT  # int(): np.prod([]) is a float 1.0
    factor = [np.prod(depth[i:]) // depth[i] for i in range(len(grid))]
    if len(status) == limit:
        logfile.info(f"grid search has completed")
        return
    elif len(status) > 0:
        logfile.info(f"scheduled remaining {limit - len(status)} hyperparameter groups")
    else:
        logfile.info(f"scheduled {limit} hyperparameter groups")

    for indicator in range(len(status), limit):
        _STATUS_STRING = ""
        _ROUND = indicator // _REPEAT
        cmd_list = []
        logfile.info(f"executing {indicator + 1}-th grid searching")

        # Decode the flat run index into one value per grid key.
        for i, key in enumerate(grid):
            _CUR_ROUND = _ROUND // factor[i]
            _ROUND = _ROUND % factor[i]
            _CUR_KEY_NAME = key
            _CUR_HYPERPARAMETER = grid[key][_CUR_ROUND]
            _STATUS_STRING += f"+{_CUR_KEY_NAME}_{_CUR_HYPERPARAMETER:.3e}"
            cmd_list.extend([_CUR_KEY_NAME, _CUR_HYPERPARAMETER])
        _OUTPUT_DIR = ""
        if _REPEAT > 1:
            _OUTPUT_DIR = cfg.output_dir.rstrip(
                '/\\') + f"_{Path(_CHECKPOINT_PATH).stem}_{indicator // _REPEAT + 1}_{indicator % _REPEAT + 1}"
        else:
            _OUTPUT_DIR = cfg.output_dir.rstrip('/\\') + f"_{Path(_CHECKPOINT_PATH).stem}_{indicator // _REPEAT + 1}"
        cmd_list.extend(['seed', _SEED[indicator % _REPEAT]])
        cmd_list.extend(['output_dir', _OUTPUT_DIR])
        cmd_list.extend(['test.model_path', str(Path(_OUTPUT_DIR) / f"model_{cfg.train.epochs}.pth")])
        logfile.info(f"generated config: {_STATUS_STRING}")
        os.makedirs(_OUTPUT_DIR, exist_ok=True)

        if mode == 'execute':
            if save_config_file:
                _CFG_T = cfg.clone()
                _CFG_T.merge_from_list(cmd_list)
                _DUMP_PATH = Path(_CFG_T.output_dir) / 'config.yml'
                with open(_DUMP_PATH, 'w') as f:
                    print(_CFG_T, file=f)
                logfile.info(f"{indicator + 1}-th config dumped to {_DUMP_PATH}")

            logfile.info(f"executing {indicator + 1}-th training")
            ctx.invoke(train, config_file=train_config_file, cmd_configs=cmd_list)
            CMC, mAP = ctx.invoke(eval, config_file=train_config_file, cmd_configs=cmd_list)

            # dump checkpoint
            _METRICS_STRING = f"mAP:{mAP:.1%}, CMC@1:{CMC[0]:.2%}, CMC@5:{CMC[4]:.2%}"
            checkpoint['METRICS'].append(_METRICS_STRING)
            logfile.info(f"result: {_METRICS_STRING}")

            # remove bad model checkpoints
            if _REPEAT == 1:
                if mAP < _MAP_BASELINE and CMC[0] < _CMC_BASELINE:
                    # fixed: the old `_FILE[:-3] == 'pth'` slice never matched
                    # and os.remove() got a bare filename relative to the CWD
                    for _FILE in os.listdir(_OUTPUT_DIR):
                        if _FILE.endswith('.pth'):
                            os.remove(os.path.join(_OUTPUT_DIR, _FILE))
                    logfile.info(
                        f"removed {indicator + 1}-th checkpoint due to its low performance: mAP@{mAP:.1%}, CMC@1:{CMC[0]:.2%}")
            # preserve best model per round only
            elif (indicator + 1) % _REPEAT == 0:
                _PERFORMANCE_THIS_ROUND = checkpoint['METRICS'][-_REPEAT:]
                mAPs = []
                CMCs = []
                for performance in _PERFORMANCE_THIS_ROUND:
                    mAPs.append(float(performance.split(", ")[0][4:-1]) / 100.)  # "mAP:74.0%" -> 0.74
                    CMCs.append(float(performance.split(", ")[1][6:-1]) / 100.)  # "CMC@1:93.5%" -> 0.935
                _BEST_MAP = np.argmax(mAPs)
                _BEST_CMC = np.argmax(CMCs)
                _PRESERVE_PATH = []
                # fixed: these were `.append[...]` (subscripting the method,
                # a TypeError at runtime) instead of calls
                if mAPs[_BEST_MAP] > _MAP_BASELINE:
                    _PRESERVE_PATH.append(_BEST_MAP)
                if CMCs[_BEST_CMC] > _CMC_BASELINE:
                    _PRESERVE_PATH.append(_BEST_CMC)
                _BLANK_PATH = _OUTPUT_DIR.rstrip('0123456789')
                for i in range(_REPEAT):
                    if i in _PRESERVE_PATH:
                        continue
                    _CUR_PATH = _BLANK_PATH + str(i + 1)
                    for _FILE in os.listdir(_CUR_PATH):
                        if _FILE.endswith('.pth'):
                            os.remove(os.path.join(_CUR_PATH, _FILE))
                # log through the grid-search logger for consistency (was the
                # module-level `logger`)
                logfile.info(f"cleaned checkpoints from {indicator - _REPEAT + 2}-th to {indicator + 1}-th searching")

        else:
            _CFG_T = cfg.clone()
            _CFG_T.merge_from_list(cmd_list)
            _DUMP_PATH = Path(_CFG_T.output_dir) / 'config.yml'
            with open(_DUMP_PATH, 'w') as f:
                print(_CFG_T, file=f)
            logfile.info(f"{indicator + 1}-th config dumped to {_DUMP_PATH}")

        checkpoint['STATUS'].append(_STATUS_STRING)
        with open(_CHECKPOINT_PATH, "wb") as _PKL_FILE:
            pickle.dump(checkpoint, _PKL_FILE)
        logfile.info(f"checkpoint dumped to {_CHECKPOINT_PATH}")


@clk.command()
@click.option("--epoch", type=int, default=-1)
@click.option("-c", "--config-file", type=str, default="")
@click.option("--debug", is_flag=True, default=False)
@click.argument("cmd-configs", nargs=-1)
def eval(epoch: int, debug: bool, config_file: str, cmd_configs):
    """Evaluate a trained PVASFF checkpoint on the validation set.

    Args:
        epoch: checkpoint epoch to load; -1 uses cfg.test.model_path as-is.
        debug: dump intermediate tensors and visualizations for the first
            batch (handled inside _eval_impl).
        config_file: YAML config file merged over the library defaults.
        cmd_configs: extra "KEY VALUE ..." overrides merged last.

    Returns:
        The (cmc, mAP) tuple produced by _eval_impl.
    """
    cfg = get_default_configs()
    # cfg.set_new_allowed(True) # should work on yacs>=0.1.8
    # cfg.test.set_new_allowed(False)
    # compatible with decrapted configs
    from yacs.config import CfgNode
    def set_new_allowed(cfg, is_new_allowed):
        # Recursively toggle yacs' NEW_ALLOWED flag so that merging older
        # config files with extra keys does not raise (backport of
        # CfgNode.set_new_allowed, which needs yacs>=0.1.8).
        cfg.__dict__[CfgNode.NEW_ALLOWED] = is_new_allowed
        for v in cfg.__dict__.values():
            if isinstance(v, CfgNode):
                set_new_allowed(v, is_new_allowed)
        for v in cfg.values():
            if isinstance(v, CfgNode):
                set_new_allowed(v, is_new_allowed)

    set_new_allowed(cfg, True)
    # cfg.test stays strict so typos in test options still fail loudly.
    set_new_allowed(cfg.test, False)

    cfg = merge_configs(cfg, config_file, cmd_configs)

    os.makedirs(cfg.output_dir, exist_ok=True)
    model_dir = Path(cfg.test.model_path).parent
    # The evaluation log lives next to the checkpoint being evaluated.
    logzero.logfile(f"{model_dir}/eval.log")
    logzero.loglevel(getattr(logging, cfg.logging.level.upper()))
    if cfg.test.output_dir != "":
        os.makedirs(cfg.test.output_dir, exist_ok=True)

    train_dataset, valid_dataset, meta_dataset = make_basic_dataset(cfg.data.pkl_path,
                                                                    cfg.data.train_size,
                                                                    cfg.data.valid_size,
                                                                    cfg.data.pad,
                                                                    test_ext=cfg.data.test_ext,
                                                                    re_prob=cfg.data.re_prob,
                                                                    with_mask=cfg.data.with_mask,
                                                                    )
    valid_loader = DataLoader(valid_dataset,
                              batch_size=cfg.data.batch_size,
                              num_workers=cfg.data.test_num_workers,
                              pin_memory=(not _IS_WSL),  # pinned memory is unreliable under WSL
                              shuffle=False)

    model = PVASFF(cfg, meta_dataset.num_train_ids, debug).to(cfg.test.device)
    if epoch < 0:  # no specified test epoch
        model_path = cfg.test.model_path
    else:  # specified test epoch
        logger.info(f"Load specificed checkpoint at epoch {epoch}")
        model_path = str(model_dir / f"model_{epoch}.pth")
        cfg.test.model_path = model_path
    cfg.test.debug = debug
    logger.info(cfg.test)
    logger.info(f"Loading model from {model_path}")
    state_dict = torch.load(model_path, map_location=cfg.test.device)
    # strict=False: tolerate checkpoints that miss or add auxiliary parameters.
    model.load_state_dict(state_dict, strict=False)

    if cfg.test.device == 'cuda':
        # NOTE(review): this binds a local that shadows the module-level
        # _NUM_CUDA_DEVICES; the global stays 0 — confirm this is intended.
        _NUM_CUDA_DEVICES = torch.cuda.device_count()
        if _NUM_CUDA_DEVICES > 1:
            model = nn.DataParallel(model)
        logger.info(f"Evaluating model on {_NUM_CUDA_DEVICES} cuda device(s)")

    query_length = meta_dataset.num_query_imgs
    return _eval_impl(model, cfg.test.device, cfg.data.name,
                      valid_loader, query_length,
                      feat_norm=cfg.test.feat_norm,
                      remove_junk=cfg.test.remove_junk,
                      max_rank=cfg.test.max_rank,
                      output_dir=cfg.test.output_dir,
                      lambda_=cfg.test.lambda_,
                      rerank=cfg.test.rerank,
                      split=cfg.test.split,
                      debug=debug)


def _eval_impl(model, device, dataset, valid_loader, query_length,
               feat_norm=True,
               remove_junk=True,
               max_rank=50,
               output_dir='',
               rerank=False,
               lambda_=0.5,
               split=0,
               debug=False):
    """Run inference over ``valid_loader`` and compute CMC / mAP.

    Args:
        model: network whose forward returns a dict with global/local
            features, part weights and classification scores.
        device: device string the batch tensors are moved to.
        dataset: dataset name; 'vehicleid' (case-insensitive) triggers the
            10-fold gallery re-split evaluation protocol.
        valid_loader: DataLoader yielding query images followed by gallery
            images (assumed layout — query_length items come first).
        query_length: number of query images at the head of the loader.
        feat_norm: L2-normalize features before matching (passed to metric).
        remove_junk: remove junk matches from the ranking (passed to metric).
        max_rank: maximum rank evaluated for the CMC curve.
        output_dir: where metric outputs and debug artifacts are written.
        rerank: apply re-ranking inside the metric.
        lambda_: weighting factor forwarded to the metric.
        split: gallery split index forwarded to metric.compute().
        debug: dump tensors/visualizations for the first batch and copy the
            top-10 ranked gallery images per query into output_dir/debug/result.

    Returns:
        Tuple ``(cmc, mAP)`` where ``cmc`` is an array of rank accuracies.
    """
    processed = False  # debug artifacts are generated for the first batch only

    metric = Clck_R1_mAP(query_length, max_rank=max_rank, rerank=rerank, remove_junk=remove_junk,
                         feat_norm=feat_norm, output_path=output_dir, lambda_=lambda_, device=device)
    model.eval()
    # Outputs whose full tensor dump would be excessively large: shape only.
    # FIX: must be a 1-tuple — the original bare ("...") was a plain string,
    # which made `key not in _BRIEF_ONLY` a substring check, not a key check.
    _BRIEF_ONLY = ("attention_weights",)
    with torch.no_grad():
        for batch in tqdm(valid_loader):
            for name, item in batch.items():
                if isinstance(item, torch.Tensor):
                    batch[name] = item.to(device)

            output = model(**batch)
            global_feat = output["global_feat"][0].to(device)
            local_feat = output["local_feat"][0].to(device)
            vis_score = output["part_weights"].to(device)
            global_cls_score = output["global_cls_score"][0].to(device)
            local_cls_score = output["local_cls_score"][0].to(device)

            if debug and not processed:
                # Local import: cv2 is only needed for debug visualization.
                # Hoisted out of the per-key loop where it was re-executed.
                import cv2 as cv

                # Recreate a clean debug directory tree for this run.
                debug_dir = Path(output_dir) / 'debug'
                if os.path.exists(debug_dir):
                    shutil.rmtree(debug_dir)
                os.makedirs(str(debug_dir), exist_ok=False)
                vis_dir = debug_dir / 'visualize'
                os.makedirs(str(vis_dir), exist_ok=False)
                for key, value in output.items():
                    print(key, "shape=", value.shape)
                    if key not in _BRIEF_ONLY:
                        with open(debug_dir / (key + '.txt'), 'w') as debug_file:
                            print(value, file=debug_file)
                    # visualize asff weight map(s): one color channel per scale
                    if 'asff' in key:
                        for i, w in enumerate(value):
                            dst_size = (224, 224)
                            sample = cv.imread(batch["image_path"][i])
                            background = cv.resize(sample, dst_size, interpolation=cv.INTER_AREA)
                            level, height, width = w.shape
                            colored_weight = np.zeros((height, width, 3)).astype(np.uint8)
                            for scale in range(level):
                                weight = (w[scale].cpu().numpy() * 255).astype(np.uint8)
                                colored_weight[..., scale] += weight
                            foreground = cv.resize(colored_weight, dst_size, interpolation=cv.INTER_NEAREST)
                            visualization = cv.addWeighted(background, 0.2, foreground, 0.8, 0)
                            cv.imwrite(str(vis_dir / f"{key}_{i}.png"), visualization)
                    # visualize fused feature map(s): positive activations go to
                    # channel 0, negative activations to channel 1
                    if 'fused_feat' in key:
                        for i, w in enumerate(value):
                            dst_size = (224, 224)
                            sample = cv.imread(batch["image_path"][i])
                            background = cv.resize(sample, dst_size, interpolation=cv.INTER_AREA)
                            height, width = w.shape
                            colored_weight = np.zeros((height, width, 3)).astype(np.uint8)
                            positives = w[w > 0]
                            negatives = w[w < 0]
                            # FIX: normalize each side by its extreme value, falling
                            # back to 1 when a side is empty. The original compared a
                            # tensor against torch.Size([]), which never performed
                            # this emptiness check correctly.
                            w_max = torch.max(positives) if positives.numel() > 0 else 1
                            w_min = torch.min(negatives) if negatives.numel() > 0 else 1
                            colored_weight[..., 0] = (((w > 0) * w / w_max).cpu().numpy() * 255).astype(np.uint8)
                            colored_weight[..., 1] = (((w < 0) * w / w_min).cpu().numpy() * 255).astype(np.uint8)
                            foreground = cv.resize(colored_weight, dst_size, interpolation=cv.INTER_NEAREST)
                            visualization = cv.addWeighted(background, 0.2, foreground, 0.8, 0)
                            cv.imwrite(str(vis_dir / f"{key}_{i}.png"), visualization)

            metric.update((global_feat.detach().cpu(), local_feat.detach().cpu(), vis_score.cpu(),
                           global_cls_score.detach().cpu(), local_cls_score.detach().cpu(),
                           batch["id"].cpu(), batch["cam"].cpu(), batch["image_path"]))
            processed = True

    cmc = 0.
    mAP = 0.
    if dataset.lower() == 'vehicleid':
        # VehicleID protocol: average metrics over 10 random gallery re-splits.
        # FIX: bind the per-split output to `result` so the debug ranking dump
        # below also works on this branch (previously it raised NameError).
        mAPs = []
        cmcs = []
        for i in range(10):
            metric.resplit_for_vehicleid()
            result = metric.compute()
            cmcs.append(result['cmc'])
            mAPs.append(result['mAP'])
        cmc = np.mean(cmcs, axis=0)
        mAP = np.mean(mAPs)
    else:
        result = metric.compute(split=split)
        cmc = result['cmc']
        mAP = result['mAP']

    if debug:
        # Copy each query image together with its top-10 gallery matches;
        # file names encode rank and whether the match is correct.
        result_dir = Path(output_dir) / 'debug' / 'result'
        if os.path.exists(result_dir):
            shutil.rmtree(result_dir)
        os.makedirs(result_dir)
        q_pids = np.asarray(metric.pids[:metric.num_query])
        g_pids = np.asarray(metric.pids[metric.num_query:])
        pick = np.argsort(result['distmat'])
        pick = pick[:, :10]
        for q_index, order in enumerate(pick):
            correction = g_pids[order] == q_pids[q_index]
            cur_res_dir = result_dir / str(q_index + 1)
            os.makedirs(cur_res_dir)
            shutil.copy(metric.paths[q_index], cur_res_dir / 'query.jpg')
            for img_index in range(len(order)):
                img_name = metric.paths[metric.num_query + order[img_index]]
                cur_img_name = cur_res_dir / f"{str(img_index + 1)}_{correction[img_index]}_{Path(img_name).name}"
                shutil.copy(img_name, cur_img_name)

    metric.reset()
    logger.info(f"mAP: {mAP:.1%}")
    for r in [1, 5, 10]:
        logger.info(f"CMC curve, Rank-{r:<3}:{cmc[r - 1]:.2%}")
    return cmc, mAP


if __name__ == '__main__':
    import sys, traceback

    try:
        clk()
    except KeyboardInterrupt:
        # FIX: KeyboardInterrupt derives from BaseException, not Exception,
        # so the handler below could never catch it — the "User aborted"
        # branch was dead code.
        logger.info("User aborted")
    except Exception as exp:
        # Log a compact, one-line-per-frame traceback through the app logger.
        error_class = exp.__class__.__name__
        # FIX: exp.args may be empty (e.g. `raise RuntimeError()`); fall back
        # to str(exp) instead of raising IndexError while reporting.
        detail = exp.args[0] if exp.args else str(exp)
        _, _, tb = sys.exc_info()
        for lastCallStack in traceback.extract_tb(tb):
            logger.error(f"File \"{lastCallStack.filename}\", line {lastCallStack.lineno}, "
                         f"in {lastCallStack.name}: {lastCallStack.line}")
        logger.error(f"[{error_class}] {detail}")