# Copyright (c) OpenMMLab. All rights reserved.
import os
import argparse
from mmengine.config import Config
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import setup_cache_size_limit_of_dynamo
from mmdet.engine.hooks.utils import trigger_visualization_hook
from mmdet.evaluation import DumpDetResults
from metrics import CoCoExMetric

# =======================================================================
# Train a model with the mmdetection framework. Parameters:
# base_cfg_path:            path to the base mmdetection training config file
# user_cfg_dict:            user-supplied config dict, merged into the base config


def train(base_cfg_path, user_cfg_dict, output_dir, *, amp=False,
          auto_scale_lr=True):
    """Train an mmdetection model from a base config plus user overrides.

    Args:
        base_cfg_path (str): Path to the base mmdetection training config.
        user_cfg_dict (dict): User-supplied config dict; merged into the
            base config. It is NOT mutated by this function.
        output_dir (str): Working directory for checkpoints and logs.
        amp (bool): Enable automatic mixed-precision training.
        auto_scale_lr (bool): Enable automatic learning-rate scaling.

    Raises:
        RuntimeError: If ``auto_scale_lr`` is requested but the config lacks
            an ``auto_scale_lr`` section with ``enable``/``base_batch_size``.
    """
    # Reduce the number of repeated dynamo compilations and improve
    # training speed.
    setup_cache_size_limit_of_dynamo()

    # Load the base config and force a single-process (non-distributed) run.
    cfg = Config.fromfile(base_cfg_path)
    cfg.launcher = 'none'
    cfg.work_dir = output_dir

    # Merge the user-defined config into the base config.
    if cfg.train_dataloader.dataset.type == 'RepeatDataset':
        # RepeatDataset wraps the real dataset, so the user's dataset must
        # replace the *nested* dataset while keeping the base train pipeline.
        if 'train_dataloader' in user_cfg_dict and 'dataset' in user_cfg_dict['train_dataloader']:
            cfg.train_dataloader.dataset.dataset = user_cfg_dict['train_dataloader']['dataset']
            cfg.train_dataloader.dataset.dataset.pipeline = cfg.train_pipeline
        # Drop the dataset entry before merging the remainder so it is not
        # merged a second time. Copy both the outer dict and the nested
        # 'train_dataloader' dict: the previous shallow `.copy()` popped
        # 'dataset' from the caller's own dict as a side effect.
        user_cfg_dict_others = dict(user_cfg_dict)
        if 'train_dataloader' in user_cfg_dict_others:
            train_dl_cfg = dict(user_cfg_dict_others['train_dataloader'])
            train_dl_cfg.pop('dataset', None)
            user_cfg_dict_others['train_dataloader'] = train_dl_cfg
        # Merge the remaining user settings into cfg.
        cfg.merge_from_dict(user_cfg_dict_others)
    else:
        cfg.merge_from_dict(user_cfg_dict)

    # Enable automatic mixed-precision training.
    if amp:
        cfg.optim_wrapper.type = 'AmpOptimWrapper'
        cfg.optim_wrapper.loss_scale = 'dynamic'

    # Enable automatic learning-rate scaling; requires the config to
    # declare auto_scale_lr.enable and auto_scale_lr.base_batch_size.
    if auto_scale_lr:
        if 'auto_scale_lr' in cfg and \
                'enable' in cfg.auto_scale_lr and \
                'base_batch_size' in cfg.auto_scale_lr:
            cfg.auto_scale_lr.enable = True
        else:
            raise RuntimeError('Can not find "auto_scale_lr" or '
                               '"auto_scale_lr.enable" or '
                               '"auto_scale_lr.base_batch_size" in your'
                               ' configuration file.')

    # Build the runner: the default Runner, or a customized runner from
    # the registry when 'runner_type' is set in the config.
    if 'runner_type' not in cfg:
        runner = Runner.from_cfg(cfg)
    else:
        runner = RUNNERS.build(cfg)

    # Start training.
    runner.train()

# ==============================================================================


def eval(cfg_path, user_cfg_dict, checkpoint_path, output_dir):
    """Evaluate a trained mmdetection model on the test set.

    Runs mmengine's test loop with the evaluators switched to
    ``CoCoExMetric``, renders visualizations into ``infer_images``, and
    dumps raw detection results to ``<output_dir>/infers.pkl``.

    NOTE(review): the name shadows the builtin ``eval``; kept unchanged
    because it is the public interface callers rely on.

    Args:
        cfg_path (str): Path to the mmdetection config file.
        user_cfg_dict (dict): User config overrides, merged into the config.
        checkpoint_path (str): Checkpoint file to load weights from.
        output_dir (str): Working directory for metrics and dumped results.
    """
    # Stand-in for the CLI args that trigger_visualization_hook expects.
    FLAGS = argparse.Namespace(
        show_dir='infer_images', show=False, wait_time=2)

    # Reduce the number of repeated dynamo compilations and improve
    # testing speed.
    setup_cache_size_limit_of_dynamo()

    # Load the config, apply user overrides, and point it at the checkpoint.
    cfg = Config.fromfile(cfg_path)
    cfg.launcher = 'none'
    cfg.merge_from_dict(user_cfg_dict)
    cfg.load_from = checkpoint_path
    cfg.work_dir = output_dir
    cfg.show_dir = FLAGS.show_dir

    # Enable the visualization hook so predictions are rendered to show_dir.
    if cfg.show_dir:
        cfg = trigger_visualization_hook(cfg, FLAGS)

    # Switch both evaluators to CoCoExMetric and direct their output to a
    # per-checkpoint subdirectory (e.g. 'test_epoch_12').
    basename = os.path.basename(checkpoint_path).split('.')[0]
    output_metric_dir = os.path.join(output_dir, f'test_{basename}')
    cfg.val_evaluator.type = 'CoCoExMetric'
    cfg.val_evaluator.output_dir = output_metric_dir
    cfg.test_evaluator.type = 'CoCoExMetric'
    cfg.test_evaluator.output_dir = output_metric_dir

    # Build the runner: the default Runner, or a customized runner from
    # the registry when 'runner_type' is set in the config.
    if 'runner_type' not in cfg:
        runner = Runner.from_cfg(cfg)
    else:
        runner = RUNNERS.build(cfg)

    # Append a DumpDetResults "metric" so raw predictions are pickled.
    # (The previous None-check and .pkl assert were dead code: the path is
    # built right here and always ends with '.pkl'.)
    output_path = os.path.join(output_dir, 'infers.pkl')
    runner.test_evaluator.metrics.append(
        DumpDetResults(out_file_path=output_path))

    # Start testing.
    runner.test()
