# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (C) 2021 THL A29 Limited, a Tencent company.  All rights reserved.
# The below software in this distribution may have been modified by THL A29 Limited ("Tencent Modifications").
# All Tencent Modifications are Copyright (C) THL A29 Limited.
import copy
import random
import warnings

import numpy as np
import torch
from mmcv.parallel import MMDistributedDataParallel, MMDataParallel
from mmcv.runner import DistSamplerSeedHook, build_optimizer, build_runner
from mmcv.runner.hooks import EvalHook, DistEvalHook

from drugood.core import DistOptimizerHook
from drugood.datasets import build_dataloader, build_dataset
from drugood.utils import get_root_logger

# TODO: import the optimizer hook from mmcv and delete the copy in drugood
try:
    from mmcv.runner import Fp16OptimizerHook
except ImportError:
    warnings.warn('DeprecationWarning: FP16OptimizerHook drugood will be '
                  'deprecated. Please install mmcv>=1.1.4.')
    from drugood.core import Fp16OptimizerHook


def set_random_seed(seed, deterministic=False):
    """Seed every relevant RNG (python ``random``, numpy, torch CPU/GPU).

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    # Seed all RNG sources in one pass; cuda.manual_seed_all is a no-op
    # when no GPU is present.
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
    if deterministic:
        cudnn = torch.backends.cudnn
        cudnn.deterministic = True
        cudnn.benchmark = False


def train_model(model,
                dataset,
                cfg,
                distributed=False,
                validate=False,
                timestamp=None,
                device='cuda',
                meta=None):
    """Build dataloaders, wrap the model, construct an mmcv runner with all
    training/eval hooks, and launch training.

    Args:
        model: Model to be trained (wrapped in MMDataParallel or
            MMDistributedDataParallel below).
        dataset: A dataset or a list/tuple of datasets, one per workflow
            stage.
        cfg: Full experiment config (mmcv ``Config``-like object); this
            function reads ``data``, ``optimizer``, ``runner``, hook configs,
            ``resume_from``/``load_from`` and ``workflow`` from it.
        distributed (bool): Whether to train with DistributedDataParallel.
            Default: False.
        validate (bool): Whether to register evaluation hooks for the
            non-train splits found in ``cfg.data``. Default: False.
        timestamp (str | None): Timestamp used to align .log and .log.json
            filenames on the runner.
        device (str): 'cuda' or 'cpu'; only consulted in the
            non-distributed branch. Default: 'cuda'.
        meta (dict | None): Extra meta info passed to the runner.

    Raises:
        ValueError: If ``device`` is neither 'cuda' nor 'cpu' (in the
            non-distributed branch).
    """
    logger = get_root_logger(cfg.log_level)

    # prepare data loaders: normalize to a list so the workflow can pair
    # each stage with its own loader
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.samples_per_gpu,
            cfg.data.workers_per_gpu,
            num_gpus=len(cfg.gpu_ids),
            dist=distributed,
            round_up=True,
            seed=cfg.seed) for ds in dataset]
    # put model on gpus
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)
    else:
        if device == 'cuda':
            model = MMDataParallel(
                model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
        elif device == 'cpu':
            model = model.cpu()
        else:
            raise ValueError(F'unsupported device name {device}.')

    # build runner
    # save cfg to runner
    # NOTE(review): optimizer build failures are logged and swallowed so
    # runners that do not need an optimizer can still be constructed —
    # confirm this best-effort behavior is intended.
    try:
        optimizer = build_optimizer(model, cfg.optimizer)
    except (TypeError, KeyError) as ex:
        logger.exception(ex)
        optimizer = None

    if cfg.get('runner') is None:
        # legacy configs only carry `total_epochs`; synthesize a runner
        # section and warn
        cfg.runner = {
            'type': 'EpochBasedRunner',
            'max_epochs': cfg.total_epochs
        }
        warnings.warn(
            'config is now expected to have a `runner` section, '
            'please set `runner` in your config.', UserWarning)

    # non-epoch-based (custom) runners additionally receive the full cfg
    kwarg = {} if cfg.runner.get('type') == "EpochBasedRunner" else dict(cfg=cfg)

    runner = build_runner(
        cfg.runner,
        default_args=dict(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=cfg.work_dir,
            logger=logger,
            meta=meta,
            **kwarg
        ))

    # an ugly workaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp

    # fp16 setting: fp16 takes priority, then the distributed optimizer
    # hook, otherwise the raw optimizer_config from cfg
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(
            **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
    elif distributed and 'type' not in cfg.optimizer_config:
        optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config

    # register hooks
    runner.register_training_hooks(
        cfg.lr_config,
        optimizer_config,
        cfg.checkpoint_config,
        cfg.log_config,
        cfg.get('momentum_config', None),
        custom_hooks_config=cfg.get('custom_hooks', None))

    if distributed:
        # re-seed the sampler each epoch so shuffling differs across epochs
        runner.register_hook(DistSamplerSeedHook())

    # register eval hooks: every dict-valued entry of cfg.data except
    # "train" is treated as an evaluation split
    if validate:
        val_datasets = [v for k, v in cfg.data.items() if isinstance(v, dict) and k not in ["train"]]
        for val in val_datasets:
            val_dataset = build_dataset(val, dict(test_mode=True))
            val_dataloader = build_dataloader(
                val_dataset,
                samples_per_gpu=cfg.data.samples_per_gpu,
                workers_per_gpu=cfg.data.workers_per_gpu,
                dist=distributed,
                num_gpus=len(cfg.gpu_ids),
                shuffle=False,
                round_up=True)
            eval_cfg = copy.deepcopy(cfg.get('evaluation', {}))
            if val.get("save_best", None):
                # prefix the metric with the split name so best checkpoints
                # from different splits do not collide
                eval_cfg["save_best"] = f'{val["split"]}:{val["save_best"]}'
                eval_cfg["rule"] = val["rule"]

            eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
            eval_hook = DistEvalHook if distributed else EvalHook
            # custom (non-epoch-based) runners expose a dedicated
            # register_validation_hooks entry point
            hook_builder = runner.register_hook if cfg.runner['type'] == 'EpochBasedRunner' \
                else runner.register_validation_hooks
            hook_builder(eval_hook(val_dataloader, **eval_cfg))

    # resume (optimizer + iteration state) takes priority over plain
    # checkpoint loading; revise_keys also remaps `backbone` into the
    # drugood `tasker.backbone` namespace
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from,
                               revise_keys=[(r'^module.', ''), (r'backbone', 'tasker.backbone')])

    logger.info('start runner...')
    runner.run(data_loaders, cfg.workflow)
