import torch
import os.path
import random
import argparse
import numpy as np
import torch.multiprocessing as mp
from wwengine.datasets import (build_dataset, build_dataloader)
from wwengine.models import build_model
from wwengine.config import Config
from wwengine.runner import (EpochBasedRunner, build_optimizer)
from wwengine.utils import get_time_str, get_root_logger, Recorder
from wwengine.runner.evaluation import mAP3d

# Rendezvous endpoint for torch.distributed.init_process_group (called in
# main()). Every spawned worker process reads these to find rank 0.
# NOTE(review): hard-coded port — a stale process on 12355 will make
# init_process_group fail; consider making this configurable.
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'


def set_random_seed(seed=0, deterministic=False):
    """Seed every RNG used by the pipeline for reproducible runs.

    Applies the same seed to Python's ``random``, NumPy, and PyTorch
    (CPU plus all CUDA devices).

    Args:
        seed (int): Seed value shared by all RNGs. Default: 0.
        deterministic (bool): If True, force cuDNN into deterministic
            mode, i.e. set ``torch.backends.cudnn.deterministic`` to
            True and ``torch.backends.cudnn.benchmark`` to False.
            Default: False.
    """
    # The four seeding calls are independent of one another.
    for seed_rng in (random.seed, np.random.seed,
                     torch.manual_seed, torch.cuda.manual_seed_all):
        seed_rng(seed)
    if not deterministic:
        return
    # Trade cuDNN autotuning speed for bitwise-reproducible kernels.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def main(gpu_id, args):
    """Worker entry point for one distributed training process.

    Builds the dataloaders, model, optimizer and runner from the config
    file and runs the training workflow.

    Args:
        gpu_id (int): Rank of this process; also used as the CUDA device
            index and as the DDP device id.
        args (argparse.Namespace): Parsed CLI arguments; ``args.cfg`` is
            the path to the config file.
    """
    cfg = Config.fromfile(args.cfg)

    # Initialization: join the process group (MASTER_ADDR/MASTER_PORT are
    # exported at module import time) and pin this rank to its GPU.
    torch.distributed.init_process_group(
        "gloo", rank=gpu_id, world_size=cfg["RUNNER"]["gpu_num"])
    torch.cuda.set_device(gpu_id)
    # Same default seed (0) on every rank so data shuffling/augmentation
    # stays aligned across processes.
    set_random_seed()

    # set cudnn_benchmark (autotuner; speeds up fixed-shape workloads)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    tasks = cfg['TASKS']
    workflow = cfg["RUNNER"]['workflow']

    # Build one dataloader per workflow stage that has a matching entry in
    # cfg['DATASETS']; stages without data are skipped.
    data_loaders = dict()
    for dataset_type in workflow:
        if dataset_type not in cfg['DATASETS']:
            continue
        dataset = build_dataset(cfg['DATASETS'], tasks, dataset_type)
        data_loaders[dataset_type] = build_dataloader(
            dataset,
            cfg["RUNNER"]["samples_per_gpu"],
            cfg["RUNNER"]["workers_per_gpu"],
            cfg["RUNNER"]["gpu_num"])

    pointpillars = build_model(
        cfg['MODELS'], train_cfg=cfg['train_cfg'], test_cfg=cfg['test_cfg'])
    pointpillars = torch.nn.parallel.DistributedDataParallel(
        pointpillars.to(gpu_id), device_ids=[gpu_id])

    train_cfg = cfg["RUNNER"]["train_cfg"]
    optimizer = build_optimizer(pointpillars, train_cfg['optimizer'])

    recorder = Recorder(cfg)
    recorder.backup_cfg_file()
    evaluator = mAP3d(recorder.record_dir, cfg['class2label'])

    log_file = os.path.join(recorder.record_dir, 'recorder.log')
    logger = get_root_logger(log_file=log_file)

    runner = EpochBasedRunner(
        model=pointpillars,
        optimizer=optimizer,
        evaluator=evaluator,
        work_dir=recorder.record_dir,
        logger=logger,
        meta=None,
        gpu_id=gpu_id)

    # 'momentum_config' is optional: .get returns None when it is absent.
    # (The original called `try_get`, which is not a dict method and would
    # raise AttributeError on a plain mapping — TODO confirm train_cfg's
    # actual type against wwengine.config.)
    runner.register_training_hooks(train_cfg['lr_config'],
                                   train_cfg['optimizer_config'],
                                   train_cfg['checkpoint_config'],
                                   train_cfg['log_config'],
                                   train_cfg.get('momentum_config'))

    # Resuming (restores epoch counter and optimizer state) takes priority
    # over a plain weight load; .get tolerates configs that omit the keys.
    if cfg["RUNNER"].get('resume_from'):
        runner.resume(cfg["RUNNER"]['resume_from'])
    elif cfg["RUNNER"].get('load_from'):
        runner.load_checkpoint(cfg["RUNNER"]['load_from'])

    runner.run(data_loaders, workflow, cfg["RUNNER"]['max_epoch'])

    print('finished')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Configuration Parameters')
    parser.add_argument('--cfg', type=str,
                        default='../unit_test/files/default.json',
                        help='Path to the training config file.')
    args = parser.parse_args()

    # NOTE(review): nprocs is fixed at 1 while main() sizes the process
    # group from cfg["RUNNER"]["gpu_num"]; with gpu_num > 1 the single
    # spawned rank would block in init_process_group waiting for peers.
    # Spawn one worker per configured GPU once that mismatch is resolved.
    mp.spawn(main,
             args=(args,),
             nprocs=1,
             join=True)
    # The original file then constructed an EpochBasedRunner from the
    # undefined names `net`, `work_dir` and `logger` (and printed `net`),
    # which raised NameError after training finished; that dead code and a
    # block of commented-out meta/env bookkeeping have been removed.
