import random
import torch
import torch.multiprocessing as mp
import torch.distributed as dist
import numpy as np
import argparse
import os
from tasks import Processor, Processor_ddp
from utils import get_config


def parse_args(argv=None):
    """Parse command-line arguments for training/testing.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            (passing a list makes the function testable without touching
            ``sys.argv``).

    Returns:
        argparse.Namespace with fields: gpu, config, seed, override, test,
        weights, output_dir, resumes, amp.
    """
    # NOTE: original description said "PaddleVideo" — this is a PyTorch
    # script, so the help text is corrected here.
    parser = argparse.ArgumentParser("PyTorch train script")
    parser.add_argument('--gpu',
                        type=str,
                        default='0,1',
                        help='set gpu ids')
    parser.add_argument('-c',
                        '--config',
                        type=str,
                        default='configs/recognition/agcn/agcn_fsd.yaml',
                        help='config file path')
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help='random seed')
    parser.add_argument('--override',
                        action='append',
                        default=[],
                        help='config options to be overridden')
    parser.add_argument('--test',
                        action='store_true',
                        help='whether to test a model')
    parser.add_argument('-w',
                        '--weights',
                        type=str,
                        default=None,
                        help='weights for finetuning or testing')
    parser.add_argument('-o',
                        '--output_dir',
                        type=str,
                        default=None,
                        help='the output dir for the training model')
    parser.add_argument('-r',
                        '--resumes',
                        type=str,
                        default=None,
                        help='file path for resuming')
    parser.add_argument('--amp',
                        action='store_true',
                        help='whether to open amp training.')

    return parser.parse_args(argv)


def same_seeds(seed=1234):
    """Seed every RNG in use (Python, NumPy, torch CPU/CUDA) and force
    deterministic cuDNN behavior so runs are reproducible.

    Args:
        seed: integer seed applied to all generators.
    """
    random.seed(seed)          # Python's built-in RNG
    np.random.seed(seed)       # NumPy RNG
    torch.manual_seed(seed)    # torch CPU RNG
    if torch.cuda.is_available():
        # Seed the current device and, for multi-GPU jobs, all devices.
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    # Deterministic conv algorithms; disable the auto-tuner, which would
    # otherwise pick algorithms non-deterministically.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def train_one_gpu(args):
    """Run single-GPU training (or testing, with --test) as configured.

    Args:
        args: argparse.Namespace produced by parse_args().
    """
    cfg = get_config(args.config, overrides=args.override)
    # Fold the CLI-only options into the config so the Processor sees them.
    cfg.update({
        'amp': args.amp,
        'output_dir': args.output_dir,
        'weights': args.weights,
        'resumes': args.resumes,
    })

    same_seeds(args.seed)
    processor = Processor(cfg)
    if args.test:
        processor.test(args.weights)
    else:
        processor.train()


def setup(rank, world_size):
    """Initialize the default process group for single-node DDP training.

    Args:
        rank: index of the current process (one process per GPU).
        world_size: total number of processes, equal to the GPU count.
    """
    # Respect an externally configured rendezvous address/port (e.g. set by
    # a job scheduler); only fall back to the fixed local defaults when the
    # variables are unset. The original code unconditionally overwrote them.
    os.environ.setdefault('MASTER_ADDR', 'localhost')
    os.environ.setdefault('MASTER_PORT', '12355')
    # Initialize the process group.
    dist.init_process_group(
        backend="nccl",  # communication backend: "nccl" (GPU) or "gloo"
        # init_method defaults to "env://", which reads MASTER_ADDR/PORT
        world_size=world_size,  # total number of processes (= GPU count)
        rank=rank  # this process's rank within the group
    )


def cleanup():
    """Tear down the default distributed process group (pairs with setup)."""
    dist.destroy_process_group()


def train_multi_gpus(rank, world_size, args):
    """Per-process entry point for single-node multi-GPU (DDP) runs.

    Spawned once per GPU by ``mp.spawn``.

    :param rank: current device/process rank
    :param world_size: total number of processes (= GPU count)
    :param args: argparse.Namespace produced by parse_args()
    :returns: None
    :rtype: None
    """
    setup(rank, world_size)
    # Only rank 0 prints the config to avoid duplicated output.
    cfg = get_config(args.config, overrides=args.override, show=(rank == 0))
    cfg.update({'rank': rank, 'amp': args.amp, 'output_dir': args.output_dir,
                'weights': args.weights, 'resumes': args.resumes})
    same_seeds(args.seed)
    proc = Processor_ddp(cfg)
    if args.test:
        # BUGFIX: previously, under --test, ranks other than 0 fell through
        # to the else branch and started *training*. In test mode only rank 0
        # evaluates; the other ranks simply idle until cleanup.
        if rank == 0:
            proc.test(args.weights)
    else:
        proc.train()
    cleanup()


if __name__ == '__main__':
    args = parse_args()
    # Normalize the comma-separated GPU list and restrict visible devices
    # before any CUDA context is created.
    gpu_list = [int(x) for x in args.gpu.split(',')]
    gpu_num = len(gpu_list)
    gpu_str = ','.join([str(x) for x in gpu_list])
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_str
    print('export CUDA_VISIBLE_DEVICES={}'.format(gpu_str))
    if gpu_num >= 2:  # only consider the case of Single-Node Multi-GPU
        print(torch.cuda.device_count(), gpu_num)
        # Explicit raise instead of assert: asserts are stripped under -O,
        # which would let an under-provisioned job spawn and fail obscurely.
        if torch.cuda.device_count() < gpu_num:
            raise RuntimeError(
                "The number of available GPUs must be at least the number requested")
        # Spawn one process per GPU; each receives its rank as first arg.
        mp.spawn(train_multi_gpus,
                 nprocs=gpu_num,
                 args=(gpu_num, args),
                 join=True)
    else:
        train_one_gpu(args)
