import argparse
import yaml
import shutil
import os
import time
import torch
import torch.multiprocessing as mp
from common.utils.config_ import cfg, cfg_from_yaml_file, merge_new_config_file
from core.main import main_worker
# Limit intra-op CPU threading to one thread; the spawned worker processes
# are expected to manage their own parallelism.
torch.set_num_threads(1)


# Command-line interface for the training launcher.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--gpu', type=int, default=None, help='GPU id to use.')
parser.add_argument('--cfg', type=str, default='./experiments/base_config.yaml')


def main():
    """Parse CLI args, build the merged config, and launch distributed training.

    Loads the base YAML config, overlays the experiment config given via
    ``--cfg``, toggles cuDNN, prepares the save directory (snapshotting the
    config file there for reproducibility), and spawns one worker process per
    visible GPU via ``torch.multiprocessing.spawn``.

    Raises:
        NotImplementedError: if ``config.distributed`` is false — only the
            distributed code path is implemented.
    """
    print('load config file')
    args = parser.parse_args()
    # Start from the base config, then overlay the experiment-specific file.
    config = cfg_from_yaml_file('./experiments/base_config.yaml', cfg)
    # Fixed: original had a duplicated `config = config = ...` assignment.
    config = merge_new_config_file(config, args.cfg)

    # Enable/disable cuDNN according to the config (single branch instead of
    # two duplicated ones).
    torch.backends.cudnn.enabled = bool(config.cudnn)
    print('use cudnn' if config.cudnn else 'do not use cudnn')

    # exist_ok avoids the race between the existence check and makedirs that
    # the original exists()/makedirs() pair had.
    os.makedirs(config.save_path, exist_ok=True)
    # Snapshot the experiment config into the save directory for reproducibility.
    shutil.copy(args.cfg, config.save_path)

    # Restrict visible devices BEFORE counting GPUs so device_count() reflects
    # the configured selection.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(config.used_device)
    ngpu = torch.cuda.device_count()
    print('num of available gpu is : ', ngpu)

    if config.distributed:
        # One worker per GPU; each worker is called as main_worker(rank, ngpu, config).
        mp.spawn(main_worker, nprocs=ngpu, args=(ngpu, config))
    else:
        # Original code used `assert config.distributed` here, which always
        # fails, is stripped under `python -O`, and carries no message.
        raise NotImplementedError(
            'non-distributed training is not supported; '
            'set `distributed: true` in the config')

# Script entry point: announce startup, then run the launcher.
if __name__ == '__main__':
    print('begin!!!')
    main()
