import os
import torch.distributed
import argparse
import yaml
import config
import os
import train


def init():
    """Parse CLI arguments, optionally merged over a YAML config, and return the config.

    If ``--yaml`` is given, the YAML file is loaded first and then every CLI
    argument is written over it, so command-line values always win. Otherwise a
    default ``config.DeepLearningYaml`` object is used as the base.

    Returns:
        The configuration object with CLI arguments applied to its ``__dict__``.
    """
    parser = argparse.ArgumentParser(description='change experiment args')
    parser.add_argument('--batch-size', help='batch size', type=int, default=8)
    parser.add_argument('--image-size', help='cropped image size', type=int, default=512)
    parser.add_argument('--num-workers', help='number of workers', type=int, default=4)
    parser.add_argument('--lr', help='the base learning rate', type=float, default=0.001)
    parser.add_argument('--dir', help='dataset root', type=str)
    parser.add_argument('--dataset', help='choose which dataset,[ade20k,voc2012]', type=str, default='ade20k')
    parser.add_argument('--gpus', help='gpus devices,if gpus is -1, it will use cpu', type=int, nargs='+', default=[0])
    parser.add_argument('--epochs', help='epochs', type=int, default=100)
    parser.add_argument('--yaml', help='yaml file path, edit yaml file for setting more experiment args', type=str)
    # BUG FIX: `type=bool` is broken with argparse -- bool("False") is True, so
    # ANY value passed on the command line (even "False"/"0") enabled wandb.
    # A store_true flag gives the intended on/off semantics: absent -> False,
    # `--wandb` present -> True.
    parser.add_argument('--wandb', help='use wandb', action='store_true', default=False)
    args = parser.parse_args()
    if args.yaml is not None:
        with open(args.yaml, 'r') as f:
            # SECURITY NOTE(review): yaml.Loader is the *unsafe* full loader and
            # can instantiate arbitrary Python objects from the file. It appears
            # intentional here (the result is used as an object with __dict__,
            # not a plain dict, so the YAML presumably carries a python-object
            # tag -- confirm), but never point --yaml at untrusted input.
            deep_learning_yaml = yaml.load(f, yaml.Loader)
    else:
        deep_learning_yaml = config.DeepLearningYaml()

    # CLI arguments take precedence over anything loaded from the YAML file.
    deep_learning_yaml.__dict__.update(args.__dict__)
    return deep_learning_yaml


if __name__ == '__main__':
    # Use Distributed DataParallel mode for multi-GPU training.
    # Intended to be launched via torchrun (see the example command below),
    # which sets the LOCAL_RANK / WORLD_SIZE environment variables read here.

    deep_learning_args = init()
    rank = int(os.environ['LOCAL_RANK'])
    world_size = int(os.environ['WORLD_SIZE'])
    # NOTE(review): the 'gloo' backend works on CPU and GPU, but 'nccl' is the
    # usual choice for multi-GPU NVIDIA training -- confirm this is intentional.
    torch.distributed.init_process_group("gloo", rank=rank, world_size=world_size)
    try:
        train.train(rank, deep_learning_args)
    finally:
        # FIX: tear down the process group even if training raises, so worker
        # processes exit cleanly instead of leaving the group open (which can
        # produce hangs or shutdown warnings under torchrun).
        torch.distributed.destroy_process_group()


# torchrun --standalone --nnodes 1 --nproc_per_node 2 main.py --batch-size 8 --image-size 512 --num-workers 2 --lr 0.01 --dir /datasets/ADEChallengeData2016 --dataset ade20k --gpus 0 1 --epochs 50