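# Training entry point: runs Trainer on a single device, or spawns one
# DistributedDataParallel worker process per GPU via torch.multiprocessing.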
import logging
import os
import sys

ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
if ROOT_DIR not in sys.path:
    sys.path.insert(0, ROOT_DIR)

from src.utils.util import setup_logger
from src.config.config_args import *
from src.processor.trainer import Trainer

import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import numpy as np
import random
from torch.backends import cudnn
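# Seed Python, NumPy, and PyTorch RNGs; optionally trade speed for
# reproducibility through the cuDNN deterministic/benchmark flags.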
def init_seeds(seed=0, cuda_deterministic=True):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Speed-reproducibility tradeoff: https://pytorch.org/docs/stable/notes/randomness.html
    if cuda_deterministic:  # slower, more reproducible
        cudnn.deterministic = True
        cudnn.benchmark = False
    else:  # faster, less reproducible
        cudnn.deterministic = False
        cudnn.benchmark = True
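# Derive the single-node DDP topology (GPUs per node and world size)
# from the parsed list of GPU ids.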
def device_config(args):
    try:
        args.nodes = 1
        args.ngpus_per_node = len(args.gpu_ids)
        args.world_size = args.nodes * args.ngpus_per_node
    except RuntimeError as e:
        print(e)
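# Join the NCCL process group; every spawned worker must call this before any
# collective communication. The TCP port is currently hard-coded.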
def setup(rank, world_size):
    # initialize the process group
    dist.init_process_group(
        backend='nccl',
        # init_method=f'tcp://127.0.0.1:{args.port}',
        init_method='tcp://127.0.0.1:12361',
        world_size=world_size,
        rank=rank
    )
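# Per-process DDP entry point: binds the process to its GPU, seeds RNGs with a
# rank-dependent offset, configures logging (full logs on rank 0 only), and
# runs training before tearing down the process group.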
def main_worker(rank, args):
    setup(rank, args.world_size)
    torch.cuda.set_device(rank)
    args.num_workers = int(args.num_workers / args.ngpus_per_node)
    args.device = torch.device(f"cuda:{rank}")
    args.rank = rank
    init_seeds(1 + rank)

    log_name = 'train_' + args.save_name
    setup_logger(logger_name=log_name, root=args.save_dir,
                 level=logging.INFO if rank in [-1, 0] else logging.WARN,
                 screen=True, tofile=True)
    logger = logging.getLogger(log_name)
    logger.info(str(args))

    Trainer(args, logger).run()
    cleanup()
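# Parse arguments and dispatch to either DDP training (one process per GPU)
# or a plain single-process run.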
def main():
    args = parser.parse_args()
    check_and_setup_parser(args)
    if args.ddp:
        # Multi-GPU training: spawn one worker process per GPU.
        mp.set_sharing_strategy('file_system')
        device_config(args)
        mp.spawn(
            main_worker,
            nprocs=args.world_size,
            args=(args, )
        )
    else:
        # Single-device training: log and run directly in this process.
        log_name = 'train_' + args.save_name
        setup_logger(logger_name=log_name, root=args.save_dir, screen=True, tofile=True)
        logger = logging.getLogger(log_name)
        logger.info(str(args))
        args.rank = -1
        Trainer(args, logger).run()
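# Tear down the distributed process group once training finishes.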
def cleanup():
    dist.destroy_process_group()
if __name__ == "__main__":
    main()