from datetime import timedelta
from doctest import master
from re import T
import torch
from torch import nn, optim
import torch.distributed as dist
from torch.distributed import init_process_group, destroy_process_group 
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
import torch.multiprocessing as mp
import os
import argparse


def ddp_setup(rank, world_size, master_addr="localhost", master_port="12355",
              backend="nccl"):
    """
    Set up the distributed process group for this worker.

    The MASTER node (the machine running the rank-0 process) coordinates
    communication between all processes across all nodes.

    Args:
        rank: Unique identifier of each process.
        world_size: Total number of processes.
        master_addr: Address of the rank-0 host. Defaults to "localhost"
            because this is a single-machine experiment.
        master_port: Any free port on the master host.
        backend: Collective-communication backend; "nccl" for NVIDIA GPUs.
    """
    os.environ["MASTER_ADDR"] = master_addr
    os.environ["MASTER_PORT"] = master_port
    init_process_group(
        backend=backend,
        rank=rank,
        world_size=world_size
    )
    # Pin this process to its own GPU so subsequent collectives and
    # allocations target the correct device.
    torch.cuda.set_device(rank)

def run(rk, ws):
    """
    Worker entry point for mp.spawn: join the process group over a TCP
    rendezvous, then tear it down.

    Args:
        rk: Rank of this process (prepended automatically by mp.spawn).
        ws: Total number of processes (world size).
    """
    print('init begin')
    # These env vars are redundant with init_method below, but harmless;
    # kept for tools that read the standard variables directly.
    os.environ['MASTER_ADDR'] = '210.47.18.191'
    os.environ['MASTER_PORT'] = '8000'
    # BUG FIX: init_process_group() accepts no MASTER_ADDR/MASTER_PORT
    # keyword arguments -- passing them raised TypeError before the
    # rendezvous even started. The address is fully specified by init_method.
    init_process_group(backend='nccl', init_method='tcp://210.47.18.191:8000',
                       rank=rk, world_size=ws)
    print('init end')  # BUG FIX: was a copy-pasted 'init begin'
    dist.destroy_process_group()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='simple distributed training job')
    parser.add_argument('--total-epochs', type=int, default=50, help='Total epochs to train the model')
    parser.add_argument('--save-every', type=int, default=10, help='How often to save a snapshot')
    parser.add_argument('--batch_size', default=32, type=int, help='Input batch size on each device (default: 32)')
    # NOTE(review): args are parsed but not yet forwarded to the workers.
    args = parser.parse_args()

    world_size = 2  # number of worker processes (one per GPU) on this node
    # mp.spawn prepends the rank as the first positional argument, so each
    # worker is invoked as run(rank, world_size).
    mp.spawn(run, args=(world_size,), nprocs=world_size, join=True)