from dis import dis
from optparse import OptionGroup
from re import T
import torch
from torch import nn, optim
import torch.distributed as dist
from torch.distributed import init_process_group, destroy_process_group 
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
import torch.multiprocessing as mp
import os
import argparse

def ddp_setup(rank, world_size):
    """
    Set up the distributed process group for one worker.

    Args:
        rank: Unique identifier of each process
        world_size: Total number of processes
    """
    # MASTER node (the host running the rank-0 process) coordinates the
    # communication between all processes across all nodes.
    os.environ["MASTER_ADDR"] = "210.47.18.191"  # rank-0 host; use "localhost" for single-node runs
    os.environ["MASTER_PORT"] = "8000"           # any free port
    # Honor the caller's arguments instead of hard-coding RANK=0/WORLD_SIZE=2,
    # otherwise every spawned worker would claim to be rank 0.
    os.environ["RANK"] = str(rank)
    os.environ["WORLD_SIZE"] = str(world_size)
    # env:// rendezvous: init_process_group reads the four env vars above.
    init_process_group(
        backend="nccl"
    )
    # Pin this process to its own GPU so collectives don't all hit device 0.
    torch.cuda.set_device(rank)

def reduce_tensor(tensor: torch.Tensor) -> torch.Tensor:
    """
    Return the element-wise SUM of *tensor* across all ranks.

    The input is cloned first so the caller's tensor is not modified
    in place by the collective.

    Args:
        tensor: tensor living on this rank's device.

    Returns:
        A new tensor holding the all-reduced (summed) values; identical
        on every rank after the call.
    """
    rt = tensor.clone()
    # dist.reduce_op was a deprecated alias and has been removed from
    # modern PyTorch; dist.ReduceOp is the supported spelling (and is what
    # the rest of this file uses).
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    # rt /= dist.get_world_size()  # uncomment to get the mean instead of the sum
    return rt

def distributed_concat(cat_tensor, num_total_examples):
    """
    Gather each rank's tensor and concatenate them along dim 0.

    all_gather requires every rank to contribute a tensor of identical
    shape, so each rank's tensor is first truncated to the minimum
    first-dimension length across all ranks (found with an all-reduce MIN).

    Args:
        cat_tensor: this rank's CUDA tensor; its first dimension may differ
            between ranks before truncation.
        num_total_examples: number of gather slots to allocate — callers
            pass dist.get_world_size(); all_gather needs exactly one slot
            per rank, so any other value would be incorrect.

    Returns:
        The truncated per-rank tensors concatenated along dim 0
        (identical on every rank).
    """
    # Agree on the smallest first-dimension length over all ranks.
    min_len = torch.tensor(len(cat_tensor)).cuda()
    dist.all_reduce(min_len, op=dist.ReduceOp.MIN)
    min_len = min_len.item()
    trimmed = cat_tensor[:min_len]
    # One receive buffer per participating rank.
    gather_list = [trimmed.clone() for _ in range(num_total_examples)]
    dist.all_gather(gather_list, trimmed)
    return torch.cat(gather_list, dim=0)


def run(local_rank):
    """
    Worker entry point for mp.spawn: join the process group, then exercise
    several torch.distributed collectives on small CUDA tensors.

    Args:
        local_rank: local process index supplied by mp.spawn (unused here;
            the global rank comes from the RANK env var instead).
    """
    print('init begin')
    # env:// rendezvous — MASTER_ADDR is the node running rank 0.
    os.environ["MASTER_ADDR"] = "210.47.18.191"  # use "localhost" for single-node runs
    os.environ["MASTER_PORT"] = "8000"           # any free port
    # NOTE(review): RANK is hard-coded to 0 and WORLD_SIZE to 3, so this
    # process blocks here until two more ranks join from other machines.
    os.environ["RANK"] = "0"
    os.environ["WORLD_SIZE"] = "3"
    init_process_group(
        backend="nccl"
    )
    # Fixed duplicated status message: this line previously printed
    # 'init begin' a second time.
    print('init end')

    # all_reduce SUM: every rank ends up with the element-wise sum.
    t1 = torch.tensor([0, 1, 2, 3, 4, 5]).cuda()
    dist.all_reduce(t1, op=dist.ReduceOp.SUM)
    print(f't1 is {t1}')

    # t2 is never reduced — printed as a local-only baseline for comparison.
    t2 = torch.tensor([0, 1, 2, 3, 4, 5]).cuda()
    print(f't2 is {t2}')

    # reduce SUM: only rank 0 receives the summed result; other ranks keep
    # their (possibly partially-updated) local values.
    t3 = torch.tensor([[1, 2, 3], [1, 2, 3], [1, 2, 3]]).cuda()
    dist.reduce(t3, dst=0, op=dist.ReduceOp.SUM)
    print(f't3 is {t3}')

    # Gather-and-concat across ranks (truncates to the min length first).
    t4 = torch.tensor([[1, 1], [2, 2], [3, 3], [4, 4], [100, 100]]).cuda()
    print(t4.device)
    out = distributed_concat(t4, dist.get_world_size())
    print(f'output is {out}')

    # Scalar reduce MIN to rank 0.
    t6 = torch.tensor(21).cuda()
    dist.reduce(t6, dst=0, op=dist.ReduceOp.MIN)
    print(f't6 is {t6}')

    # Same gather on a 1-D tensor.
    t5 = torch.tensor([1, 2, 3, 4, 5, 6]).cuda()
    out1 = distributed_concat(t5, dist.get_world_size())
    print(f'out1 is {out1}')

    # Synchronize all ranks before tearing the group down.
    dist.barrier()
    dist.destroy_process_group()


if __name__ == '__main__':
    # Dump the inherited environment for debugging the rendezvous setup
    # (MASTER_ADDR / MASTER_PORT / RANK / WORLD_SIZE, if a launcher set them).
    print(os.environ)
    # print("|| MASTER_ADDR:",os.environ["MASTER_ADDR"],
    #     "|| MASTER_PORT:",os.environ["MASTER_PORT"],
    #     "|| LOCAL_RANK:",os.environ["LOCAL_RANK"],
    #     "|| RANK:",os.environ["RANK"], 
    #     "|| WORLD_SIZE:",os.environ["WORLD_SIZE"])
    print()

    # os.environ["MASTER_ADDR"] = "123.56.105.73"
    # # os.environ["MASTER_ADDR"] = "210.47.18.194"
    # os.environ["MASTER_PORT"] = "9000"
    # os.environ["RANK"] = "0"
    # os.environ["WORLD_SIZE"] = "1"
    # ddp_setup(0, 2)
    # Spawn a single local worker (mp.spawn defaults to nprocs=1), passing
    # it its local rank (0) as the first argument.
    # NOTE(review): run() hard-codes WORLD_SIZE=3, so this one local process
    # will block in init_process_group until two more ranks join from other
    # machines — confirm the peer nodes are launched alongside this script.
    mp.spawn(run)
    # The commented-out lines below record earlier rendezvous experiments
    # (tcp:// init_method, TCPStore, manual barrier/teardown) kept for
    # reference while debugging multi-node connectivity.
    # print('init begin')
    # # init_process_group(init_method='tcp://210.47.18.191:8000', backend='nccl', rank=1, world_size=2)
    # init_process_group(backend='nccl')
    # dist.barrier()
    # print('init end')
    # dist.destroy_process_group()
    # mp.spawn(run, args=(world_size,), nprocs=world_size, join=True)
    # init_process_group(backend='nccl', init_method='tcp://210.47.18.191:8000', rank=0, world_size=2)
    # # server_store = dist.TCPStore("10.106.144.104", 8000, 2, True, timedelta(seconds=30))
    # # init_process_group(init_method='tcp://10.106.144.104:8000', backend='nccl', rank=0, world_size=1)
    # print(dist.is_available())
    # dist.destroy_process_group()
    # print('end')