import os
import torch
import torch.distributed as dist 


def dist_init(world_size, rank, master_addr='localhost', master_port='12355', backend='nccl'):
    """Initialize the default torch.distributed process group.

    Args:
        world_size: total number of processes in the group.
        rank: rank of the calling process (0 .. world_size-1).
        master_addr: address of the rank-0 host; change it to the
            corresponding IP address for multi-node runs.
        master_port: rendezvous TCP port, kept as a string because it is
            written into an environment variable.
        backend: communication backend; defaults to 'nccl' (GPU).
            Pass 'gloo' for CPU-only setups.

    Returns:
        True once the group is confirmed initialized.

    Raises:
        RuntimeError: if the process group failed to initialize. A plain
            raise is used instead of assert so the check survives `-O`.
    """
    # Rendezvous information is passed via the standard env variables
    # read by init_process_group's default env:// init method.
    os.environ['MASTER_ADDR'] = master_addr
    os.environ['MASTER_PORT'] = master_port

    # initialize the process group
    dist.init_process_group(backend=backend, rank=rank, world_size=world_size)
    if not dist.is_initialized():
        raise RuntimeError("Error! The distributed env is not initialized!")

    return True


def get_local_rank():
    """Return this process's rank, or 0 when distributed is not initialized.

    NOTE(review): despite the name, this returns the *global* rank from
    dist.get_rank(); that equals the local device id only for single-node
    runs — confirm with callers before using it in a multi-node setup.
    """
    if dist.is_initialized():
        return dist.get_rank()
    return 0


def get_world_size():
    """Return the number of processes in the group (1 if uninitialized)."""
    return dist.get_world_size() if dist.is_initialized() else 1


def init_parameters(model):
    """Broadcast rank-0's parameter values to every replica.

    Ensures all processes start from identical weights. No-op when
    running single-process or before dist is initialized.
    """
    multi_process = dist.is_initialized() and dist.get_world_size() > 1
    if not multi_process:
        return
    for p in model.parameters():
        dist.broadcast(p.data, 0)


def allreduce_average_gradients_dsq(model):
    """Average parameter gradients across all ranks via a single all-reduce.

    Sums each parameter's gradient over the process group with
    dist.all_reduce, then divides by the world size in-place so every
    rank ends up holding the averaged gradient.

    Args:
        model: a torch.nn.Module whose .grad tensors have been populated
            by a backward pass. Parameters whose grad is None (frozen
            layers, or unused in the forward) are skipped instead of
            raising AttributeError.
    """
    size = float(dist.get_world_size())
    for param in model.parameters():
        if param.grad is None:
            # Frozen / unused parameter — nothing to reduce.
            continue
        dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
        param.grad.data /= size

    # Synchronizing is only meaningful (and only safe) when CUDA exists;
    # calling it on a CPU-only host would raise.
    if torch.cuda.is_available():
        torch.cuda.synchronize()

def allreduce_average_gradients(model):
    """Ring all-reduce: average parameter gradients across all ranks in-place.

    Each rank repeatedly sends a buffer to its right neighbour while
    receiving from its left neighbour (size-1 rounds), accumulating what it
    receives into param.grad; after the loop every rank holds the sum of
    all ranks' gradients, which is then divided by the world size.

    NOTE(review): correctness depends on the exact isend/recv alternation
    between the two buffers below — do not reorder these statements.
    """
    rank, size = dist.get_rank(), dist.get_world_size()
    left, right = ((rank-1)+size)%size, (rank+1)%size 
    # All processes form a ring; `left` and `right` are the ranks of this
    # process's two neighbours on that ring.
    
    for param in model.parameters():
        assert isinstance(param.grad.data, torch.Tensor)
        # Double buffering: one buffer is in flight (isend) while the other
        # is being received into; the two swap roles each round via i%2.
        send_buf, recv_buf = param.grad.data.clone(), param.grad.data.clone()
        # print(param.grad.data, send_buf, recv_buf)
        # print(f'Device {rank}, grad{param.grad.data.device}, send_buf{send_buf.device}, recv_buf{recv_buf.device}')
        # ring reduce: size-1 rounds moves every rank's contribution once
        # around the whole ring.
        for i in range(size-1):
            if i%2 == 0:
                # Even round: forward send_buf, accumulate into grad what
                # just arrived in recv_buf.
                send_req = dist.isend(send_buf, right)
                dist.recv(recv_buf, left)
                param.grad.data.add_(recv_buf)
            else:
                # Odd round: the buffers swap roles — forward what we
                # received last round, receive into the other buffer.
                send_req = dist.isend(recv_buf, right)
                dist.recv(send_buf, left)
                param.grad.data.add_(send_buf)
            # print(f'Device {rank}, grad{param.grad.data.device}, send_buf{send_buf.device}, recv_buf{recv_buf.device}')
            # print('Device: %d is waiting' % (dist.get_rank()))
            # Ensure the async send has drained before reusing its buffer.
            send_req.wait()
        
        # Sum of all ranks' gradients -> average.
        param.grad.data.divide_(float(size))
        # print(f'Device {rank}, grad{param.grad.data.device}, send_buf{send_buf.device}, recv_buf{recv_buf.device}')
        
        
    # print('Device: %d finished its allreduce' % (dist.get_rank()))
            

def allgather_average_gradients(model):
    # Placeholder: all-gather-based gradient averaging is not implemented yet.
    raise NotImplementedError