import torch
import torch_npu
import os
import ctypes
import time
from mpi4py import MPI


comm = MPI.COMM_WORLD

rank = comm.Get_rank()
size = comm.Get_size()

# Expose the MPI world size to downstream tooling that reads RANK_SIZE.
os.environ['RANK_SIZE'] = str(size)

device_type = "npu"  # "npu" or "cuda"

# Alias the active accelerator namespace so the rest of the script stays
# device-agnostic (torch.acc.Stream, torch.acc.synchronize, ...).
if device_type == "npu":
    torch.acc = torch.npu
else:
    torch.acc = torch.cuda

# NOTE: the old module-level `size = 2**30 // 4` reassignment was removed —
# it silently clobbered the MPI world size stored above and was never read
# (each measure_* function computes its own tensor size locally).


#set current process to numa node rank // 2
def set_numa_node():
    """Bind this process's CPU scheduling and memory allocation to NUMA node rank // 2.

    Returns:
        True on success, False when libnuma reports NUMA is unavailable or
        CPU binding fails.
    """
    # Load the NUMA library.
    libnuma = ctypes.cdll.LoadLibrary('libnuma.so.1')

    # Declare signatures: ctypes defaults every restype to C int, which
    # truncates the 64-bit `struct bitmask *` returned by
    # numa_allocate_nodemask() — undefined behavior on 64-bit hosts.
    libnuma.numa_allocate_nodemask.restype = ctypes.c_void_p
    libnuma.numa_bitmask_clearall.restype = ctypes.c_void_p
    libnuma.numa_bitmask_clearall.argtypes = [ctypes.c_void_p]
    libnuma.numa_bitmask_setbit.restype = ctypes.c_void_p
    libnuma.numa_bitmask_setbit.argtypes = [ctypes.c_void_p, ctypes.c_uint]
    libnuma.numa_set_membind.restype = None
    libnuma.numa_set_membind.argtypes = [ctypes.c_void_p]
    libnuma.numa_free_nodemask.restype = None
    libnuma.numa_free_nodemask.argtypes = [ctypes.c_void_p]

    # numa_available() returns a negative value when the kernel lacks NUMA
    # support; every other libnuma call is undefined in that case.
    if libnuma.numa_available() < 0:
        print("NUMA is not available")
        return False

    # Two MPI ranks share each NUMA node.
    numa_node = rank // 2

    # Build a nodemask containing only the target node.
    mask = libnuma.numa_allocate_nodemask()
    try:
        libnuma.numa_bitmask_clearall(mask)
        libnuma.numa_bitmask_setbit(mask, numa_node)

        # Restrict CPU scheduling to the target node; non-zero means failure.
        if libnuma.numa_run_on_node(numa_node) != 0:
            print(f"Failed to bind process to NUMA node {numa_node}")
            return False

        # Bind future memory allocations to the same node.
        libnuma.numa_set_membind(mask)
    finally:
        # The nodemask is heap-allocated by libnuma; the original code
        # leaked it on every call.
        libnuma.numa_free_nodemask(mask)

    print(f"Process bound to NUMA node {numa_node}")
    return True

def measure_cpu2npu_bandwidth():
    """Measure host-to-device (pinned CPU -> NPU) copy bandwidth on this rank.

    Prints per-rank transfer size, timing, and bandwidth; output is
    serialized across ranks with MPI barriers.
    """
    # Run the copy on a dedicated stream rather than the default one.
    stream = torch.acc.Stream(device=rank)
    torch.acc.set_stream(stream)
    print(f"Rank {rank}: Current stream with ID: {torch.acc.current_stream().stream_id}")

    # Size the tensors from the dtype so the transfer is a true 1 GiB.
    # (The old hard-coded 2**30 // 4 assumed float32; with bfloat16's
    # 2 bytes/element it only moved 0.5 GiB.)
    dtype = torch.bfloat16
    num_elems = 2**30 // (torch.finfo(dtype).bits // 8)
    cpu_tensor = torch.randn(num_elems, dtype=dtype, pin_memory=True)  # Source on CPU (pinned for async copy)
    npu_tensor = torch.empty(num_elems, dtype=dtype, device=f"npu:{rank}")  # Pre-allocated destination on NPU

    # Warm-up transfer so the timed copy excludes first-touch overheads.
    npu_tensor.copy_(cpu_tensor)
    stream.synchronize()

    # Make sure all ranks start the timed copy together.
    comm.Barrier()

    # Time the async copy; device synchronize ensures end_time covers the
    # whole transfer, not just the enqueue.
    start_time = time.time()
    npu_tensor.copy_(cpu_tensor, non_blocking=True)
    torch.acc.synchronize()
    end_time = time.time()

    elapsed_time = end_time - start_time
    if elapsed_time == 0:
        # Guard against zero clock resolution; reports bandwidth as 0.
        elapsed_time = float('inf')

    # Bandwidth from the bytes actually moved.
    tensor_size_gb = cpu_tensor.nelement() * cpu_tensor.element_size() / (1024**3)  # Size in GiB
    bandwidth = tensor_size_gb / elapsed_time  # GB/s

    # Print results rank by rank, serialized with barriers.
    for r in range(comm.Get_size()):
        if r == rank:  # This rank's turn to print
            print(f"Rank {rank}: Created new stream with ID: {stream.stream_id}")
            print(f"Rank {rank}: Transfer size: {tensor_size_gb:.2f} GB")
            print(f"Rank {rank}: start_time: {start_time:8f} ")
            print(f"Rank {rank}: end_time: {end_time:8f} ")
            print(f"Rank {rank}: Transfer time: {elapsed_time:.3f} seconds")
            print(f"Rank {rank}: Bandwidth: {bandwidth:.2f} GB/s")
            print()  # Blank line between ranks
        comm.Barrier()  # Synchronize after each rank prints

def measure_npu2cpu_bandwidth():
    """Measure device-to-host (NPU -> pinned CPU) copy bandwidth on this rank.

    Prints per-rank transfer size, timing, and bandwidth; output is
    serialized across ranks with MPI barriers.
    """
    # Run the copy on a dedicated stream rather than the default one.
    stream = torch.acc.Stream(device=rank)
    torch.acc.set_stream(stream)
    print(f"Rank {rank}: Current stream with ID: {torch.acc.current_stream().stream_id}")

    # Size the tensors from the dtype so the transfer is a true 1 GiB.
    # (The old hard-coded 2**30 // 4 assumed float32; with bfloat16's
    # 2 bytes/element it only moved 0.5 GiB.)
    dtype = torch.bfloat16
    num_elems = 2**30 // (torch.finfo(dtype).bits // 8)
    cpu_tensor = torch.empty(num_elems, dtype=dtype, pin_memory=True)  # Pre-allocated destination on CPU (pinned)
    npu_tensor = torch.randn(num_elems, dtype=dtype, device=f"npu:{rank}")  # Source created directly on NPU

    # Warm-up transfer so the timed copy excludes first-touch overheads.
    cpu_tensor.copy_(npu_tensor)
    stream.synchronize()

    # Make sure all ranks start the timed copy together.
    comm.Barrier()

    # Time the async copy; device synchronize ensures end_time covers the
    # whole transfer, not just the enqueue.
    start_time = time.time()
    cpu_tensor.copy_(npu_tensor, non_blocking=True)
    torch.acc.synchronize()
    end_time = time.time()

    elapsed_time = end_time - start_time
    if elapsed_time == 0:
        # Guard against zero clock resolution; reports bandwidth as 0.
        elapsed_time = float('inf')

    # Bandwidth from the bytes actually moved.
    tensor_size_gb = cpu_tensor.nelement() * cpu_tensor.element_size() / (1024**3)  # Size in GiB
    bandwidth = tensor_size_gb / elapsed_time  # GB/s

    # Print results rank by rank, serialized with barriers.
    for r in range(comm.Get_size()):
        if r == rank:  # This rank's turn to print
            print(f"Rank {rank}: Created new stream with ID: {stream.stream_id}")
            print(f"Rank {rank}: Transfer size: {tensor_size_gb:.2f} GB")
            print(f"Rank {rank}: start_time: {start_time:8f} ")
            print(f"Rank {rank}: end_time: {end_time:8f} ")
            print(f"Rank {rank}: Transfer time: {elapsed_time:.3f} seconds")
            print(f"Rank {rank}: Bandwidth: {bandwidth:.2f} GB/s")
            print()  # Blank line between ranks
        comm.Barrier()  # Synchronize after each rank prints

# Main execution
if __name__ == "__main__":
    # Set NUMA affinity before any tensor operations so allocations land
    # on the bound node.
    set_numa_node()

    # Synchronize all processes before starting measurements.
    comm.Barrier()

    # Measure Host to Device (H2D) transfer.
    measure_cpu2npu_bandwidth()

    # Ensure all processes complete H2D before starting D2H.
    comm.Barrier()

    # Only rank 0 prints the separator; barriers keep it from interleaving
    # with other ranks' output.
    if rank == 0:
        print("================================")

    comm.Barrier()
    # Drain any outstanding device work before the next measurement.
    # Was torch.npu.synchronize(), which bypassed the torch.acc device
    # abstraction and would crash with device_type == "cuda".
    torch.acc.synchronize()

    # Measure Device to Host (D2H) transfer.
    measure_npu2cpu_bandwidth()
