import ray
import torch


@ray.remote(num_gpus=1)
def gpu_test():
    """
    A remote function that uses GPU.

    Runs a small matrix multiplication on the first CUDA device and
    returns the norm of the product, or a failure message when no
    GPU is visible to the Ray worker.
    """
    # Guard clause: bail out early if CUDA is not visible to this worker.
    if not torch.cuda.is_available():
        return "GPU is NOT accessible. torch.cuda.is_available() = False."

    # Exercise the GPU with a modest 1000x1000 matmul and summarize the result.
    dev = torch.device("cuda:0")
    lhs = torch.randn((1000, 1000), device=dev)
    rhs = torch.randn((1000, 1000), device=dev)
    product = torch.matmul(lhs, rhs)
    result_norm = product.norm().item()
    return f"GPU is accessible. Computed norm: {result_norm:.3f}"


@ray.remote(num_cpus=1)
def cpu_test():
    """
    A remote function that uses only CPU.

    Multiplies two random 500x500 matrices on the CPU device and
    returns the norm of the product as a formatted string.
    """
    cpu = torch.device("cpu")
    left = torch.randn((500, 500), device=cpu)
    right = torch.randn((500, 500), device=cpu)
    # The @ operator is torch.matmul for 2-D tensors.
    product = left @ right
    return f"CPU task done. Computed norm: {product.norm().item():.3f}"


def main():
    """
    Entry point: start Ray, launch one GPU task and one CPU task
    concurrently, print both results, and shut the runtime down.
    """
    # Initialize Ray. If you have custom resources or are in a container,
    # you can add arguments here, e.g., ray.init(resources={"local_node": 1})
    ray.init(num_gpus=1)

    try:
        # Submit both tasks up front so Ray can schedule them concurrently.
        gpu_future = gpu_test.remote()
        cpu_future = cpu_test.remote()

        # One batched ray.get waits for both results in a single call.
        gpu_result, cpu_result = ray.get([gpu_future, cpu_future])

        print(gpu_result)
        print(cpu_result)
    finally:
        # Tear the Ray runtime down even if a task raised, so worker
        # processes and the claimed GPU are always released.
        ray.shutdown()


# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
