import glob
import os
import sys
sys.path.append(os.getcwd())
from ced_inference_ray import CEDInference
import ray

def test1(
    model_weights="/home/suojiashun/Dev/YOLO_test/ced_inference_yolo/weights/best_0809.pt",
    save_dir="/home/suojiashun/Dev/YOLO_test/ced_inference_yolo/results",
    images_folder="/home/suojiashun/Dev/YOLO_test/ced_inference_yolo/img_all",
    conf_thres=0.5,
    img_size=640,
):
    """Run CED YOLO inference over every image in *images_folder* on a Ray cluster.

    Connects to an already-running Ray cluster, builds a ``CEDInference``
    instance spread over the GPUs listed in ``gpu_config``, feeds every
    image file in *images_folder* to it, and blocks until all inference
    tasks have completed.

    Args:
        model_weights: Path to the YOLO ``.pt`` weight file.
        save_dir: Directory where inference results are written.
        images_folder: Directory scanned (non-recursively) for image files.
        conf_thres: Confidence threshold passed to the model.
        img_size: Inference image size passed to ``CEDInference``.
    """
    # 1) Connect to the already-started Ray cluster. If this machine is the
    #    head node, ray.init(address="auto") is enough; for a multi-node
    #    setup the cluster is normally started from the command line first
    #    (see the README steps).
    ray.init(address="auto")

    # Node name -> {device: worker count}; one GPU executor on each of two nodes.
    gpu_config = {
        "nodeA": {"cuda:0": 1},
        "nodeB": {"cuda:0": 1},
    }

    # Create the inference instance.
    inference_instance = CEDInference(
        weight_file=model_weights,
        gpu_executors=gpu_config,
        result_save_path=save_dir,
        threshold=conf_thres,
        img_size=img_size,
    )

    # Collect image files by extension (case-insensitive); set gives O(1)
    # membership tests.
    exts = {".jpg", ".jpeg", ".png", ".bmp"}
    all_imgs = [
        f for f in glob.glob(os.path.join(images_folder, "*"))
        if os.path.splitext(f)[1].lower() in exts
    ]

    # Dispatch images in a round-robin manner.
    for img_file in all_imgs:
        inference_instance.read_input_image(img_file)

    # Wait until all inference tasks finish.
    inference_instance.wait_until_done()
    print("All inference tasks have been completed.")


if __name__ == "__main__":
    # Script entry point: run the full Ray-based inference pass.
    test1()