import os
import time
import cv2
import ray
from ultralytics import YOLO
from xml.etree.ElementTree import Element, SubElement, ElementTree

def _save_results(img_path, image, result, result_save_path, class_names):
    """
    Save inference results for one image: the annotated visualization and a
    Pascal VOC-style XML annotation file.

    :param img_path: Path of the original input image (used only for naming
        the output files).
    :param image: Original image as a numpy array, ``(H, W, C)`` or ``(H, W)``.
    :param result: A single ultralytics result object exposing ``.plot()``
        and ``.boxes``.
    :param result_save_path: Directory where both output files are written.
    :param class_names: Mapping of class id -> class name; unknown ids fall
        back to ``"class_<id>"``.
    """
    # Write the visualized detections next to the XML, reusing the input's
    # base filename.
    annotated_img = result.plot()
    base_name = os.path.basename(img_path)
    save_path = os.path.join(result_save_path, base_name)
    cv2.imwrite(save_path, annotated_img)

    xml_base_name = os.path.splitext(base_name)[0] + ".xml"
    xml_save_path = os.path.join(result_save_path, xml_base_name)

    annotation = Element("annotation")
    SubElement(annotation, "filename").text = base_name

    # Robust against grayscale input: a 2-D array would make the original
    # 3-way unpack of .shape raise ValueError. Color images behave as before.
    height, width = image.shape[:2]
    depth = image.shape[2] if image.ndim == 3 else 1
    size_elem = SubElement(annotation, "size")
    SubElement(size_elem, "width").text = str(width)
    SubElement(size_elem, "height").text = str(height)
    SubElement(size_elem, "depth").text = str(depth)

    det_boxes = result.boxes
    if det_boxes is not None and len(det_boxes) > 0:
        for box in det_boxes:
            cls_id = int(box.cls[0].item())
            conf_val = float(box.conf[0].item())
            xyxy = box.xyxy[0].cpu().numpy()  # [xmin, ymin, xmax, ymax]
            xmin, ymin, xmax, ymax = xyxy

            obj_elem = SubElement(annotation, "object")
            cls_name = class_names.get(cls_id, f"class_{cls_id}")
            SubElement(obj_elem, "name").text = cls_name
            # Confidence is not part of strict VOC, but is useful downstream.
            SubElement(obj_elem, "confidence").text = f"{conf_val:.3f}"

            bndbox = SubElement(obj_elem, "bndbox")
            # Truncate to integer pixel coordinates, as VOC expects.
            SubElement(bndbox, "xmin").text = str(int(xmin))
            SubElement(bndbox, "ymin").text = str(int(ymin))
            SubElement(bndbox, "xmax").text = str(int(xmax))
            SubElement(bndbox, "ymax").text = str(int(ymax))

    tree = ElementTree(annotation)
    tree.write(xml_save_path, encoding='utf-8', xml_declaration=False)


@ray.remote
class InferenceWorker:
    """
    A Ray Actor that holds a YOLO model and performs inference on request.
    """

    def __init__(self, device, weight_file, result_save_path, threshold, img_size):
        """
        :param device: "cpu" or "cuda:0", etc.
        :param weight_file: Path to the YOLO weight (.pt file).
        :param result_save_path: Directory to save results.
        :param threshold: Confidence threshold.
        :param img_size: Inference image size.
        """
        self.device = device
        self.weight_file = weight_file
        self.result_save_path = result_save_path
        self.threshold = threshold
        self.img_size = img_size

        # Ensure the output directory exists before any result is written.
        os.makedirs(self.result_save_path, exist_ok=True)

        # Load the YOLO model onto the requested device once, at actor start.
        model = YOLO(weight_file)
        model.to(device)
        self.model = model
        # Fall back to an empty mapping if the model exposes no class names.
        self.class_names = getattr(model, 'names', {})

    def run_inference(self, img_path):
        """
        Perform inference on a single image and save the result.
        Returns a "Success: ..." or "Failed: ..." status string.
        """
        print(f"[Worker on {self.device}] Inference on {img_path}")
        image = cv2.imread(img_path)
        if image is None:
            # Unreadable/missing file: report it without crashing the actor.
            print(f"[Worker on {self.device}] Cannot read image: {img_path}")
            return f"Failed: {img_path}"

        # Run inference with the actor's configured threshold and image size.
        predictions = self.model.predict(
            image,
            conf=self.threshold,
            device=self.device,
            imgsz=self.img_size,
        )

        # Persist annotated image + VOC XML for the first (only) result.
        _save_results(img_path, image, predictions[0], self.result_save_path, self.class_names)

        return f"Success: {img_path}"


class CEDInference:
    """
    A simplified Ray-based inference manager that can create multiple executors
    (CPU or GPU) and dispatch inference requests in a round-robin manner.
    """

    def __init__(
        self,
        weight_file: str,
        gpu_executors: dict,
        result_save_path: str,
        threshold: float = 0.25,
        img_size: int = 640
    ):
        """
        :param weight_file: Path to the YOLO weight (.pt file).
        :param gpu_executors: Dict like:
            {
                "nodeA": {"cuda:0": 2, "cuda:1": 2},
                "nodeB": {"cuda:0": 1, "cuda:1": 3}
            }
            or simply {"local_node": {"cpu": 1, "cuda:0": 2}} for single node.
        :param result_save_path: Directory to save inference results.
        :param threshold: Confidence threshold for object detection.
        :param img_size: Inference image size.
        """
        # Connect to (or start) Ray exactly once per process.
        if not ray.is_initialized():
            # You can specify address="auto" or other parameters if needed.
            ray.init()

        self.weight_file = weight_file
        self.gpu_executors = gpu_executors
        self.result_save_path = result_save_path
        self.threshold = threshold
        self.img_size = img_size

        os.makedirs(self.result_save_path, exist_ok=True)

        # Actor handles, filled in by _create_executors().
        self.workers = []
        self._create_executors()

        # Pending futures from run_inference.remote(...) calls.
        self.inference_futures = []

        # Monotonic counter driving round-robin worker selection.
        self.current_idx = 0

    def _create_executors(self):
        """
        Create the InferenceWorker actors based on gpu_executors config.
        """
        for node_label, device_config in self.gpu_executors.items():
            for device, num_procs in device_config.items():
                # CPU workers reserve one CPU; everything else reserves a GPU.
                is_cpu = device.lower() == "cpu"
                for _ in range(num_procs):
                    # Tiny custom-resource request pins the actor to the named
                    # node, if such a resource is registered with Ray.
                    opts = {"resources": {node_label: 0.001}}
                    if is_cpu:
                        opts["num_cpus"] = 1
                    else:
                        opts["num_gpus"] = 1

                    actor = InferenceWorker.options(**opts).remote(
                        device=device,
                        weight_file=self.weight_file,
                        result_save_path=self.result_save_path,
                        threshold=self.threshold,
                        img_size=self.img_size
                    )
                    self.workers.append(actor)

    def read_input_image(self, img_path: str):
        """
        Dispatch an image to one of the workers in a round-robin manner.
        """
        if not self.workers:
            print("No workers available. Please check configuration.")
            return

        # Pick the next worker in rotation, then advance the counter.
        chosen = self.workers[self.current_idx % len(self.workers)]
        self.current_idx += 1

        self.inference_futures.append(chosen.run_inference.remote(img_path))

    def wait_until_done(self):
        """
        Wait for all queued inference tasks to complete.
        """
        if not self.inference_futures:
            print("No inference tasks were submitted.")
        else:
            # Blocks until every outstanding task has finished.
            results = ray.get(self.inference_futures)
            print("Inference results:", results)
        self.inference_futures.clear()
