import os
import queue
import time
from multiprocessing import Manager, Process
from xml.etree.ElementTree import Element, ElementTree, SubElement

import cv2
from ultralytics import YOLO


def worker_process(
    task_queue,
    stop_event,
    device,
    process_idx,
    weight_file,
    result_save_path,
    threshold,
    batch_size,
    img_size
):
    """
    Worker loop for one inference process.

    1. Pulls image paths from 'task_queue' and accumulates them up to
       'batch_size', then runs a single batched inference call.
    2. For each image in the batch, saves the annotated result and an XML
       file with detection info (via _save_results).
    3. When the queue runs dry and 'stop_event' is set, flushes any partial
       batch and exits.

    :param task_queue: A multiprocessing queue containing paths to images that need inference.
    :param stop_event: An event flag indicating when the worker process should stop.
    :param device: The GPU device string, e.g., 'cuda:0'.
    :param process_idx: An index (int) to differentiate multiple processes on the same device.
    :param weight_file: Path to the YOLO model weights (e.g. .pt).
    :param result_save_path: Path to the directory where annotated images and XML files are saved.
    :param threshold: Confidence threshold for predictions.
    :param batch_size: Number of images to process in one batch before running inference.
    :param img_size: The input resolution for inference (imgsz).
    """

    # Load the model and move it to the specified device
    model = YOLO(weight_file)
    model.to(device)

    # Attempt to retrieve class names from the model
    class_names = model.names if hasattr(model, 'names') else {}

    # Shared prefix for all log lines emitted by this worker
    log_prefix = f"[Device: gpu:{device.split(':')[-1]}, Process: {process_idx}]"

    # Record the start time
    start_time = time.time()

    # Temporary list to hold (img_path, image) for the current batch
    batch_data = []

    def _flush_batch(label):
        """Run inference on the accumulated batch, save each result, clear the batch."""
        images = [item[1] for item in batch_data]
        # BUG FIX: 'device' is already a full device string such as 'cuda:0';
        # the previous code passed f'cuda:{device}', yielding the invalid
        # 'cuda:cuda:0'. Pass it through unchanged.
        results = model.predict(images, conf=threshold, device=device, imgsz=img_size)
        print(f"{log_prefix} {label} inference done. Batch size = {len(batch_data)}")
        for (img_path_b, image_b), result in zip(batch_data, results):
            _save_results(img_path_b, image_b, result, result_save_path, class_names)
        batch_data.clear()

    while True:
        try:
            # Blocking get with a timeout instead of empty()-then-get():
            # with several workers sharing one queue, empty() can be stale and
            # a bare get() could block forever after stop_event is set.
            img_path = task_queue.get(timeout=0.1)
        except queue.Empty:
            # Queue is (momentarily) dry: flush any partial batch first so
            # results are not held indefinitely.
            if batch_data:
                _flush_batch("Final batch")
            # Exit only once the producer has signalled shutdown.
            if stop_event.is_set():
                break
            continue

        if img_path is None:
            # If a None is encountered, skip it
            continue

        print(f"{log_prefix} Start inference: {img_path}")

        image = cv2.imread(img_path)
        if image is None:
            print(f"{log_prefix} Cannot read image: {img_path}")
            continue

        # Accumulate the (path, image) in the batch
        batch_data.append((img_path, image))

        # If we have reached the batch_size, perform batch inference
        if len(batch_data) == batch_size:
            _flush_batch("Batch")

    total_time = time.time() - start_time
    print(f"{log_prefix} Exiting. Total run time: {total_time:.2f} seconds.")


def _save_results(img_path, image, result, result_save_path, class_names):
    """
    Persist the inference output for a single image.

    Writes two files into 'result_save_path':
    1. The annotated image (same basename as the input).
    2. A Pascal VOC-style XML annotation (same stem, '.xml' extension).

    :param img_path: The file path of the original image.
    :param image: The original image array.
    :param result: The inference result from the YOLO model for this image.
    :param result_save_path: Directory to save the annotated image and XML.
    :param class_names: A dictionary of class IDs to class names.
    """

    file_name = os.path.basename(img_path)
    stem, _ = os.path.splitext(file_name)

    # 1) Annotated visualization
    cv2.imwrite(os.path.join(result_save_path, file_name), result.plot())

    # 2) Pascal VOC-like XML annotation
    root = Element("annotation")
    SubElement(root, "filename").text = file_name

    img_h, img_w, img_d = image.shape
    size_node = SubElement(root, "size")
    for tag, value in (("width", img_w), ("height", img_h), ("depth", img_d)):
        SubElement(size_node, tag).text = str(value)

    boxes = result.boxes
    if boxes is not None and len(boxes) > 0:
        for box in boxes:
            class_id = int(box.cls[0].item())
            confidence = float(box.conf[0].item())
            # xyxy layout: [xmin, ymin, xmax, ymax]
            x_min, y_min, x_max, y_max = box.xyxy[0].cpu().numpy()

            obj_node = SubElement(root, "object")
            SubElement(obj_node, "name").text = class_names.get(class_id, f"class_{class_id}")
            SubElement(obj_node, "confidence").text = f"{confidence:.3f}"

            box_node = SubElement(obj_node, "bndbox")
            for tag, value in (
                ("xmin", x_min), ("ymin", y_min), ("xmax", x_max), ("ymax", y_max)
            ):
                SubElement(box_node, tag).text = str(int(value))

    xml_path = os.path.join(result_save_path, stem + ".xml")
    ElementTree(root).write(xml_path, encoding='utf-8', xml_declaration=False)


class CEDInference:
    """
    Orchestrates YOLOv8 inference across multiple worker processes spread
    over one or more GPU devices. Image paths are fed into a shared queue
    and consumed by the workers in parallel.
    """

    def __init__(
        self,
        weight_file: str,
        gpu_executors: dict,
        result_save_path: str,
        threshold: float = 0.25,
        batch_size: int = 1,
        img_size: int = 640
    ):
        """
        :param weight_file: Path to the YOLO model weights (e.g. .pt file).
        :param gpu_executors: A dict specifying how many processes to spawn per device,
                              e.g., {"cuda:0": 2, "cuda:1": 2}.
        :param result_save_path: Directory to save annotated images and XML files.
        :param threshold: Confidence threshold for detection.
        :param batch_size: Number of images per batch for inference.
        :param img_size: Resolution (imgsz) used for inference.
        """
        self.weight_file = weight_file
        self.gpu_executors = gpu_executors
        self.result_save_path = result_save_path
        self.threshold = threshold
        self.batch_size = batch_size
        self.img_size = img_size

        # Make sure the output directory exists before any worker writes to it.
        os.makedirs(self.result_save_path, exist_ok=True)

        # Manager-backed queue/event so they can be shared across processes.
        self.manager = Manager()
        self.task_queue = self.manager.Queue()
        self.stop_event = self.manager.Event()
        self.processes = []

    def start_inference_executors(self):
        """
        Spawn the worker processes described by 'gpu_executors'; each worker
        is pinned to its device string.
        """
        for device, proc_count in self.gpu_executors.items():
            for idx in range(proc_count):
                worker_args = (
                    self.task_queue,
                    self.stop_event,
                    device,
                    idx,
                    self.weight_file,
                    self.result_save_path,
                    self.threshold,
                    self.batch_size,
                    self.img_size,
                )
                proc = Process(target=worker_process, args=worker_args)
                proc.start()
                self.processes.append(proc)

    def read_input_image(self, img_path: str):
        """
        Enqueue an image path for the workers to process.
        """
        self.task_queue.put(img_path)

    def stop_all_executors(self):
        """
        Block until the task queue drains, then signal all workers to stop
        via 'stop_event' and join them.
        """
        while not self.task_queue.empty():
            time.sleep(0.1)

        self.stop_event.set()

        for proc in self.processes:
            proc.join()

        self.processes.clear()