import os
import time
from multiprocessing import Process, Manager
from queue import Empty
from xml.etree.ElementTree import Element, SubElement, ElementTree

import cv2
from ultralytics import YOLO


def worker_process(
    task_queue,
    stop_event,
    device,
    process_idx,
    weight_file,
    result_save_path,
    threshold,
    batch_size,
    img_size,
    stats,
    image_info_list,
):
    """
    Worker process that pulls image paths from `task_queue`, performs batched
    inference with YOLOv8, saves annotated results / VOC-style XML, updates
    latency statistics and appends per-image info to `image_info_list`.

    Parameters
    ----------
    task_queue : managed queue of image paths; ``None`` entries are skipped.
    stop_event : managed event; once set, the worker flushes the final
        (possibly smaller) batch and exits when the queue is drained.
    device : device spec string such as ``"cuda:0"``.
    process_idx : index of this worker on its device (log messages only).
    weight_file : path to the YOLO weight file.
    result_save_path : directory receiving annotated images and XML files.
    threshold : confidence threshold forwarded to ``model.predict``.
    batch_size : number of images accumulated before running inference.
    img_size : inference image size (``imgsz``).
    stats : managed dict with latency statistics (see ``_update_stats``).
    image_info_list : managed list receiving one record per processed image.
    """
    # Load model onto the assigned device.
    model = YOLO(weight_file)
    model.to(device)

    class_names = model.names if hasattr(model, "names") else {}
    gpu_tag = device.split(":")[-1]

    batch_data = []          # [(abs_img_path, img_arr), ...] awaiting inference
    image_start_times = {}   # {abs_img_path: timestamp at dequeue}

    def _flush():
        # Run inference on whatever is currently batched, then reset the batch.
        if batch_data:
            _infer_and_save(
                model,
                batch_data,
                class_names,
                result_save_path,
                threshold,
                img_size,
                device,
                image_start_times,
                stats,
                image_info_list,
            )
            batch_data.clear()

    while True:
        try:
            # BUG FIX: the previous `empty()` + `get()` pair is racy with
            # several consumer processes — `empty()` returning False does not
            # guarantee a subsequent `get()` won't block forever, which could
            # leave a worker hung past `stop_event`. A blocking get with a
            # timeout also replaces the explicit `time.sleep(0.1)` idle wait.
            img_path = task_queue.get(timeout=0.1)
        except Empty:
            # Queue momentarily idle: flush the last partial batch and exit
            # if shutdown was requested.
            _flush()
            if stop_event.is_set():
                break
            continue

        if img_path is None:
            continue

        abs_img_path = os.path.abspath(img_path)
        print(
            f"[Device gpu:{gpu_tag} | Proc {process_idx}] "
            f"Start inference: {abs_img_path}"
        )

        image_start_times[abs_img_path] = time.time()

        image = cv2.imread(abs_img_path)
        if image is None:
            print(
                f"[Device gpu:{gpu_tag} | Proc {process_idx}] "
                f"Cannot read image: {abs_img_path}"
            )
            # BUG FIX: drop the stale timestamp so the dict does not grow
            # for images that were never inferred.
            image_start_times.pop(abs_img_path, None)
            continue

        batch_data.append((abs_img_path, image))

        # Run inference once the batch is full.
        if len(batch_data) == batch_size:
            _flush()

    print(
        f"[Device gpu:{gpu_tag} | Proc {process_idx}] "
        "Exiting."
    )


def _infer_and_save(
    model,
    batch_data,
    class_names,
    result_save_path,
    threshold,
    img_size,
    device,
    image_start_times,
    stats,
    image_info_list,
):
    """
    Run ``model.predict`` on `batch_data`, save annotated images / XML,
    update shared `stats` and append one record per image to
    `image_info_list`.

    Parameters
    ----------
    batch_data : list of ``(abs_img_path, img_arr)`` tuples.
    image_start_times : dict mapping ``abs_img_path`` to its dequeue
        timestamp; consumed entries are removed here.
    """
    images = [img for _, img in batch_data]
    # BUG FIX: `device` already carries the full spec (e.g. "cuda:0" — see
    # `device.split(':')[-1]` in the log lines), so the previous
    # f"cuda:{device}" produced the invalid string "cuda:cuda:0".
    results = model.predict(images, conf=threshold, device=device, imgsz=img_size)
    print(
        f"[Device gpu:{device.split(':')[-1]}] "
        f"Batch inference done. Batch size = {len(batch_data)}"
    )

    for (abs_img_path, img_arr), result in zip(batch_data, results):
        # 1) Save annotated image + XML.
        _save_results(
            abs_img_path, img_arr, result, result_save_path, class_names
        )

        # 2) Latency statistics. Pop the start time so the per-process dict
        #    does not grow without bound over a long run.
        start_time = image_start_times.pop(abs_img_path)
        latency = time.time() - start_time
        _update_stats(stats, latency, start_time)

        # 3) Append per-image info (absolute paths).
        abs_result_path = os.path.abspath(
            os.path.join(result_save_path, os.path.basename(abs_img_path))
        )
        detection_found = len(result.boxes) > 0
        image_info_list.append(
            {
                "original_img_path": abs_img_path,
                "result_img_path": abs_result_path,
                "error": detection_found,  # True means target detected (anomaly)
            }
        )


def _update_stats(stats, latency, start_time):
    """
    Update shared dict `stats` with latency info.
    """
    if stats["start_time"] == 0.0:
        stats["start_time"] = start_time

    stats["total_images"] += 1
    stats["sum_latency"] += latency
    stats["end_time"] = time.time()


def _save_results(img_path, image, result, result_save_path, class_names):
    """
    Save the annotated image and a Pascal-VOC-style XML file for a single
    inference result.

    Parameters
    ----------
    img_path : source image path; its basename names both output files.
    image : the original image array (used only for the <size> element).
    result : a single ultralytics result (provides ``.plot()`` and ``.boxes``).
    result_save_path : output directory (assumed to exist).
    class_names : mapping of class id -> name; unknown ids fall back to
        ``class_<id>``.
    """
    annotated_img = result.plot()
    base_name = os.path.basename(img_path)
    save_path = os.path.join(result_save_path, base_name)
    cv2.imwrite(save_path, annotated_img)

    xml_name = os.path.splitext(base_name)[0] + ".xml"
    xml_path = os.path.join(result_save_path, xml_name)

    # Build XML tree.
    annotation = Element("annotation")
    SubElement(annotation, "filename").text = base_name

    # BUG FIX: the previous `h, w, d = image.shape` raised ValueError for
    # 2-D (grayscale) arrays; treat missing channel dimension as depth 1.
    if image.ndim == 3:
        h, w, d = image.shape
    else:
        h, w = image.shape[:2]
        d = 1
    size_e = SubElement(annotation, "size")
    SubElement(size_e, "width").text = str(w)
    SubElement(size_e, "height").text = str(h)
    SubElement(size_e, "depth").text = str(d)

    det_boxes = result.boxes
    if det_boxes is not None and len(det_boxes) > 0:
        for box in det_boxes:
            cls_id = int(box.cls[0].item())
            conf = float(box.conf[0].item())
            xmin, ymin, xmax, ymax = box.xyxy[0].cpu().numpy()

            obj = SubElement(annotation, "object")
            SubElement(obj, "name").text = class_names.get(cls_id, f"class_{cls_id}")
            SubElement(obj, "confidence").text = f"{conf:.3f}"

            bb = SubElement(obj, "bndbox")
            SubElement(bb, "xmin").text = str(int(xmin))
            SubElement(bb, "ymin").text = str(int(ymin))
            SubElement(bb, "xmax").text = str(int(xmax))
            SubElement(bb, "ymax").text = str(int(ymax))

    # xml_declaration=False keeps the file header-free, matching the
    # original output format.
    ElementTree(annotation).write(xml_path, encoding="utf-8", xml_declaration=False)


class CEDInference:
    """
    Spawn multiple worker processes across GPUs for YOLOv8 inference and
    expose runtime metrics (latency / throughput) plus per-image info.
    """

    def __init__(
        self,
        weight_file: str,
        gpu_executors: dict,
        result_save_path: str,
        threshold: float = 0.25,
        batch_size: int = 1,
        img_size: int = 640,
    ):
        self.weight_file = weight_file
        self.gpu_executors = gpu_executors
        self.result_save_path = result_save_path
        self.threshold = threshold
        self.batch_size = batch_size
        self.img_size = img_size

        os.makedirs(self.result_save_path, exist_ok=True)

        # Manager-backed objects so state is shared across worker processes.
        self.manager = Manager()
        self.task_queue = self.manager.Queue()
        self.stop_event = self.manager.Event()
        self.processes = []

        # Shared runtime statistics (see _update_stats for the contract).
        self.stats = self.manager.dict(
            start_time=0.0,
            end_time=0.0,
            total_images=0,
            sum_latency=0.0,
        )
        # Shared list for per-image records.
        self.image_info_list = self.manager.list()

    def start_inference_executors(self):
        """Spawn worker processes as defined in `gpu_executors`."""
        for device, n_proc in self.gpu_executors.items():
            for proc_idx in range(n_proc):
                worker_args = (
                    self.task_queue,
                    self.stop_event,
                    device,
                    proc_idx,
                    self.weight_file,
                    self.result_save_path,
                    self.threshold,
                    self.batch_size,
                    self.img_size,
                    self.stats,
                    self.image_info_list,
                )
                proc = Process(target=worker_process, args=worker_args)
                proc.start()
                self.processes.append(proc)

    def read_input_image(self, img_path: str):
        """Enqueue an image path for inference."""
        self.task_queue.put(img_path)

    def stop_all_executors(self):
        """Gracefully stop all workers after queue is empty."""
        # Wait for the workers to drain the queue before signalling shutdown.
        while not self.task_queue.empty():
            time.sleep(0.1)

        self.stop_event.set()
        for proc in self.processes:
            proc.join()
        self.processes.clear()

    # ── Runtime Metrics ──────────────────────────────────────────────────────
    def get_avg_latency(self) -> float:
        """Return average per-image latency (seconds); 0.0 before any image."""
        total = self.stats["total_images"]
        return self.stats["sum_latency"] / total if total else 0.0

    def get_throughput(self) -> float:
        """Return average throughput (images / second); 0.0 before any image."""
        total = self.stats["total_images"]
        if not total:
            return 0.0
        elapsed = self.stats["end_time"] - self.stats["start_time"]
        if elapsed <= 0:
            return 0.0
        return total / elapsed

    # ── New: Per-image Information ───────────────────────────────────────────
    def get_image_info(self):
        """
        Return a list of dicts, each like:
        {
            "original_img_path": "<absolute path>",
            "result_img_path":   "<absolute path>",
            "error":             bool  # True ⇒ detection found; False ⇒ no target
        }
        """
        return list(self.image_info_list)