import time
import threading

from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from queue import Queue
from typing import List, Optional

import cv2
import numpy as np
import torch

from fastapi import UploadFile
from loguru import logger
from ultralytics import YOLO
from ultralytics.engine.results import Results


class TaskHandle:
    """Handle for tracking an asynchronous inference task.

    A worker thread fulfils the handle via :meth:`set_result` or
    :meth:`set_error`; the submitting thread blocks on :meth:`wait_result`.
    """

    def __init__(self) -> None:
        # Completion flag shared between producer and consumer threads.
        self._ready = threading.Event()
        # YOLO prediction results, populated on success.
        self._result: Optional[List["Results"]] = None
        # Exception captured by the worker, populated on failure.
        self._error: Optional[Exception] = None

    def wait_result(self, timeout: Optional[float] = None) -> "List[Results]":
        """Block until the task completes or the timeout elapses.

        Args:
            timeout: Maximum wait time in seconds (None = wait indefinitely)

        Returns:
            The YOLO prediction results produced by the worker.

        Raises:
            TimeoutError: If timeout exceeded before completion
            Exception: Any error that occurred during processing
        """
        if not self._ready.wait(timeout):
            raise TimeoutError("Task processing timeout")
        if self._error is not None:
            raise self._error
        return self._result  # type: ignore[return-value]

    def set_result(self, result: List["Results"]) -> None:
        """Store the task result and signal completion.

        Args:
            result: YOLO prediction results
        """
        self._result = result
        self._ready.set()

    def set_error(self, error: Exception) -> None:
        """Store the task error and signal completion.

        Args:
            error: Captured exception during processing
        """
        self._error = error
        self._ready.set()


class Inference:
    """Multi-GPU (or CPU fallback) YOLO inference engine.

    Tasks are submitted to a central queue and round-robin distributed to
    one worker thread per GPU (or a single CPU worker when no GPU exists).
    """

    def __init__(
        self,
        weight_path: Path,
        save_result: bool = False,
        save_path: Optional[Path] = None,
    ) -> None:
        """Initialize YOLO inference engine.

        Args:
            weight_path: Path to .pt model weights file
            save_result: Whether to persist the original upload, rendered
                predictions and label files under ``save_path``
            save_path: Root directory for saved results; required when
                ``save_result`` is True
        """
        self.weight_path = weight_path
        self.save_result = save_result
        self.save_path = save_path

        self.gpu_num = torch.cuda.device_count()
        # +2 workers: one for the memory monitor, one for the distributor.
        self.executor = ThreadPoolExecutor(max_workers=self.gpu_num + 2)
        self.task_queue: Queue[tuple["TaskHandle", "UploadFile"]] = Queue()
        # One buffer per GPU; a single queue when running CPU-only.
        self.gpu_queues: List[Queue[tuple["TaskHandle", "UploadFile"]]] = [
            Queue() for _ in range(self.gpu_num or 1)
        ]

        if self.save_result:
            self._create_result_dir()

    def start(self) -> None:
        """Start background processing threads.

        Launches:
        - GPU memory monitor
        - GPU (or CPU fallback) worker threads
        - Task distributor thread
        """
        # Start memory monitor (no-op on CPU-only hosts).
        self.executor.submit(self._monitor_gpu_memory)

        # Start one worker per GPU, or a single CPU worker.
        if self.gpu_num == 0:
            self.executor.submit(self._worker_loop, None)
        else:
            for gpu_id in range(self.gpu_num):
                self.executor.submit(self._worker_loop, gpu_id)

        # Start task distributor
        self.executor.submit(self._distribute_tasks)

    def submit_task(self, task: "UploadFile") -> "TaskHandle":
        """Submit new inference task to processing queue.

        Args:
            task: Uploaded file containing image data

        Returns:
            TaskHandle: Reference for tracking this task's status
        """
        handle = TaskHandle()
        self.task_queue.put((handle, task))
        logger.info(f"Submitted new task: {task.filename}")

        return handle

    def _create_result_dir(self) -> None:
        """Create result directory tree if it does not exist"""
        if self.save_path is None:
            return

        self.save_path.mkdir(parents=True, exist_ok=True)
        (self.save_path / "original").mkdir(parents=True, exist_ok=True)
        (self.save_path / "predictions").mkdir(parents=True, exist_ok=True)
        (self.save_path / "labels").mkdir(parents=True, exist_ok=True)

    def _monitor_gpu_memory(self) -> None:
        """Background thread logging GPU memory usage every 5 seconds"""
        if self.gpu_num == 0:
            return

        while True:
            for gpu_id in range(self.gpu_num):
                # Report both values in MB so the two numbers are comparable.
                alloc_mb = torch.cuda.memory_allocated(gpu_id) / 1024**2
                total_mb = (
                    torch.cuda.get_device_properties(gpu_id).total_memory / 1024**2
                )
                logger.info(
                    f"GPU {gpu_id} memory: {alloc_mb:.1f}MB / {total_mb:.1f}MB"
                )
            time.sleep(5)

    def _worker_loop(self, gpu_id: Optional[int]) -> None:
        """Worker thread: load one model replica and process its queue.

        Args:
            gpu_id: CUDA device index, or None to run on CPU
        """
        model = YOLO(self.weight_path)
        device = "cpu" if gpu_id is None else f"cuda:{gpu_id}"
        model = model.to(device)

        work_queue = self.gpu_queues[gpu_id or 0]
        while True:
            handle, task = work_queue.get()

            if gpu_id is not None:
                logger.info(f"Task {task.filename} start inference on GPU {gpu_id}")
            else:
                logger.info(f"Task {task.filename} start inference on CPU")

            try:
                # Read the upload once; reuse the bytes for saving + decoding.
                data = task.file.read()

                # Save the raw upload into the "original" directory that
                # _create_result_dir actually creates, and only when saving
                # is enabled (save_path may be None otherwise).
                if self.save_result and self.save_path and task.filename:
                    with open(self.save_path / "original" / task.filename, "wb") as f:
                        f.write(data)

                image_array = np.frombuffer(data, dtype=np.uint8)
                cv_mat = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
                if cv_mat is None:
                    raise ValueError(f"Could not decode image: {task.filename}")

                # Process task on this device
                result = model.predict(cv_mat)
                if self.save_result:
                    assert self.save_path is not None
                    assert task.filename is not None
                    result[0].save(self.save_path / "predictions" / task.filename)
                    # Path(...).stem keeps dotted basenames intact,
                    # unlike filename.split('.')[0].
                    result[0].save_txt(
                        self.save_path / "labels" / f"{Path(task.filename).stem}.txt"
                    )

                # set_result/set_error already signal the handle's event.
                handle.set_result(result)
                logger.info(f"Task {task.filename} completed")
            except Exception as e:
                handle.set_error(e)
                logger.error(f"Task failed: {e}")
            finally:
                work_queue.task_done()

    def _distribute_tasks(self) -> None:
        """Round-robin tasks from the main queue onto per-worker buffers"""
        worker_idx = 0
        while True:
            handle, task = self.task_queue.get()
            self.gpu_queues[worker_idx].put((handle, task))
            logger.info(f"Task {task.filename} distributed to worker {worker_idx}")
            # Modulo the queue count, never self.gpu_num: gpu_num is 0 on
            # CPU-only hosts while gpu_queues always holds at least one queue.
            worker_idx = (worker_idx + 1) % len(self.gpu_queues)
