# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

import argparse
import os
from typing import List, Tuple, Optional

import cv2
import numpy as np
import onnxruntime as ort
from pathlib import Path
import sys

# Resolve project layout relative to this file so the script works from any CWD.
FILE = Path(__file__).resolve()
CURRENT_DIR = FILE.parent                       # folder containing this script
ROOT = FILE.parents[2]                          # repository root (YOLO_samples)
MY_ASSETS = ROOT / "assets"                     # sample images
MY_DATASET = MY_ASSETS / "coco128" / "images" / "train2017"  # calibration images
MY_MODEL = ROOT / "models"                      # pretrained ONNX models
sys.path.append(str(ROOT))                      # make repo-local packages importable

import torch
from ultralytics.utils import YAML
from ultralytics.utils.checks import check_requirements, check_yaml
import amct_onnx as amct
from amct_onnx import AMCT_SO


class YOLOv8:
    """
    YOLOv8 object detection model class for handling ONNX inference and visualization.

    This class provides functionality to load a YOLOv8 ONNX model, perform inference on images,
    and visualize the detection results with bounding boxes and labels.

    Attributes:
        onnx_model (str): Path to the ONNX model file.
        input_path (str): Path to the input image file or folder containing images.
        output_path (str): Folder where annotated output images are written.
        output_suffix (str): Suffix appended to output image filenames (set via set_suffix).
        confidence_thres (float): Confidence threshold for filtering detections.
        iou_thres (float): IoU threshold for non-maximum suppression.
        classes (List[str]): List of class names from the COCO dataset.
        color_palette (np.ndarray): Random color palette for visualizing different classes.
        input_width (int): Width dimension of the model input.
        input_height (int): Height dimension of the model input.
        img (np.ndarray): The loaded input image (for single image processing).
        img_height (int): Height of the input image (for single image processing).
        img_width (int): Width of the input image (for single image processing).

    Methods:
        letterbox: Resize and reshape images while maintaining aspect ratio by adding padding.
        draw_detections: Draw bounding boxes and labels on the input image based on detected objects.
        preprocess: Preprocess a single input image before performing inference.
        postprocess: Perform post-processing on the model's output to extract and visualize detections.
        infer_single_image: Perform inference on a single image and return preprocessed data, padding, and raw output.
        load_model: Replace the active ONNX model and refresh the inference session.
        load_data: Replace the input path (file or folder).
        set_suffix: Set the filename suffix for saved output images.
        main: Iterate over input path (single image/folder), perform inference, and handle post-processing/storage.

    Examples:
        Initialize YOLOv8 detector and run inference on single image
        >> detector = YOLOv8("yolov8n.onnx", "image.jpg", "results/", 0.5, 0.5)
        >> output_image = detector.main()

        Initialize YOLOv8 detector and run inference on folder
        >> detector = YOLOv8("yolov8n.onnx", "image_folder/", "results/", 0.5, 0.5)
        >> detector.main()  # No output image returned, only inference performed
    """

    def __init__(self, onnx_model: str, input_path: str, output_path: str,
                 confidence_thres: float, iou_thres: float):
        """
        Initialize an instance of the YOLOv8 class.

        Args:
            onnx_model (str): Path to the ONNX model.
            input_path (str): Path to the input image file or folder containing images.
            output_path (str): Path to the output image folder (created if missing).
            confidence_thres (float): Confidence threshold for filtering detections.
            iou_thres (float): IoU threshold for non-maximum suppression.

        Raises:
            ValueError: If output_path points to an existing file instead of a folder.
        """
        self.onnx_model = onnx_model
        self.input_path = input_path            # supports file/folder
        self.output_path = output_path          # folder
        self.output_suffix = ""                 # filename suffix; set via set_suffix()

        # The output location must be a directory; create it if it does not exist.
        if os.path.isfile(self.output_path):
            raise ValueError(f"Param 'output_path' is a file, but a folder is required: {self.output_path}")
        elif not os.path.isdir(self.output_path):
            os.makedirs(self.output_path, exist_ok=True)

        self.confidence_thres = confidence_thres
        self.iou_thres = iou_thres

        # Load the class names from the COCO dataset
        self.classes = YAML.load(check_yaml("coco8.yaml"))["names"]

        # Generate a color palette for the classes
        self.color_palette = np.random.uniform(0, 255, size=(len(self.classes), 3))

        # Initialize ONNX session in __init__ for reusability (avoids reloading model for multiple images)
        self.session = ort.InferenceSession(self.onnx_model, sess_options=AMCT_SO, providers=["CPUExecutionProvider"])
        self.model_inputs = self.session.get_inputs()
        self._refresh_input_dims()

    def _refresh_input_dims(self) -> None:
        """Cache the model input height/width from the first input's NCHW shape.

        BUG FIX: the ONNX input layout is (N, C, H, W), so index 2 is the
        height and index 3 is the width. The previous code read them swapped,
        which silently corrupted the rescaling gain in postprocess() for any
        non-square model input.
        """
        input_shape = self.model_inputs[0].shape
        self.input_height = input_shape[2]
        self.input_width = input_shape[3]

    def letterbox(self, img: np.ndarray, new_shape: Tuple[int, int] = (640, 640)) -> Tuple[np.ndarray, Tuple[int, int]]:
        """
        Resize and reshape images while maintaining aspect ratio by adding padding.

        Args:
            img (np.ndarray): Input image to be resized.
            new_shape (Tuple[int, int]): Target shape (height, width) for the image.

        Returns:
            img (np.ndarray): Resized and padded image.
            pad (Tuple[int, int]): Padding values (top, left) applied to the image.
        """
        shape = img.shape[:2]  # current shape [height, width]

        # Scale ratio (new / old); use the smaller ratio so the image fits entirely
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])

        # Compute padding
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = (new_shape[1] - new_unpad[0]) / 2, (new_shape[0] - new_unpad[1]) / 2  # wh padding

        if shape[::-1] != new_unpad:  # resize
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
        # +/- 0.1 before rounding splits an odd padding total across both sides
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114))

        return img, (top, left)

    def draw_detections(self, img: np.ndarray, box: List[float], score: float, class_id: int) -> None:
        """Draw bounding boxes and labels on the input image based on the detected objects."""
        # Extract the coordinates of the bounding box
        x1, y1, w, h = box

        # Retrieve the color for the class ID
        color = self.color_palette[class_id]

        # Draw the bounding box on the image
        cv2.rectangle(img, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2)

        # Create the label text with class name and score
        label = f"{self.classes[class_id]}: {score:.2f}"

        # Calculate the dimensions of the label text
        (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)

        # Calculate the position of the label text (above the box, or below if near the top edge)
        label_x = x1
        label_y = y1 - 10 if y1 - 10 > label_height else y1 + 10

        # Draw a filled rectangle as the background for the label text
        cv2.rectangle(
            img, (label_x, label_y - label_height), (label_x + label_width, label_y + label_height), color, cv2.FILLED
        )

        # Draw the label text on the image
        cv2.putText(img, label, (label_x, label_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

    def preprocess(self, img: np.ndarray) -> Tuple[np.ndarray, Tuple[int, int]]:
        """
        Preprocess a single input image before performing inference.

        This method converts image color space, applies letterboxing to maintain aspect ratio,
        normalizes pixel values, and prepares the image data for model input.

        Args:
            img (np.ndarray): Raw input image (BGR format from cv2.imread).

        Returns:
            image_data (np.ndarray): Preprocessed image data ready for inference with shape (1, 3, height, width).
            pad (Tuple[int, int]): Padding values (top, left) applied during letterboxing.
        """
        # Store original image dimensions for later use
        self.img_height, self.img_width = img.shape[:2]

        # Convert the image color space from BGR to RGB
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # Apply letterboxing to maintain aspect ratio.
        # BUG FIX: letterbox expects (height, width); pass them in that order.
        img_letterbox, pad = self.letterbox(img_rgb, (self.input_height, self.input_width))

        # Normalize the image data by dividing it by 255.0
        image_data = np.array(img_letterbox) / 255.0

        # Transpose the image to have the channel dimension as the first dimension (channel-first format)
        image_data = np.transpose(image_data, (2, 0, 1))

        # Expand the dimensions of the image data to match the expected input shape (add batch dimension)
        image_data = np.expand_dims(image_data, axis=0).astype(np.float32)

        return image_data, pad

    def postprocess(self, input_image: np.ndarray, output: List[np.ndarray], pad: Tuple[int, int]) -> np.ndarray:
        """
        Perform post-processing on the model's output to extract and visualize detections.

        This method processes the raw model output to extract bounding boxes, scores, and class IDs.
        It applies non-maximum suppression to filter overlapping detections and draws the results on the input image.

        Args:
            input_image (np.ndarray): Original input image (BGR format) to draw detections on.
            output (List[np.ndarray]): The output arrays from the model.
            pad (Tuple[int, int]): Padding values (top, left) used during letterboxing.

        Returns:
            (np.ndarray): The input image with detections drawn on it (BGR format).
        """
        # Transpose and squeeze the output to remove batch dimension and align shape (num_detections, 4+num_classes)
        outputs = np.transpose(np.squeeze(output[0]))

        # Get the number of rows (detections) in the outputs array
        rows = outputs.shape[0]

        # Lists to store the bounding boxes, scores, and class IDs of valid detections
        boxes = []
        scores = []
        class_ids = []

        # Calculate the scaling factor to map letterboxed coordinates back to original image size
        gain = min(self.input_height / self.img_height, self.input_width / self.img_width)

        # Adjust bounding box coordinates by subtracting padding (reverse letterboxing)
        outputs[:, 0] -= pad[1]  # x coordinate adjustment (left padding)
        outputs[:, 1] -= pad[0]  # y coordinate adjustment (top padding)

        # Iterate over each detection to filter by confidence threshold
        for i in range(rows):
            # Extract class scores (excluding first 4 bounding box coordinates)
            classes_scores = outputs[i][4:]

            # Find the maximum score among all classes for the current detection
            max_score = np.amax(classes_scores)

            # Keep detection only if maximum score exceeds confidence threshold
            if max_score >= self.confidence_thres:
                # Get the class ID with the highest score
                class_id = np.argmax(classes_scores)

                # Extract raw bounding box coordinates (x_center, y_center, width, height)
                x_center, y_center, w, h = outputs[i][0], outputs[i][1], outputs[i][2], outputs[i][3]

                # Convert center-based coordinates to corner-based (left, top, width, height) and scale to original size
                left = int((x_center - w / 2) / gain)
                top = int((y_center - h / 2) / gain)
                width = int(w / gain)
                height = int(h / gain)

                # Append valid detection data to lists
                class_ids.append(class_id)
                scores.append(max_score)
                boxes.append([left, top, width, height])

        # Apply Non-Maximum Suppression (NMS) to filter overlapping bounding boxes
        indices = cv2.dnn.NMSBoxes(boxes, scores, self.confidence_thres, self.iou_thres)

        # Draw detections on the original input image
        for i in indices:
            box = boxes[i]
            score = scores[i]
            class_id = class_ids[i]
            self.draw_detections(input_image, box, score, class_id)

        return input_image

    def infer_single_image(self, img_path: str) -> Tuple[np.ndarray, List[np.ndarray], Tuple[int, int], np.ndarray]:
        """
        Perform inference on a single image: load image, preprocess, run model, return intermediate results.

        Args:
            img_path (str): Path to the single input image.

        Returns:
            img_raw (np.ndarray): Original raw image (BGR format) for post-processing.
            model_outputs (List[np.ndarray]): Raw output from the ONNX model.
            pad (Tuple[int, int]): Padding values from preprocessing (for post-processing).
            img_preprocessed (np.ndarray): Preprocessed image data (for debugging, optional).

        Raises:
            ValueError: If the image cannot be read from img_path.
        """
        # Load raw image using OpenCV (returns BGR format)
        img_raw = cv2.imread(img_path)
        if img_raw is None:
            raise ValueError(f"Failed to load image from path: {img_path}")

        # Preprocess the raw image
        img_preprocessed, pad = self.preprocess(img_raw)

        # Run inference with the preprocessed image data
        model_outputs = self.session.run(None, {self.model_inputs[0].name: img_preprocessed})

        return img_raw, model_outputs, pad, img_preprocessed

    def load_model(self, onnx_model: str) -> None:
        """
        Replace the active ONNX model and rebuild the inference session.

        Args:
            onnx_model (str): Path to the new ONNX model.
        """
        self.onnx_model = onnx_model
        self.session = ort.InferenceSession(self.onnx_model, sess_options=AMCT_SO, providers=["CPUExecutionProvider"])
        # BUG FIX: refresh cached input handles and dimensions — they previously
        # kept referring to the inputs of the old (replaced) session.
        self.model_inputs = self.session.get_inputs()
        self._refresh_input_dims()

    def load_data(self, input_path: str) -> None:
        """Replace the input path (single image file or a folder of images)."""
        self.input_path = input_path

    def set_suffix(self, suffix: str) -> None:
        """Set the suffix appended to saved output image filenames."""
        self.output_suffix = suffix

    def main(self) -> Optional[np.ndarray]:
        """
        Main workflow: Iterate over input path (single image/folder), perform inference, handle post-processing/storage.

        - If input_path is a **single image**: Run inference → call postprocess → save output → return output image.
        - If input_path is a **folder**: Run inference on all images → discard outputs (no postprocess/save).

        Returns:
            Optional[np.ndarray]: Output image with detections (if single image) or None (if folder).

        Raises:
            FileNotFoundError: If input_path is neither an existing file nor folder.
        """
        # Determine if input path is a file (single image) or directory (multiple images)
        if os.path.isfile(self.input_path):
            # Case 1: Single image input
            img_paths = [self.input_path]
            is_single_image = True
        elif os.path.isdir(self.input_path):
            # Case 2: Folder input (filter common image extensions)
            img_extensions = (".jpg", ".jpeg", ".png", ".bmp", ".tiff")
            img_paths = [
                os.path.join(self.input_path, f)
                for f in os.listdir(self.input_path)
                if f.lower().endswith(img_extensions)
            ]
            is_single_image = False
            print(f"Found {len(img_paths)} valid images in folder: {self.input_path}")
        else:
            raise FileNotFoundError(f"Input path not found or invalid: {self.input_path}")

        # Iterate over all image paths to perform inference
        output_image = None
        total_image = len(img_paths)
        for num, img_path in enumerate(img_paths, start=1):
            # Print progress every 10 images (and at the end) to avoid log spam
            if num % 10 == 0 or num == total_image:
                print(f"Running inference {num}/{total_image}")

            # Step 1: Perform inference on single image (get raw outputs and preprocessing data)
            img_raw, model_outputs, pad, _ = self.infer_single_image(img_path)

            # Step 2: Handle post-processing and storage ONLY if input is a single image
            if is_single_image:
                # Manually call postprocess to draw detections
                output_image = self.postprocess(img_raw, model_outputs, pad)

                # Define output path (save in output folder with the configured suffix)
                _, img_filename = os.path.split(img_path)
                img_name, img_ext = os.path.splitext(img_filename)
                output_img_path = os.path.join(self.output_path, f"{img_name}_{self.output_suffix}{img_ext}")

                # Save the output image with detections
                cv2.imwrite(output_img_path, output_image)
                print(f"Output image saved to: {output_img_path}")

        # Return output image only if input was a single image
        return output_image if is_single_image else None


if __name__ == "__main__":
    # Create an argument parser to handle command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, default=str(MY_MODEL / "yolov8n.onnx"), help="Input your ONNX model.")
    parser.add_argument("--input", type=str, default=str(MY_ASSETS / "car_and_dog.jpg"), help="Path to input image or folder.")
    parser.add_argument("--results", type=str, default=str(CURRENT_DIR / "results"), help="Path to quant result")
    parser.add_argument("--conf-thres", type=float, default=0.3, help="Confidence threshold")
    parser.add_argument("--iou-thres", type=float, default=0.5, help="NMS IoU threshold")
    args = parser.parse_args()

    # Check required dependencies (CPU-only runtime is used throughout;
    # the GPU variant is kept below for reference).
    # check_requirements("onnxruntime-gpu" if torch.cuda.is_available() else "onnxruntime")
    check_requirements("onnxruntime")

    # Create an instance of the YOLOv8 class with the specified arguments
    detection = YOLOv8(args.model, args.input, args.results, args.conf_thres, args.iou_thres)

    # Step 1: run inference once with the original float model and save the
    # annotated result (suffix "origin") for later visual comparison.
    detection.set_suffix(suffix="origin")
    output_image = detection.main()

    # Step 2: generate the AMCT quantization config file.
    config_file = str(CURRENT_DIR / 'tmp/config.json')
    amct.create_quant_config(config_file=config_file,
                             model_file=args.model,
                             skip_layers=[],
                             batch_num=1)

    # Step 3: insert quantization ops into the model; calibration statistics
    # gathered during inference will be written to record_file.
    record_file = str(CURRENT_DIR / 'tmp/record.txt')
    modified_model = str(CURRENT_DIR / 'tmp/modified_model.onnx')
    amct.quantize_model(config_file=config_file,
                        model_file=args.model,
                        modified_onnx_file=modified_model,
                        record_file=record_file)

    # Step 4: run the modified model over the calibration dataset (folder mode:
    # inference only, no post-processing), then save the quantized model.
    detection.load_model(modified_model)
    detection.load_data(str(MY_DATASET))
    detection.main()
    quant_model_path = str(CURRENT_DIR / 'results/yolov8')
    amct.save_model(modified_onnx_file=modified_model,
                    record_file=record_file,
                    save_path=quant_model_path)

    # Step 5: reload and test the quantized model for 'Fakequant'
    # (simulated-quantization accuracy check on the original input image).
    quant_model = os.path.join(args.results, 'yolov8_fake_quant_model.onnx')
    detection.load_model(quant_model)
    detection.load_data(args.input)
    detection.set_suffix(suffix="fake_quant")
    output_image = detection.main()

