import cv2
import numpy as np
import onnxruntime as ort
from PIL import ImageFont, ImageDraw, Image
import matplotlib.font_manager
import sys

class YOLOONNXModel:
    """YOLOv8 Object Detection Model Class for Inference Processing."""

    def __init__(self, onnx_model, classes, inf_res, confidence_thres, iou_threshold):
        """
        Initialize the YOLOONNX model.

        Args:
            onnx_model (str): Path to the ONNX model file.
            classes (list): List of class names.
            inf_res (int): Inference resolution (the model input is inf_res x inf_res).
            confidence_thres (float): Confidence threshold.
            iou_threshold (float): IoU threshold for non-max suppression.
        """
        self.onnx_model = onnx_model
        self.confidence_thres = confidence_thres
        self.inf_res = inf_res
        self.classes = classes
        self.iou_threshold = iou_threshold
        # One random bright color (channel values in [100, 255)) per class.
        self.color_palette = np.random.uniform(100, 255, size=(len(self.classes), 3))
        # ONNX Runtime session is created lazily and cached so repeated run()
        # calls do not reload the model from disk every time.
        self._session = None

    def _get_session(self):
        """Return the cached ONNX Runtime session, creating it on first use."""
        if self._session is None:
            self._session = ort.InferenceSession(self.onnx_model)
        return self._session

    def preprocess(self, img):
        """
        Preprocess the input image for inference.

        Args:
            img (numpy.ndarray): Input BGR image (as read by cv2).

        Returns:
            numpy.ndarray: Preprocessed image data with shape
            (1, 3, inf_res, inf_res), dtype float32, values in [0, 1].
        """
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # INTER_NEAREST keeps resizing cheap; quality is usually adequate for detection.
        img = cv2.resize(img, (self.inf_res, self.inf_res), interpolation=cv2.INTER_NEAREST)
        image_data = np.array(img) / 255.0
        # HWC -> CHW, then add the batch dimension expected by the model.
        image_data = np.transpose(image_data, (2, 0, 1))
        image_data = np.expand_dims(image_data, axis=0).astype(np.float32)
        return image_data

    def postprocess(self, outputs):
        """
        Postprocess the model outputs to extract bounding boxes, class IDs, and scores.

        Args:
            outputs (list): Raw model outputs; outputs[0] is the YOLOv8 head with
                rows of (cx, cy, w, h, class scores...) after transposition.

        Returns:
            tuple: (result dict with 'boxes' (center format, model-input scale),
            'class_ids', 'scores'; class IDs array; scores array).
        """
        outputs = np.transpose(np.squeeze(outputs[0]))

        # Bounding boxes in center format: (cx, cy, w, h).
        boxes = outputs[:, :4]

        # Per-class confidences; keep the best class per candidate box.
        confidences = outputs[:, 4:]
        class_ids = np.argmax(confidences, axis=1)
        scores = np.max(confidences, axis=1)

        # Retain results with confidence greater than the threshold.
        mask = scores > self.confidence_thres
        boxes = boxes[mask]
        class_ids = class_ids[mask]
        scores = scores[mask]
        boxes = np.around(boxes, 2)
        scores = np.around(scores, 2)

        # Apply Non-Maximum Suppression (NMS). cv2.dnn.NMSBoxes expects boxes in
        # top-left (x, y, w, h) format, so convert from center format for the NMS
        # call only; the returned boxes stay in center format for draw_boxes().
        if len(boxes) > 0:
            nms_boxes = boxes.copy()
            nms_boxes[:, 0] -= nms_boxes[:, 2] / 2
            nms_boxes[:, 1] -= nms_boxes[:, 3] / 2
            indices = cv2.dnn.NMSBoxes(
                nms_boxes.tolist(), scores.tolist(), self.confidence_thres, self.iou_threshold
            )
            if len(indices) > 0:
                indices = np.asarray(indices).flatten()
                boxes = boxes[indices]
                class_ids = class_ids[indices]
                scores = scores[indices]

        # Pack data into a dictionary.
        result = {
            'boxes': boxes,
            'class_ids': class_ids,
            'scores': scores,
        }

        # class_ids and scores are also returned separately for caller convenience.
        return result, class_ids, scores

    def run(self, image_path):
        """
        Run inference on an input image.

        Args:
            image_path (numpy.ndarray): Input BGR image array (despite the
                parameter name, this is an already-decoded image, not a path —
                it is indexed with .shape and passed directly to preprocess).

        Returns:
            tuple: Image with drawn boxes, result dictionary, class IDs, and scores.
        """
        # Remember the original size so detections can be mapped back onto it.
        original_size = image_path.shape[:2]

        session = self._get_session()
        image_data = self.preprocess(image_path)
        input_name = session.get_inputs()[0].name
        output = session.run(None, {input_name: image_data})
        result, class_ids, scores = self.postprocess(output)

        # Draw bounding boxes.
        img = self.draw_boxes(image_path, result, original_size)

        return img, result, class_ids, scores

    def drawChinese(self, text, x, y, size, r, g, b, a, img):
        """
        Draw text (CJK-capable) on the image using a TrueType font.

        Args:
            text (str): Text to draw.
            x (int): X coordinate.
            y (int): Y coordinate.
            size (int): Font size.
            r (int): Red color value.
            g (int): Green color value.
            b (int): Blue color value.
            a (int): Alpha value.
            img (numpy.ndarray): Image to draw on.

        Returns:
            numpy.ndarray: Image with drawn text.
        """
        # On Linux a bundled font file is expected next to the script; elsewhere
        # the SimHei system font is located via matplotlib's font manager.
        if "linux" in sys.platform:
            font_path = 'HYQiHei_50S.ttf'
        else:
            font_path = matplotlib.font_manager.findfont('SimHei')
        font = ImageFont.truetype(font_path, size)
        img_pil = Image.fromarray(img)
        draw = ImageDraw.Draw(img_pil)
        # fill is given as (b, g, r, a): the array is BGR (OpenCV order), so the
        # swapped fill keeps the rendered color consistent with cv2 drawing.
        draw.text((x, y), text, font=font, fill=(b, g, r, a))
        frame = np.array(img_pil)
        return frame

    def draw_boxes(self, img, result, original_size):
        """
        Draw bounding boxes on the image.

        Args:
            img (numpy.ndarray): Input image (original size, BGR).
            result (dict): Result dictionary containing boxes (center format at
                model-input scale), class IDs, and scores.
            original_size (tuple): Original (height, width) of the image.

        Returns:
            numpy.ndarray: Image with drawn bounding boxes.
        """
        original_height, original_width = original_size
        # Detections are at model-input resolution; rescale to the original image.
        scale_x = original_width / self.inf_res
        scale_y = original_height / self.inf_res

        for i in range(len(result['boxes'])):
            box = result['boxes'][i]
            class_id = result['class_ids'][i]
            score = result['scores'][i]

            # Convert bounding box coordinates back to original image size.
            x, y, w, h = map(int, box * [scale_x, scale_y, scale_x, scale_y])
            # (x, y) is the box center; derive the corner coordinates.
            x1 = x - w // 2
            y1 = y - h // 2
            x2 = x1 + w
            y2 = y1 + h

            # cv2.rectangle requires a scalar/tuple color, not a NumPy array
            # (newer OpenCV versions reject array colors).
            color = tuple(int(c) for c in self.color_palette[class_id])

            # Draw the bounding box.
            cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

            # Add class label and confidence score.
            label = f"{self.classes[class_id]} {score:.2f}"
            img = self.drawChinese(label, x1, y1 + 10, 20, color[0], color[1], color[2], 255, img)

        return img