#!/usr/bin/env python3
# -*- coding:utf-8 -*-

import os
import time
from glob import glob
import numpy as np
import cv2
from tqdm import tqdm
from rknnlite.api import RKNNLite

# ----------------------------
# Utility Functions
# ----------------------------

def compute_iou(box, boxes):
    """Vectorized IoU between one box and an array of boxes (xyxy format).

    Args:
        box: array-like of 4 floats, (x1, y1, x2, y2).
        boxes: (N, 4) array of boxes in the same format.

    Returns:
        (N,) array of IoU values in [0, 1].
    """
    # Corners of the intersection rectangle with each candidate box.
    ix1 = np.maximum(box[0], boxes[:, 0])
    iy1 = np.maximum(box[1], boxes[:, 1])
    ix2 = np.minimum(box[2], boxes[:, 2])
    iy2 = np.minimum(box[3], boxes[:, 3])

    # Negative extents mean "no overlap", hence the clamp to zero.
    intersection = np.maximum(0, ix2 - ix1) * np.maximum(0, iy2 - iy1)

    area_single = (box[2] - box[0]) * (box[3] - box[1])
    areas_many = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])

    # Epsilon guards against division by zero for degenerate boxes.
    return intersection / (area_single + areas_many - intersection + 1e-6)

def nms(boxes, scores, iou_threshold):
    """Greedy non-maximum suppression.

    Args:
        boxes: (N, 4) array of boxes in xyxy format.
        scores: (N,) array of confidence scores.
        iou_threshold: boxes overlapping a kept box at or above this IoU
            are suppressed.

    Returns:
        List of indices of the boxes to keep, highest score first.
    """
    order = np.argsort(scores)[::-1]  # candidate indices, best score first
    kept = []
    while order.size > 0:
        best = order[0]
        kept.append(best)
        if order.size == 1:
            break
        rest = order[1:]

        # IoU of the winning box against every remaining candidate
        # (same math as compute_iou, inlined over the index subset).
        x1 = np.maximum(boxes[best, 0], boxes[rest, 0])
        y1 = np.maximum(boxes[best, 1], boxes[rest, 1])
        x2 = np.minimum(boxes[best, 2], boxes[rest, 2])
        y2 = np.minimum(boxes[best, 3], boxes[rest, 3])
        inter = np.maximum(0, x2 - x1) * np.maximum(0, y2 - y1)
        area_best = (boxes[best, 2] - boxes[best, 0]) * (boxes[best, 3] - boxes[best, 1])
        area_rest = (boxes[rest, 2] - boxes[rest, 0]) * (boxes[rest, 3] - boxes[rest, 1])
        ious = inter / (area_best + area_rest - inter + 1e-6)

        # Drop everything that overlaps the winner too much.
        order = rest[ious < iou_threshold]
    return kept

def xywh2xyxy(x):
    """Convert boxes from (cx, cy, w, h) to (x1, y1, x2, y2).

    Works on any array whose last axis holds the 4 box values; the input
    is not modified.
    """
    half_w = x[..., 2] / 2
    half_h = x[..., 3] / 2
    out = np.copy(x)
    out[..., 0] = x[..., 0] - half_w  # left
    out[..., 1] = x[..., 1] - half_h  # top
    out[..., 2] = x[..., 0] + half_w  # right
    out[..., 3] = x[..., 1] + half_h  # bottom
    return out

# ----------------------------
# Drawing
# ----------------------------

# Detection class labels; index == class id produced by the model head.
class_names = ['charger']
# Fixed seed so each class keeps the same BGR color across runs.
rng = np.random.default_rng(3)
colors = rng.uniform(50, 200, size=(len(class_names), 3))

def draw_detections(image, boxes, scores, class_ids):
    """Return a copy of `image` with detection boxes and labels drawn.

    Args:
        image: BGR image (H, W, 3) as a NumPy array.
        boxes: iterable of xyxy boxes (NumPy arrays; cast to int here).
        scores: iterable of confidence scores in [0, 1].
        class_ids: iterable of indices into the module-level `class_names`.

    Returns:
        A new image; the input is not modified.
    """
    image_cp = image.copy()
    height, width = image_cp.shape[:2]
    font_scale = min(height, width) * 0.0006
    # Fix: int(min(h, w) * 0.001) truncates to 0 for any image smaller than
    # 1000 px, and OpenCV rejects a non-positive text thickness — clamp to 1.
    thickness = max(1, int(min(height, width) * 0.001))

    for box, score, cls_id in zip(boxes, scores, class_ids):
        # Fix: colors[cls_id] is a NumPy float array; OpenCV expects a plain
        # scalar tuple for the color argument.
        color = tuple(int(c) for c in colors[cls_id])
        x1, y1, x2, y2 = box.astype(int)
        cv2.rectangle(image_cp, (x1, y1), (x2, y2), color, 2)
        label = f"{class_names[cls_id]} {int(score * 100)}%"
        # Filled background strip so the white label text stays readable.
        (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, font_scale, thickness)
        cv2.rectangle(image_cp, (x1, y1), (x1 + tw, y1 - int(th * 1.2)), color, -1)
        cv2.putText(image_cp, label, (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255), thickness, cv2.LINE_AA)

    return image_cp

# ----------------------------
# YOLOv8 RKNN Lite Detector
# ----------------------------

class YOLOv8:
    """YOLO detector running on an RKNN Lite NPU runtime.

    Typical use: construct once with a .rknn model path, then call
    `detect_objects(image_path)` per image and read the results from
    `self.boxes` / `self.scores` / `self.class_ids`, or render them
    with `draw()`.
    """

    def __init__(self, model_path, conf_thres=0.3, iou_thres=0.2):
        """Load the .rknn model and initialise the runtime.

        Args:
            model_path: path to a compiled .rknn model file.
            conf_thres: minimum class score to keep a candidate box.
            iou_thres: NMS overlap threshold.
        """
        self.conf_threshold = conf_thres
        self.iou_threshold = iou_thres
        # Fixed network input resolution — presumably the export size of the
        # model; confirm against the conversion config if it changes.
        self.input_width = 640
        self.input_height = 640
        self.rknn = RKNNLite()
        self.rknn.load_rknn(model_path)
        self.rknn.init_runtime()
        # Per-image state, refreshed by detect_objects().
        self.image = None
        self.img_height = None
        self.img_width = None
        self.boxes = []
        self.scores = []
        self.class_ids = []

    def prepare_input(self, image):
        """Resize to the network input size and return an NCHW float32 batch of 1.

        Also records the original image size for later box rescaling.

        NOTE(review): no BGR->RGB conversion and no 0-1 normalisation happens
        here — presumably those steps are baked into the RKNN model; confirm
        against the model's export/quantisation config.
        """
        self.img_height, self.img_width = image.shape[:2]
        input_img = cv2.resize(image, (self.input_width, self.input_height))
        input_img = input_img.transpose(2, 0, 1).astype(np.float32)  # HWC -> CHW
        return np.expand_dims(input_img, axis=0)

    def detect_objects(self, image_path):
        """Run detection on the image at `image_path`.

        Results land in self.boxes / self.scores / self.class_ids; any
        failure (missing file, unreadable image, failed inference) clears
        them and returns early.
        """
        if not os.path.exists(image_path):
            print(f"[WARN] Image path does not exist: {image_path}")
            self.boxes, self.scores, self.class_ids = [], [], []
            return

        image = cv2.imread(image_path)
        if image is None:
            print(f"[WARN] Failed to read image: {image_path}")
            self.boxes, self.scores, self.class_ids = [], [], []
            return

        self.image = image
        input_tensor = self.prepare_input(image)
        outputs = self.rknn.inference(inputs=[input_tensor])

        # Fix: validate the outputs BEFORE touching out.shape — the debug
        # print used to run first and crashed with AttributeError/TypeError
        # whenever inference failed, bypassing this error path entirely.
        if not outputs or outputs[0] is None:
            print(f"[ERROR] Inference failed on image: {image_path}")
            self.boxes, self.scores, self.class_ids = [], [], []
            return
        print("Output shape:", [out.shape for out in outputs])

        self.boxes, self.scores, self.class_ids = self.process_output(outputs)

    def process_output(self, output):
        """Decode raw model output into (boxes, scores, class_ids) after NMS.

        NOTE(review): assumes output[0] squeezes to (4 + num_scores, N), so
        the transpose yields one row per candidate laid out as
        [cx, cy, w, h, scores...]; confirm against the exported model head.
        """
        predictions = np.squeeze(output[0]).T  # -> (N, 4 + num_scores)
        scores = np.max(predictions[:, 4:], axis=1)  # best class score per row
        mask = scores > self.conf_threshold
        predictions = predictions[mask]
        scores = scores[mask]
        if len(scores) == 0:
            return [], [], []
        class_ids = np.argmax(predictions[:, 4:], axis=1)
        boxes = self.rescale_boxes(predictions[:, :4])
        boxes = xywh2xyxy(boxes)
        keep = nms(boxes, scores, self.iou_threshold)
        return boxes[keep], scores[keep], class_ids[keep]

    def extract_boxes(self, predictions):
        """Rescale the xywh box columns to original-image space, as xyxy."""
        boxes = predictions[:, :4]
        boxes = self.rescale_boxes(boxes)
        return xywh2xyxy(boxes)

    def rescale_boxes(self, boxes):
        """Map box coordinates from network-input scale to original-image scale."""
        boxes = boxes / np.array([self.input_width, self.input_height, self.input_width, self.input_height], dtype=np.float32)
        boxes *= np.array([self.img_width, self.img_height, self.img_width, self.img_height])
        return boxes

    def draw(self):
        """Render the current detections onto the last processed image."""
        return draw_detections(self.image, self.boxes, self.scores, self.class_ids)

# ----------------------------
# Main
# ----------------------------

if __name__ == "__main__":
    images_dir = "images"
    image_paths = sorted(glob(os.path.join(images_dir, "*.jpg")))
    model_path = "weights/rkyolov10_0529.rknn"

    detector = YOLOv8(model_path)

    os.makedirs("vis_output", exist_ok=True)

    # NOTE(review): batch mode is disabled while debugging a single image;
    # to restore it, iterate `tqdm(image_paths, desc="Processing Images")`
    # instead of the hard-coded list below.
    for img_path in ["test.jpg"]:
        detector.detect_objects(img_path)
        if len(detector.boxes) == 0:
            # Fix: was an f-string with no placeholders.
            print("NO BOX FOUND")
            continue
        vis_img = detector.draw()
        save_path = os.path.join("vis_output", os.path.basename(img_path))
        cv2.imwrite(save_path, vis_img)