import cv2
import numpy as np
from scipy.special import softmax
from hobot_dnn import pyeasy_dnn as dnn  # type: ignore

from time import time
import logging

# Logging configuration.
# Library default is ERROR; applications may lower it (e.g. to DEBUG).
logging.basicConfig(
    level=logging.ERROR, # libraries usually default to INFO; users may switch to DEBUG
    format='[%(name)s] [%(asctime)s.%(msecs)03d] [%(levelname)s] %(message)s',
    datefmt='%H:%M:%S')
logger = logging.getLogger("RDK_YOLO_Detector")

# Default class labels. Adjust to match the classes your model actually detects.
coco_names = [
    "ball" # replace/extend according to your trained model's classes
    # "person", "bicycle", "car", ... (for a standard COCO model)
]

# Default BGR color palette used when drawing detection boxes.
rdk_colors = [
    (56, 56, 255), (151, 157, 255), (31, 112, 255), (29, 178, 255), (49, 210, 207), (10, 249, 72),
    (23, 204, 146), (134, 219, 61), (52, 147, 26), (187, 212, 0), (168, 153, 44), (255, 194, 0),
    (147, 69, 52), (255, 115, 100), (236, 24, 0), (255, 56, 132), (133, 0, 82), (255, 56, 203),
    (200, 149, 255), (199, 55, 255)
]


def draw_detection_on_image(img_draw, bbox, score, class_id, class_names_list, colors_list) -> None:
    """Render a single detection (bounding box plus score label) onto
    *img_draw* in place.
    """
    x1, y1, x2, y2 = bbox
    try:
        class_name = class_names_list[class_id]
        color = colors_list[class_id % len(colors_list)]
    except IndexError:
        # Unknown class id: fall back to white and a numeric label.
        logger.warning(f"Class ID {class_id} is out of range for class_names or colors. Using default.")
        color = (255, 255, 255)
        class_name = f"ID:{class_id}"

    label = f"{class_name}: {score:.2f}"
    (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
    label_x = x1
    # Place the label above the box when there is room, otherwise below.
    if y1 - 10 > label_height:
        label_y = y1 - 10
    else:
        label_y = y1 + 10

    cv2.rectangle(img_draw, (x1, y1), (x2, y2), color, 2)
    cv2.rectangle(
        img_draw, (label_x, label_y - label_height), (label_x + label_width, label_y + label_height), color, cv2.FILLED
    )
    cv2.putText(img_draw, label, (label_x, label_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)


class YOLOv8Detector:
    """YOLOv8 detector for D-Robotics RDK boards using the hobot_dnn runtime.

    Pipeline: letterbox resize -> BGR-to-NV12 conversion -> BPU inference ->
    DFL bounding-box decoding -> per-class NMS -> mapping of coordinates
    back to the original image.

    NOTE(review): the model is assumed to expose 6 output tensors in the
    fixed order [s_cls, s_box, m_cls, m_box, l_cls, l_box] for strides
    8/16/32 — confirm against the exported model if outputs change.
    """

    def __init__(self, model_path, classes_num=1, score_thres=0.25, nms_thres=0.7, reg=16, class_names=None, colors=None):
        """Load the quantized model and precompute decoding parameters.

        Args:
            model_path (str): Path to the quantized *.bin model file.
            classes_num (int): Number of classes in the detection head.
            score_thres (float): Post-sigmoid confidence threshold.
            nms_thres (float): IoU threshold used by NMS.
            reg (int): Number of DFL regression bins per box side.
            class_names (list | None): Class labels; defaults to coco_names.
            colors (list | None): BGR colors; defaults to rdk_colors.

        Raises:
            Exception: Propagated from the runtime if model loading fails.
        """
        self.model_path = model_path
        self.CLASSES_NUM = classes_num
        self.SCORE_THRESHOLD = score_thres
        self.NMS_THRESHOLD = nms_thres
        self.REG = reg
        # Inverse sigmoid of the threshold: raw logits can be filtered
        # cheaply before the sigmoid is applied to survivors only.
        self.CONF_THRES_RAW = -np.log(1 / self.SCORE_THRESHOLD - 1)

        self.class_names = class_names if class_names is not None else coco_names
        self.colors = colors if colors is not None else rdk_colors

        logger.info(f"Initializing YOLOv8Detector with model: {self.model_path}")
        logger.info(f"Params: CLASSES_NUM={self.CLASSES_NUM}, SCORE_THRESHOLD={self.SCORE_THRESHOLD}, "
                    f"NMS_THRESHOLD={self.NMS_THRESHOLD}, REG={self.REG}")

        try:
            begin_time = time()
            self.quantize_model = dnn.load(self.model_path)
            logger.debug(f"Load D-Robotics Quantize model time = {1000 * (time() - begin_time):.2f} ms")
        except Exception as e:
            logger.error(f"Failed to load model file: {self.model_path}")
            logger.error(f"Error: {e}")
            raise  # Re-raise the exception to be handled by the caller

        self._log_model_io_info()
        self._prepare_model_related_params()

    def _log_model_io_info(self):
        """Log input/output tensor metadata and cache the output properties
        for later use (dequantization scales, sanity checks)."""
        logger.info("-> input tensors")
        for i, quantize_input in enumerate(self.quantize_model[0].inputs):
            logger.info(f"  input[{i}], name={quantize_input.name}, type={quantize_input.properties.dtype}, "
                        f"shape={quantize_input.properties.shape}")

        logger.info("-> output tensors")
        self.model_outputs_properties = []
        for i, quantize_output in enumerate(self.quantize_model[0].outputs):
            logger.info(f"  output[{i}], name={quantize_output.name}, type={quantize_output.properties.dtype}, "
                        f"shape={quantize_output.properties.shape}")
            self.model_outputs_properties.append(quantize_output.properties)

    def _prepare_model_related_params(self):
        """Precompute dequantization scales, DFL weights, and per-stride
        anchor grids from the loaded model's properties.

        Raises:
            ValueError: If the model exposes fewer than 6 output tensors.
        """
        # Fixed output order is assumed: [s_cls, s_box, m_cls, m_box, l_cls, l_box].
        if len(self.model_outputs_properties) < 6:
            raise ValueError(f"Expected at least 6 output tensors, but got {len(self.model_outputs_properties)}")

        # Dequantization scales for the bbox outputs (indices 1, 3, 5).
        try:
            self.s_bboxes_scale = self.model_outputs_properties[1].scale_data[np.newaxis, :]
            self.m_bboxes_scale = self.model_outputs_properties[3].scale_data[np.newaxis, :]
            self.l_bboxes_scale = self.model_outputs_properties[5].scale_data[np.newaxis, :]
            logger.info(f"BBox scales shapes: s={self.s_bboxes_scale.shape}, m={self.m_bboxes_scale.shape}, l={self.l_bboxes_scale.shape}")
        except AttributeError as e:
            # Some exported models may lack scale_data (already dequantized);
            # fall back to an identity scale so decoding still works.
            logger.error(f"Error accessing scale_data. Ensure model outputs are as expected. {e}")
            logger.warning("scale_data not found or in unexpected format for bbox outputs. Using default scale of 1.0.")
            self.s_bboxes_scale = 1.0
            self.m_bboxes_scale = 1.0
            self.l_bboxes_scale = 1.0

        # DFL bin indices 0..REG-1, shaped (1, 1, REG) for broadcasting over
        # (N, 4, REG) softmax outputs.
        self.weights_static = np.arange(self.REG, dtype=np.float32)[np.newaxis, np.newaxis, :]
        logger.info(f"DFL weights_static shape: {self.weights_static.shape}")

        self.input_H, self.input_W = self.quantize_model[0].inputs[0].properties.shape[2:4]
        logger.info(f"Model input H={self.input_H}, W={self.input_W}")

        # Feature-map sizes for strides 8 / 16 / 32.
        s_feat_h, s_feat_w = self.input_H // 8, self.input_W // 8
        m_feat_h, m_feat_w = self.input_H // 16, self.input_W // 16
        l_feat_h, l_feat_w = self.input_H // 32, self.input_W // 32

        # Anchor centers (x, y) per cell, flattened row-major to match the
        # flattened model outputs.
        self.s_anchor = np.stack([np.tile(np.linspace(0.5, s_feat_w - 0.5, s_feat_w), reps=s_feat_h),
                                   np.repeat(np.arange(0.5, s_feat_h + 0.5, 1)[:s_feat_h], s_feat_w)], axis=0).transpose(1, 0)
        self.m_anchor = np.stack([np.tile(np.linspace(0.5, m_feat_w - 0.5, m_feat_w), reps=m_feat_h),
                                   np.repeat(np.arange(0.5, m_feat_h + 0.5, 1)[:m_feat_h], m_feat_w)], axis=0).transpose(1, 0)
        self.l_anchor = np.stack([np.tile(np.linspace(0.5, l_feat_w - 0.5, l_feat_w), reps=l_feat_h),
                                   np.repeat(np.arange(0.5, l_feat_h + 0.5, 1)[:l_feat_h], l_feat_w)], axis=0).transpose(1, 0)
        logger.info(f"Anchor shapes: s={self.s_anchor.shape}, m={self.m_anchor.shape}, l={self.l_anchor.shape}")

        # Letterbox mapping state, refreshed on every preprocess call and
        # consumed by postprocess to map boxes back to the source image.
        self.img_h = 0
        self.img_w = 0
        self.x_scale = 1.0
        self.y_scale = 1.0
        self.x_shift = 0
        self.y_shift = 0

    def _bgr2nv12(self, bgr_img):
        """Convert a BGR image to NV12 (Y plane followed by interleaved UV).

        Returns:
            np.ndarray: Flat uint8 buffer of length H*W*3/2.
        """
        height, width = bgr_img.shape[0], bgr_img.shape[1]
        area = height * width
        yuv420p = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2YUV_I420).reshape((area * 3 // 2,))
        y = yuv420p[:area]
        # I420 stores U and V as separate planes; NV12 interleaves them.
        uv_planar = yuv420p[area:].reshape((2, area // 4))
        uv_packed = uv_planar.transpose((1, 0)).reshape((area // 2,))
        nv12 = np.zeros_like(yuv420p)
        nv12[:height * width] = y
        nv12[height * width:] = uv_packed
        return nv12

    def _preprocess_yuv420sp(self, img_bgr):
        """Letterbox-resize the image to the model input size and convert it
        to NV12. Records scale/shift factors for coordinate back-mapping.

        Returns:
            np.ndarray: NV12 input tensor buffer.
        """
        self.img_h, self.img_w = img_bgr.shape[0:2]

        # Uniform scale preserving aspect ratio (letterbox).
        self.x_scale = min(1.0 * self.input_H / self.img_h, 1.0 * self.input_W / self.img_w)
        self.y_scale = self.x_scale

        if self.x_scale <= 0 or self.y_scale <= 0:  # should not happen with valid dims
            logger.error("Invalid scale factor during preprocessing.")
            # Fallback to avoid division by zero, though this indicates a deeper issue.
            self.x_scale = 1.0
            self.y_scale = 1.0

        new_w = int(self.img_w * self.x_scale)
        self.x_shift = (self.input_W - new_w) // 2
        x_other = self.input_W - new_w - self.x_shift

        new_h = int(self.img_h * self.y_scale)
        self.y_shift = (self.input_H - new_h) // 2
        y_other = self.input_H - new_h - self.y_shift

        resized_img = cv2.resize(img_bgr, (new_w, new_h))
        # Pad with mid-gray so the padding is neutral for the network.
        input_tensor_bgr = cv2.copyMakeBorder(resized_img, self.y_shift, y_other, self.x_shift, x_other,
                                           cv2.BORDER_CONSTANT, value=[127, 127, 127])
        return self._bgr2nv12(input_tensor_bgr)

    def _forward(self, input_tensor_nv12):
        """Run BPU inference on the NV12 tensor and return the raw outputs."""
        return self.quantize_model[0].forward(input_tensor_nv12)

    def _c2numpy(self, dnn_outputs):
        """Expose the DNN output buffers as NumPy arrays."""
        return [dnnTensor.buffer for dnnTensor in dnn_outputs]

    def _decode_scale(self, cls_raw, box_raw, anchors, deq_scale, stride):
        """Decode one detection scale (one stride level).

        Filters candidates by the raw (pre-sigmoid) threshold, applies the
        sigmoid to survivors, dequantizes and DFL-decodes the box distances,
        and converts them to xyxy boxes in input-tensor pixel coordinates.

        Args:
            cls_raw (np.ndarray): (N, CLASSES_NUM) raw class logits.
            box_raw (np.ndarray): (N, REG*4) raw DFL box logits.
            anchors (np.ndarray): (N, 2) anchor centers for this stride.
            deq_scale: Dequantization scale(s) for the box logits.
            stride (int): Feature-map stride (8, 16, or 32).

        Returns:
            tuple: (bboxes_xyxy (K, 4), scores (K,), class_ids (K,)).
        """
        max_scores = np.max(cls_raw, axis=1)
        valid = np.flatnonzero(max_scores >= self.CONF_THRES_RAW)
        if valid.size == 0:
            return (np.empty((0, 4), dtype=np.float32),
                    np.array([], dtype=np.float32),
                    np.array([], dtype=np.int64))

        ids = np.argmax(cls_raw[valid, :], axis=1)
        scores = 1 / (1 + np.exp(-max_scores[valid]))  # sigmoid

        box_f32 = box_raw[valid, :].astype(np.float32) * deq_scale
        # DFL: expectation over the softmax of REG bins per box side -> ltrb
        # distances in feature-map units.
        ltrb = np.sum(softmax(box_f32.reshape(-1, 4, self.REG), axis=2) * self.weights_static, axis=2)
        anc = anchors[valid, :]
        x1y1 = anc - ltrb[:, 0:2]
        x2y2 = anc + ltrb[:, 2:4]
        return np.hstack([x1y1, x2y2]) * stride, scores, ids

    def _postprocess(self, np_outputs):
        """Decode all three scales, run per-class NMS, and map the surviving
        boxes back to original-image coordinates.

        Returns:
            list[tuple]: Detections as (class_id, score, x1, y1, x2, y2).
        """
        # Fixed output order: [s_cls, s_box, m_cls, m_box, l_cls, l_box].
        s_dbboxes, s_scores, s_ids = self._decode_scale(
            np_outputs[0].reshape(-1, self.CLASSES_NUM),
            np_outputs[1].reshape(-1, self.REG * 4),
            self.s_anchor, self.s_bboxes_scale, 8)
        m_dbboxes, m_scores, m_ids = self._decode_scale(
            np_outputs[2].reshape(-1, self.CLASSES_NUM),
            np_outputs[3].reshape(-1, self.REG * 4),
            self.m_anchor, self.m_bboxes_scale, 16)
        l_dbboxes, l_scores, l_ids = self._decode_scale(
            np_outputs[4].reshape(-1, self.CLASSES_NUM),
            np_outputs[5].reshape(-1, self.REG * 4),
            self.l_anchor, self.l_bboxes_scale, 32)

        dbboxes_list = [b for b in (s_dbboxes, m_dbboxes, l_dbboxes) if b.shape[0] > 0]
        if not dbboxes_list:  # no detections from any scale
            return []

        dbboxes = np.concatenate(dbboxes_list, axis=0)
        scores = np.concatenate((s_scores, m_scores, l_scores), axis=0)
        ids = np.concatenate((s_ids, m_ids, l_ids), axis=0)

        # cv2.dnn.NMSBoxes expects boxes as (x, y, w, h).
        hw = (dbboxes[:, 2:4] - dbboxes[:, 0:2])
        xyhw_for_nms = np.hstack([dbboxes[:, 0:2], hw])

        results = []
        for i in range(self.CLASSES_NUM):
            class_specific_indices = (ids == i)
            if not np.any(class_specific_indices):
                continue

            bboxes_for_class = xyhw_for_nms[class_specific_indices, :]
            scores_for_class = scores[class_specific_indices]
            original_dbboxes_for_class = dbboxes[class_specific_indices, :]

            nms_result = cv2.dnn.NMSBoxes(bboxes_for_class.tolist(), scores_for_class.tolist(),
                                                         self.SCORE_THRESHOLD, self.NMS_THRESHOLD)
            # Normalize the return type across OpenCV versions: it may be an
            # empty tuple, a tuple wrapping an index array, a flat ndarray,
            # an (N, 1) ndarray, or a plain list. Reduce every case to a
            # flat array of int indices (BUGFIX: the old code iterated the
            # raw nms_result, so a tuple-wrapped array was mis-indexed).
            if isinstance(nms_result, tuple):
                nms_result = nms_result[0] if len(nms_result) > 0 else []
            selected_indices = np.asarray(nms_result, dtype=np.int64).flatten()
            if selected_indices.size == 0:
                continue

            # Inverse letterbox mapping (guard against a zero scale).
            inv_x_scale = 1.0 / self.x_scale if self.x_scale != 0 else 1.0
            inv_y_scale = 1.0 / self.y_scale if self.y_scale != 0 else 1.0

            for sel_idx in selected_indices:
                x1, y1, x2, y2 = original_dbboxes_for_class[sel_idx]

                # Map coordinates back to the original image.
                orig_x1 = int((x1 - self.x_shift) * inv_x_scale)
                orig_y1 = int((y1 - self.y_shift) * inv_y_scale)
                orig_x2 = int((x2 - self.x_shift) * inv_x_scale)
                orig_y2 = int((y2 - self.y_shift) * inv_y_scale)

                # Clip coordinates to image dimensions.
                orig_x1 = max(0, min(orig_x1, self.img_w))
                orig_y1 = max(0, min(orig_y1, self.img_h))
                orig_x2 = max(0, min(orig_x2, self.img_w))
                orig_y2 = max(0, min(orig_y2, self.img_h))

                # Drop degenerate boxes (zero or negative area after clipping).
                if orig_x1 >= orig_x2 or orig_y1 >= orig_y2:
                    continue

                results.append((i, float(scores_for_class[sel_idx]), orig_x1, orig_y1, orig_x2, orig_y2))

        return results

    def get_highest_score_per_class(self, detections):
        """Keep only the single highest-scoring detection for each class.

        Args:
            detections (list[tuple]): (class_id, score, x1, y1, x2, y2) tuples.

        Returns:
            list[tuple]: At most one detection per class id.
        """
        best_detections = {}
        for det in detections:
            class_id, score, x1, y1, x2, y2 = det
            if class_id not in best_detections or score > best_detections[class_id][1]:
                best_detections[class_id] = det
        return list(best_detections.values())

    def process_frame(self, image_bgr):
        """Run the full detection pipeline on a single BGR frame.

        Args:
            image_bgr (np.ndarray): Input image in BGR format.

        Returns:
            tuple:
                - np.ndarray | None: A copy of the input image (boxes are NOT
                  drawn here; call draw_detection_on_image per detection if
                  annotation is wanted), or None when the input was None.
                - list: Detections as (class_id, score, x1, y1, x2, y2) in
                  original-image pixel coordinates, at most one per class.
        """
        if image_bgr is None:
            logger.error("Input image to process_frame is None.")
            return None, []

        # 1. Preprocess (letterbox + NV12)
        input_tensor_nv12 = self._preprocess_yuv420sp(image_bgr)

        # 2. Forward pass (inference)
        dnn_outputs = self._forward(input_tensor_nv12)

        # 3. Convert DPU outputs to NumPy arrays
        np_outputs = self._c2numpy(dnn_outputs)

        # 4. Postprocess, then keep only the best detection per class
        detections = self._postprocess(np_outputs)
        detections = self.get_highest_score_per_class(detections)

        # 5. Return a copy so callers may draw on it without mutating input.
        image_with_detections = image_bgr.copy()
        return image_with_detections, detections


def example_usage():
    """Demonstrate the detector end-to-end on a single image file.

    Loads the model, runs detection on one test image, logs each result,
    and writes the processed frame to disk.
    """
    # --- Configuration for the detector ---
    model_file = '/home/sunrise/python_project1/model/yolov8n_detect_bayese_224x224_nv12_modified.bin' # Your model path
    test_image_path = '/home/sunrise/python_project1/data/123.jpg' # Your test image
    output_image_path = 'example_result.jpg'

    num_classes = 1 # Adjust based on your model
    conf_threshold = 0.25
    nms_threshold = 0.70
    dfl_reg = 16

    logger.setLevel(logging.DEBUG) # Enable DEBUG logs for this example

    try:
        # Initialize the detector. Pass class_names=/colors= here for
        # non-COCO models with custom labels.
        detector = YOLOv8Detector(
            model_path=model_file,
            classes_num=num_classes,
            score_thres=conf_threshold,
            nms_thres=nms_threshold,
            reg=dfl_reg,
        )

        frame = cv2.imread(test_image_path)
        if frame is None:
            logger.error(f"Failed to load image: {test_image_path}")
            return

        logger.info("Processing frame...")
        t_begin = time()
        processed_frame, detected_objects = detector.process_frame(frame)
        t_finish = time()
        logger.info(f"Total processing time: {1000 * (t_finish - t_begin):.2f} ms")

        if processed_frame is None:
            logger.error("Frame processing failed.")
            return

        logger.info(f"Detections found: {len(detected_objects)}")
        names = detector.class_names
        for idx, (class_id, score, x1, y1, x2, y2) in enumerate(detected_objects):
            class_name = names[class_id] if class_id < len(names) else f"ID:{class_id}"
            logger.info(f"  Detection {idx+1}: Class='{class_name}' (ID:{class_id}), Score={score:.2f}, Box=({x1},{y1},{x2},{y2})")

        cv2.imwrite(output_image_path, processed_frame)
        logger.info(f"Result image saved to: {output_image_path}")
        # With a display environment available, the frame could instead be
        # shown via cv2.imshow / cv2.waitKey.

    except Exception as e:
        logger.error(f"An error occurred during example_usage: {e}", exc_info=True)

