#!/usr/bin/env python3
import sys
import signal
import os
import numpy as np
import cv2
import colorsys
from time import time,sleep
import multiprocessing
from threading import BoundedSemaphore
import ctypes
import json
# Camera API libs

from hobot_vio import libsrcampy as srcampy
from hobot_dnn import pyeasy_dnn as dnn
import threading

# Shared post-processing state.
# NOTE: the original file assigned image_counter/is_stop twice with the same
# values and kept a dead, commented-out signal handler; deduplicated here.
image_counter = None        # shared frame counter; replaced by multiprocessing.Value("i", 0) in __main__
is_stop = False             # set True by the SIGINT handler to stop the capture loop
output_tensors = None       # reserved; not used anywhere in this file

fcos_postprocess_info = None  # reserved for the (disabled) FCOS C post-process path
model_input_w = 640         # model input width in pixels
model_input_h = 640         # model input height in pixels

class hbSysMem_t(ctypes.Structure):
    # ctypes mirror of libdnn's system-memory descriptor (physical/virtual
    # address pair plus size).  Currently unused here: the C post-process
    # path that consumed it is commented out below.
    # NOTE(review): phyAddr is declared c_double (8 bytes).  The C side
    # presumably uses a 64-bit integer physical address — size and offset
    # match, but the value would be misread if used as a number; confirm
    # against the libdnn headers.
    _fields_ = [
        ("phyAddr",ctypes.c_double),
        ("virAddr",ctypes.c_void_p),
        ("memSize",ctypes.c_int)
    ]

class hbDNNQuantiShift_yt(ctypes.Structure):
    # ctypes mirror of libdnn's per-channel quantization shift description.
    # NOTE(review): the name ends in "_yt" (presumably a typo for "_t");
    # it is referenced by hbDNNTensorProperties_t below, so renaming would
    # need a coordinated change.
    _fields_ = [
        ("shiftLen",ctypes.c_int),     # number of shift entries
        ("shiftData",ctypes.c_char_p)  # pointer to the shift byte array
    ]

class hbDNNQuantiScale_t(ctypes.Structure):
    # ctypes mirror of libdnn's per-channel quantization scale description.
    _fields_ = [
        ("scaleLen",ctypes.c_int),                     # number of scale entries
        ("scaleData",ctypes.POINTER(ctypes.c_float)),  # pointer to the float scales
        ("zeroPointLen",ctypes.c_int),                 # number of zero-point entries
        ("zeroPointData",ctypes.c_char_p)              # pointer to the zero-point bytes
    ]

class hbDNNTensorShape_t(ctypes.Structure):
    # ctypes mirror of libdnn's tensor shape: up to 8 dimensions.
    _fields_ = [
        ("dimensionSize",ctypes.c_int * 8),  # size of each dimension (fixed 8 slots)
        ("numDimensions",ctypes.c_int)       # how many of the 8 slots are valid
    ]

class hbDNNTensorProperties_t(ctypes.Structure):
    # ctypes mirror of libdnn's tensor properties block.
    # Field order/types are ABI: they must match the C header exactly.
    _fields_ = [
        ("validShape",hbDNNTensorShape_t),    # shape of the meaningful data
        ("alignedShape",hbDNNTensorShape_t),  # shape including hardware padding
        ("tensorLayout",ctypes.c_int),        # layout code (see get_TensorLayout below)
        ("tensorType",ctypes.c_int),          # element type code
        ("shift",hbDNNQuantiShift_yt),        # per-channel quantization shifts
        ("scale",hbDNNQuantiScale_t),         # per-channel quantization scales
        ("quantiType",ctypes.c_int),          # quantization scheme selector
        ("quantizeAxis", ctypes.c_int),       # axis the per-channel params apply to
        ("alignedByteSize",ctypes.c_int),     # total buffer size in bytes
        ("stride",ctypes.c_int * 8)           # per-dimension strides (8 slots)
    ]

class hbDNNTensor_t(ctypes.Structure):
    # ctypes mirror of libdnn's tensor handle: memory block(s) + properties.
    # NOTE(review): four sysMem entries presumably cover multi-plane data
    # (e.g. NV12 planes) — confirm against the libdnn headers.
    _fields_ = [
        ("sysMem",hbSysMem_t * 4),
        ("properties",hbDNNTensorProperties_t)
    ]


# class FcosPostProcessInfo_t(ctypes.Structure):
#     _fields_ = [
#         ("height",ctypes.c_int),
#         ("width",ctypes.c_int),
#         ("ori_height",ctypes.c_int),
#         ("ori_width",ctypes.c_int),
#         ("score_threshold",ctypes.c_float),
#         ("nms_threshold",ctypes.c_float),
#         ("nms_top_k",ctypes.c_int),
#         ("is_pad_resize",ctypes.c_int)
#     ]


# libpostprocess = ctypes.CDLL('/usr/lib/libpostprocess.so')

# get_Postprocess_result = libpostprocess.FcosPostProcess
# get_Postprocess_result.argtypes = [ctypes.POINTER(FcosPostProcessInfo_t)]
# get_Postprocess_result.restype = ctypes.c_char_p

def get_TensorLayout(Layout):
    """Map a layout string to an integer layout code.

    Returns 2 for "NCHW" and 0 for anything else (e.g. "NHWC").
    """
    return 2 if Layout == "NCHW" else 0
def signal_handler(signal, frame):
    """SIGINT handler: request a clean shutdown of the capture loop.

    Only sets the module-level ``is_stop`` flag; the main loop then exits on
    its next iteration so the camera/display teardown at the end of
    ``__main__`` actually runs.  The original also called ``sys.exit(0)``
    here, which raised SystemExit out of the main loop and skipped
    ``cam.close_cam()`` / ``disp.close()``.  (The redundant
    ``is_stop = False`` re-initialisation that preceded this function is
    dropped; the flag is already defined at module top.)
    """
    global is_stop
    print("Stopping!\n")
    is_stop = True


def get_display_res():
    """Query the HDMI display resolution as ``(width, height)``.

    Runs ``/usr/bin/get_hdmi_res`` when present and parses its output as two
    comma-separated integers.  Given the clamp limits below, the tool
    apparently prints "height,width" — TODO confirm on target.  Falls back
    to 1920x1080 when the helper binary is missing.  The helper's exit code
    is deliberately ignored (matches the original behavior).
    """
    if not os.path.exists("/usr/bin/get_hdmi_res"):
        return 1920, 1080

    import subprocess
    # subprocess.run waits for the helper and closes its pipes; check=False
    # keeps the original ignore-exit-code behavior.
    result = subprocess.run(["/usr/bin/get_hdmi_res"],
                            stdout=subprocess.PIPE, check=False)
    res = result.stdout.split(b',')
    width = max(min(int(res[1]), 1920), 0)   # clamp width to [0, 1920]
    height = max(min(int(res[0]), 1080), 0)  # clamp height to [0, 1080]
    return width, height

# Display resolution used for overlay drawing and the camera's
# display-sized output stream.
disp_w, disp_h = get_display_res()

# detection model class names
def get_classes():
    """Return the 80 COCO detection class names as a numpy string array."""
    names = (
        "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
        "truck", "boat", "traffic light", "fire hydrant", "stop sign",
        "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep",
        "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
        "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard",
        "sports ball", "kite", "baseball bat", "baseball glove", "skateboard",
        "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork",
        "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange",
        "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair",
        "couch", "potted plant", "bed", "dining table", "toilet", "tv",
        "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave",
        "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase",
        "scissors", "teddy bear", "hair drier", "toothbrush",
    )
    return np.array(names)


def get_hw(pro):
    """Return (height, width) from tensor properties for NCHW or NHWC layout."""
    h_axis = 2 if pro.layout == "NCHW" else 1
    return pro.shape[h_axis], pro.shape[h_axis + 1]

def print_properties(pro):
    """Print a tensor-properties object's type, dtype, layout and shape."""
    fields = (
        ("tensor type:", pro.tensor_type),
        ("data type:", pro.dtype),
        ("layout:", pro.layout),
        ("shape:", pro.shape),
    )
    for label, value in fields:
        print(label, value)


class ParallelExector(object):
    """Dispatches post-processing jobs either inline or to a process pool.

    With ``parallel_num == 1`` every job runs synchronously in the caller;
    otherwise a multiprocessing.Pool handles jobs, throttled by a bounded
    semaphore so at most ``parallel_num`` jobs are in flight.
    """

    def __init__(self, counter, parallel_num=4):
        # Publish the shared frame counter for the module-level run().
        global image_counter
        image_counter = counter
        self.parallel_num = parallel_num
        if parallel_num == 1:
            return  # synchronous mode: no pool, no semaphore
        self.workers = BoundedSemaphore(self.parallel_num)
        self._pool = multiprocessing.Pool(processes=self.parallel_num,
                                          maxtasksperchild=5)

    def infer(self, output):
        """Run post-processing on one frame's output buffers."""
        if self.parallel_num != 1:
            # Block until a worker slot frees up, then hand off to the pool.
            self.workers.acquire()
            self._pool.apply_async(func=run,
                                   args=(output, ),
                                   callback=self.task_done,
                                   error_callback=print)
        else:
            run(output)

    def task_done(self, *args, **kwargs):
        """Pool callback: release one worker slot once a job completes."""
        self.workers.release()

    def close(self):
        """Shut the pool down (no-op in synchronous mode)."""
        pool = getattr(self, "_pool", None)
        if pool is not None:
            pool.close()
            pool.join()


def limit_display_cord(coor):
    """Clamp an [x1, y1, x2, y2] box to the display bounds, in place.

    y1's lower bound is 2 rather than 0 to leave room for the label text
    drawn above the box.  Returns the same (mutated) list.
    """
    upper = (disp_w, disp_h, disp_w, disp_h)
    lower = (0, 2, 0, 0)
    for i in range(4):
        coor[i] = max(min(upper[i], coor[i]), lower[i])
    return coor


def scale_bbox(bbox, input_w, input_h, output_w, output_h):
    """Rescale an [x1, y1, x2, y2] box between resolutions.

    Coordinates are scaled by output/input ratios per axis and truncated to
    ints (same truncation as the original int() casts).
    """
    sx = output_w / input_w
    sy = output_h / input_h
    return [int(v * s) for v, s in zip(bbox, (sx, sy, sx, sy))]


def run(outputs):
    """Decode one frame's centre-point detector outputs and draw the results.

    Parameters
    ----------
    outputs : list
        Six output buffers ordered as (heatmap, regression) pairs for the
        80x80, 40x40 and 20x20 feature-map scales.  The main loop passes
        plain numpy arrays (each ``item.buffer``); tensor objects exposing
        a ``.buffer`` attribute are accepted as well.

    Side effects: draws rectangles and labels through the global ``disp``
    display object, increments the shared ``image_counter`` and prints an
    FPS summary once 100 frames have been processed.
    """
    global image_counter, disp, model_input_w, model_input_h, start_time
    model_input_w = 640  # model input width (pixels)
    model_input_h = 640  # model input height (pixels)
    conf_thres = 0.5     # confidence threshold (0..1)
    iou_thres = 0.6      # NMS IoU threshold
    classes = get_classes()
    all_boxes = []       # accumulated [x1, y1, x2, y2] boxes in model-input coords
    all_confs = []       # matching confidences

    def _as_array(tensor_or_buffer):
        # The main loop hands over item.buffer (a plain ndarray).  The
        # original code unconditionally read .properties/.buffer here and
        # raised AttributeError on ndarrays, which the pool's
        # error_callback=print silently swallowed every frame.
        return np.asarray(getattr(tensor_or_buffer, "buffer", tensor_or_buffer))

    def _update_fps():
        # Count this frame; report throughput when exactly 100 frames are done.
        with image_counter.get_lock():
            image_counter.value += 1
        if image_counter.value == 100:
            finish_time = time()
            fps = 100 / (finish_time - start_time)
            print(f"100 frames | Total time: {finish_time - start_time:.2f}s | FPS: {fps:.2f}")

    # Outputs are grouped per scale: (heatmap, regression parameters).
    scale_groups = [
        (outputs[0], outputs[1]),  # 80x80 scale
        (outputs[2], outputs[3]),  # 40x40 scale
        (outputs[4], outputs[5]),  # 20x20 scale
    ]
    # Feature-map -> input-image stride for each scale (640 / map size).
    scales = [640 / 80, 640 / 40, 640 / 20]  # 8, 16, 32

    for (heatmap_tensor, reg_tensor), scale in zip(scale_groups, scales):
        # Heatmap (confidence map).  NOTE(review): buffer layout assumed to
        # be NHWC (1, H, W, 1) — confirm against the model output properties.
        heatmap = _as_array(heatmap_tensor)
        heatmap = heatmap.reshape(heatmap.shape[1], heatmap.shape[2])  # (H, W)
        # Sigmoid: the raw outputs may be logits; map them to 0..1.
        heatmap = 1 / (1 + np.exp(-heatmap))

        # Regression parameters, assumed (1, H, W, C) with C >= 4.
        reg_params = _as_array(reg_tensor)
        reg_params = reg_params.reshape(
            reg_params.shape[1], reg_params.shape[2], reg_params.shape[3])  # (H, W, C)

        # Cells at or above the threshold, in the same row-major (y, x)
        # order as the original per-pixel double loop, but vectorized.
        ys, xs = np.where(heatmap >= conf_thres)
        if ys.size == 0:
            continue
        confs = heatmap[ys, xs]

        # First four regression channels assumed dx, dy, w, h.
        # NOTE(review): channel meaning depends on the model head; adjust
        # these indices if the boxes come out wrong.
        dx = reg_params[ys, xs, 0]  # x offset of the centre (feature-map units)
        dy = reg_params[ys, xs, 1]  # y offset of the centre
        w = reg_params[ys, xs, 2]   # box width (feature-map units)
        h = reg_params[ys, xs, 3]   # box height (feature-map units)

        # Feature-map coords -> input-image coords (add offset, apply stride).
        center_x = (xs + dx) * scale
        center_y = (ys + dy) * scale
        w_orig = w * scale
        h_orig = h * scale

        # Centre/size -> corner coordinates, clipped to the 640x640 input.
        x1 = np.clip(center_x - w_orig / 2, 0, model_input_w)
        y1 = np.clip(center_y - h_orig / 2, 0, model_input_h)
        x2 = np.clip(center_x + w_orig / 2, 0, model_input_w)
        y2 = np.clip(center_y + h_orig / 2, 0, model_input_h)

        all_boxes.extend(np.stack([x1, y1, x2, y2], axis=1).tolist())
        all_confs.extend(confs.tolist())

    # No candidate boxes: still count the frame for the FPS statistics.
    if not all_boxes:
        _update_fps()
        return

    # NMS across all scales.  OpenCV wants [x, y, w, h] integer boxes; the
    # int32 cast reproduces the original truncation.
    all_boxes_np = np.array(all_boxes, dtype=np.int32)
    all_boxes_nms = [[bx1, by1, bx2 - bx1, by2 - by1]
                     for bx1, by1, bx2, by2 in all_boxes_np]
    indices = cv2.dnn.NMSBoxes(
        bboxes=all_boxes_nms,
        scores=list(all_confs),
        score_threshold=conf_thres,
        nms_threshold=iou_thres,
        top_k=50  # keep at most 50 boxes
    )
    # Depending on the OpenCV version, NMSBoxes returns an (N, 1) int array
    # or an empty tuple; the original .flatten() crashed on the empty tuple.
    indices = np.asarray(indices, dtype=np.int64).reshape(-1)

    if len(indices) > 0:
        for idx in indices:
            if idx < 0 or idx >= len(all_boxes):
                print(f"无效索引{idx}，跳过")
                continue

            # Box and confidence that survived NMS.
            x1, y1, x2, y2 = all_boxes[idx]
            conf = all_confs[idx]
            # NOTE(review): get_classes() returns the 80 COCO names, so this
            # always labels "person"; the original comments claimed a
            # single-class face model — confirm which model is deployed.
            class_name = classes[0]

            # Scale the box from model input (640x640) to display resolution.
            bbox_scaled = scale_bbox(
                bbox=[x1, y1, x2, y2],
                input_w=model_input_w,
                input_h=model_input_h,
                output_w=disp_w,
                output_h=disp_h
            )
            # Clamp to the display and round to integer pixels.
            bbox_clamped = limit_display_cord(bbox_scaled)
            bbox_clamped = [round(i) for i in bbox_clamped]

            # Drawing configuration.  NOTE(review): (0, 0, 255) is red only
            # if read as BGR; the ARGB word below puts 255 in the blue
            # channel — confirm the intended colour on the display.
            draw_text = f"{class_name}: {conf:.2f}".encode('gb2312')
            box_color = (0, 0, 255)
            argb_color = 0xFF000000 | (box_color[0] << 16) | (box_color[1] << 8) | box_color[2]

            # Log the detection.
            print(f"检测到：{class_name} | 置信度：{conf:.4f} | 位置：{bbox_clamped}")

            # Draw the bounding box (Horizon disp API); flush the overlay
            # buffer only on the last box.
            disp.set_graph_rect(
                x=bbox_clamped[0],
                y=bbox_clamped[1],
                w=bbox_clamped[2] - bbox_clamped[0],
                h=bbox_clamped[3] - bbox_clamped[1],
                line_width=3,
                flush=1 if idx == indices[-1] else 0,
                color=argb_color
            )

            # Draw the label 20 px above the box.
            disp.set_graph_word(
                x=bbox_clamped[0],
                y=max(0, bbox_clamped[1] - 20),
                text=draw_text,
                font_size=3,
                flush=1 if idx == indices[-1] else 0,
                color=argb_color
            )

    _update_fps()

if __name__ == '__main__':
    # Stop cleanly on Ctrl-C via the module-level is_stop flag.
    signal.signal(signal.SIGINT, signal_handler)

    # Load the compiled BPU model (expects 640x640 NV12 input per filename).
    models = dnn.load('../models/yolov8n_detect_bayese_640x640_nv12.bin')
    print("--- model input properties ---")
    # Print the input tensor's properties.
    print_properties(models[0].inputs[0].properties)
    print("--- model output properties ---")
    # Print every output tensor's properties.
    for output in models[0].outputs:
        print_properties(output.properties)
    # for i, output in enumerate(models[0].outputs):
    #     print(f"Output {i}:")
    #     print("tensor type:", output.properties.tensor_type)
    #     print("data type:", output.properties.dtype)
    #     print("layout:", output.properties.layout)
    #     print("shape:", output.properties.shape)  # correct: use properties.shape
    # Camera API, get camera object
    cam = srcampy.Camera()

    # Model input size (height, width) from the input tensor properties.
    h, w = get_hw(models[0].inputs[0].properties)
    input_shape = (h, w)
    # Open the camera with two output streams: model-sized [w, h] and
    # display-sized [disp_w, disp_h].
    # For the meaning of parameters, please refer to the relevant documents of camera
    cam.open_cam(0, -1, 30, [w, disp_w], [h, disp_h])

    # Get HDMI display object
    disp = srcampy.Display()
    # For the meaning of parameters, please refer to the relevant documents of HDMI display
    disp.display(0, disp_w, disp_h)

    # bind camera directly to display (video path bypasses Python)
    srcampy.bind(cam, disp)

    # switch disp to the overlay channel used for bbox drawing
    disp.display(3, disp_w, disp_h)

    # Per-class colors (HSV wheel -> RGB); set up for box drawing.
    classes = get_classes()
    num_classes = len(classes)
    hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))

    # fps timer and shared frame counter
    start_time = time()
    image_counter = multiprocessing.Value("i", 0)

    # post process parallel executor
    parallel_exe = ParallelExector(image_counter)

    while not is_stop:
        # image_counter += 1
        # Grab one NV12 frame sized for the model input from camera channel 2.
        cam_start_time = time()
        img = cam.get_img(2, 640, 640)
        if img is None:
            print("摄像头未返回数据，跳过")
            continue
        # NV12 is 1.5 bytes/pixel: reshape to (height*3//2, width) = (960, 640).
        img = np.frombuffer(img, dtype=np.uint8).reshape(960, 640)
        outputs = models[0].forward(img)  # forward the 2-D NV12 array
        cam_finish_time = time()

        # # Convert to numpy
        # buffer_start_time = time()
        # img = np.frombuffer(img, dtype=np.uint8)
        # buffer_finish_time = time()

        # # Forward
        # infer_start_time = time()
        # outputs = models[0].forward(img)
        # infer_finish_time = time()

        # Hand the post-process worker plain numpy buffers (each item.buffer
        # is an ndarray; tensor objects do not pickle across processes).
        output_array = []
        for item in outputs:
            output_array.append(item.buffer)
        parallel_exe.infer(output_array)
    cam.close_cam()
    disp.close()
