import struct
import ctypes
import pylynchipsdk as sdk
from common.python.infer_process import *
from common.python.callback_data_struct import *
from ctypes import *
import ipeParamYolov8
import ipeParamSlowfast
from common.python.plugin_utils import *
from common.python.common import *

# Configure ctypes so PyCapsule_GetPointer yields a raw pointer usable in
# plugin parameter structs (see detect()/recognize() below).
pythonapi.PyCapsule_GetPointer.restype = c_void_p
pythonapi.PyCapsule_GetPointer.argtypes = [py_object, c_char_p]

# Number of frames fed to the recognition model per clip (temporal sampling
# in process_cv2_inputs).
NUM_FRAMES = 32
# Per-channel normalization constants; currently referenced only by the
# commented-out normalization path in process_cv2_inputs.
MEAN, STD = [0.45, 0.45, 0.45], [0.225, 0.225, 0.225]
# SlowFast temporal subsampling factor: slow pathway keeps T // ALPHA frames.
ALPHA = 4
# Target short-side size for box scaling and the slowfast IPE crop.
CROP_SIZE = 256
# Frames collected per detection clip; also sizes the frame copy pool and
# must match the recognition model's first input dim (asserted in recognize).
BOX_SIZE = 64

# Action label table; "ava.json" must be present in the working directory.
with open("ava.json") as f:
    labels = json.loads(f.read())

def scale_boxes(size, boxes, height, width):
    """Rescale bounding boxes so the image's short side becomes *size*.

    Args:
        size (int): target length of the image's short side.
        boxes (ndarray): `num_boxes` x 4 array; scaled in place.
        height (int): source image height.
        width (int): source image width.

    Returns:
        ndarray: the (possibly rescaled) boxes (same object as *boxes*).
    """
    # Short side already at the requested size: nothing to do.
    if min(width, height) == size:
        return boxes

    if width < height:
        # Width is the short side; the long side grows proportionally.
        scaled_long = int(math.floor(float(height) / width * size))
        boxes *= float(scaled_long) / height
    else:
        scaled_long = int(math.floor(float(width) / height * size))
        boxes *= float(scaled_long) / width
    return boxes

def pack_pathway_output(frames):
    """Build the two SlowFast pathway inputs from a single clip.

    Args:
        frames (ndarray): clip of shape (C, T, H, W).

    Returns:
        list: [slow_pathway, fast_pathway].  The fast pathway is the clip
        itself; the slow pathway uniformly samples T // ALPHA frames.
    """
    clip_len = frames.shape[1]
    picks = np.linspace(0, clip_len - 1, clip_len // ALPHA).astype(int)
    return [frames[:, picks, :, :], frames]

def array_normalize(array, mean, std):
    """Normalize *array* with per-channel mean and std.

    uint8 input is first scaled to [0, 1]; other dtypes are used as-is.

    Args:
        array (ndarray): input values.
        mean (list/ndarray): means, broadcast against the trailing axis.
        std (list/ndarray): standard deviations, broadcast likewise.

    Returns:
        ndarray: (array - mean) / std.
    """
    if array.dtype == np.uint8:
        array = array.astype(np.float32) / 255.0
    mean_arr = np.array(mean, dtype=np.float32)
    std_arr = np.array(std, dtype=np.float32)
    return (array - mean_arr) / std_arr

def process_cv2_inputs(frames):
    """Turn a list of decoded images into model-ready pathway tensors.

    Args:
        frames (list): images of shape (H, W, C) with values in [0, 255].

    Returns:
        list: one array per pathway, each shaped (1, T, H, W, C).
    """
    clip = np.array(frames, dtype=np.float32)

    # (T, H, W, C) -> (C, T, H, W)
    clip = np.transpose(clip, (3, 0, 1, 2))

    # Uniformly sample NUM_FRAMES frames across the clip.
    picks = np.linspace(0, clip.shape[1] - 1, NUM_FRAMES).astype(int)
    clip = clip[:, picks, :, :]

    # Split into [slow, fast] pathways.
    pathways = pack_pathway_output(clip)

    # Add the batch dim, then move channels last:
    # (C, T, H, W) -> (1, C, T, H, W) -> (1, T, H, W, C)
    return [p[np.newaxis].transpose(0, 2, 3, 4, 1) for p in pathways]

def dump_box_json(output_path, c_box):
    """Append the detection boxes in *c_box* to *output_path* as JSON.

    Skips writing when there are no boxes, or once the output file has
    grown to 500 MB.

    Args:
        output_path (str): file the JSON records are appended to.
        c_box: box container with .boxesnum and an indexable .boxes array
            whose entries expose xmin/ymin/xmax/ymax/score/label (bytes).
    """
    if c_box.boxesnum <= 0:
        return
    serialized = {
        "boxesnum": c_box.boxesnum,
        "boxes": [
            {
                "xmin": c_box.boxes[i].xmin,
                "ymin": c_box.boxes[i].ymin,
                "xmax": c_box.boxes[i].xmax,
                "ymax": c_box.boxes[i].ymax,
                "score": c_box.boxes[i].score,
                "label": c_box.boxes[i].label.decode("utf-8"),
            }
            for i in range(c_box.boxesnum)
        ],
    }

    with open(output_path, "a", encoding="utf-8") as f:
        # Measure the current file size before appending.
        f.seek(0, os.SEEK_END)
        file_size = f.tell()
        if file_size >= 500 * 1024 * 1024:
            print("only support 500M.", output_path, "now file size:", file_size)
            return
        json.dump(serialized, f, indent=2, ensure_ascii=False)
        f.write("\n")


def lyn_free_callback(args):
    """Stream callback: free every device allocation in *args*.

    Args:
        args (iterable): device-side addresses to release via lyn_free.

    Returns:
        int: always 0 (callback convention).
    """
    for ptr in args:
        common.error_check(sdk.lyn_free(ptr))
    return 0


def ipe_destory_callback(args):
    """Stream callback: destroy every IPE parameter object in *args*.

    Args:
        args (iterable): objects exposing a destory() method ("destory"
            follows the naming used by the IPE helpers in this project).

    Returns:
        int: always 0 (callback convention).
    """
    for ipe in args:
        ipe.destory()
    return 0


def return_buffer(callback_data) -> int:
    """Return a buffer to its buffer pool.

    Args:
        callback_data (list): [buffer_pool, buffer]; extra trailing elements
            (e.g. a debug tag, as passed by detect()) are ignored.

    Returns:
        int: always 0 (callback convention).
    """
    pool, buffer = callback_data[0], callback_data[1]
    pool.push(buffer)
    return 0


def free_device_memory(mem) -> int:
    """Release device memory previously obtained from lyn_malloc.

    Args:
        mem: device-side address to free.

    Returns:
        int: always 0 (callback convention).
    """
    sdk.lyn_free(mem)
    return 0


def return_buffers(callback_data) -> int:
    """Return every buffer in a list to the shared buffer pool.

    Args:
        callback_data (list): [buffer_pool, buffers] where the second
            element is a list of buffers to give back.

    Returns:
        int: always 0 (callback convention).
    """
    pool = callback_data[0]
    for buf in callback_data[1]:
        pool.push(buf)
    return 0


def put_to_queue(callback_data) -> int:
    """Push an element onto a queue.

    Args:
        callback_data (list): [queue, element].

    Returns:
        int: always 0 (callback convention).
    """
    target_queue, element = callback_data[0], callback_data[1]
    target_queue.put(element)
    return 0


class Encoder:
    """H.264 encoder wrapper built on the lyn venc SDK.

    Takes decoded NV12 frames (device buffers) and appends the encoded
    bitstream to *output_path* through the save_file callback machinery.
    """

    def __init__(self, ctx, framepool, vdec_out_info, output_path):
        # Pool that owns the incoming frame buffers; each frame is returned
        # to it once the encoder has consumed it (see encode()).
        self.frame_pool = framepool
        sdk.lyn_set_current_context(ctx)
        # Independent streams for the two halves of the encoder pipeline:
        # receiving encoded packets and sending raw frames.
        self.venc_recv_stream, ret = sdk.lyn_create_stream()
        common.error_check(ret, "lyn_create_stream")
        self.venc_send_stream, ret = sdk.lyn_create_stream()
        common.error_check(ret, "lyn_create_stream")
        # True until the first encode(): the encoder parameter set is
        # fetched once before any frame data is written.
        self.enc_head_flag = True
        venc_attr = sdk.lyn_venc_attr_t()
        ret = sdk.lyn_venc_set_default_params(venc_attr)
        common.error_check(ret, "lyn_venc_set_default_params")
        # H.264 at the decoder-output geometry, NV12 input, no B-frames.
        venc_attr.codec_type = sdk.lyn_codec_id_t.LYN_CODEC_ID_H264
        self.vdec_out_info = vdec_out_info
        venc_attr.width = self.vdec_out_info.width
        venc_attr.height = self.vdec_out_info.height
        venc_attr.bit_depth = 8
        venc_attr.bframes_num = 0
        venc_attr.pframes_num = 5
        venc_attr.input_format = sdk.lyn_pixel_format_t.LYN_PIX_FMT_NV12
        venc_attr.target_bitrate = 6000000
        venc_attr.level = -1
        self.venc_handle, ret = sdk.lyn_venc_open(venc_attr)
        common.error_check(ret, "lyn_venc_open")
        self.output_path = output_path
        # Host-side buffers that receive the encoded packets.
        self.venc_recv_pool = bufferpool.buffer_pool(
            self.vdec_out_info.predict_buf_size, 5
        )

    def encode(
        self,
        frame,
    ):
        """Queue one frame for encoding and schedule the packet write-back.

        Args:
            frame: decoded frame object with .data (device buffer) and .eos.
        """
        if self.enc_head_flag:
            # First call only: fetch the encoder's parameter set and write
            # it to the output file ahead of the encoded frames.
            self.enc_head_flag = False
            enc_packet = sdk.lyn_packet_t()
            enc_packet.size = self.vdec_out_info.predict_buf_size
            enc_packet.data = self.venc_recv_pool.pop()
            encode_data = save_file_cb_data()
            encode_data.output_path = self.output_path
            encode_data.packet = enc_packet
            encode_data.recv_pool = self.venc_recv_pool
            encode_data.file_path = self.output_path
            ret = sdk.lyn_venc_get_paramsset_async(
                self.venc_recv_stream, self.venc_handle, enc_packet
            )
            common.error_check(ret, "lyn_venc_get_paramsset_async")
            ret = sdk.lyn_stream_add_callback(
                self.venc_recv_stream, save_file_cb, encode_data
            )
            common.error_check(ret, "lyn_stream_add_callback")

        # Send the raw frame, then queue a matching receive for the packet.
        ret = sdk.lyn_venc_sendframe_async(
            self.venc_send_stream, self.venc_handle, frame
        )
        common.error_check(ret, "lyn_venc_sendframe_async")
        enc_packet = sdk.lyn_packet_t()
        enc_packet.size = self.vdec_out_info.predict_buf_size
        enc_packet.eos = frame.eos
        enc_packet.data = self.venc_recv_pool.pop()
        encode_data = save_file_cb_data()
        encode_data.packet = enc_packet
        encode_data.recv_pool = self.venc_recv_pool
        encode_data.file_path = self.output_path
        encode_data.output_path = self.output_path

        # Recycle the input frame buffer once the send has completed.
        ret = sdk.lyn_stream_add_callback(
            self.venc_send_stream,
            free_to_pool_callback,
            [self.frame_pool, frame.data],
        )
        common.error_check(ret, "lyn_stream_add_callback")
        ret = sdk.lyn_venc_recvpacket_async(
            self.venc_recv_stream, self.venc_handle, enc_packet
        )

        common.error_check(ret, "lyn_venc_recvpacket_async")
        # Append the encoded packet to the output file when it arrives.
        ret = sdk.lyn_stream_add_callback(
            self.venc_recv_stream, save_file_cb, encode_data
        )
        common.error_check(ret, "lyn_stream_add_callback")

    def __del__(self):
        # Drain both streams so queued callbacks run before teardown.
        sdk.lyn_synchronize_stream(self.venc_recv_stream)
        sdk.lyn_synchronize_stream(self.venc_send_stream)
        sdk.lyn_destroy_stream(self.venc_recv_stream)
        sdk.lyn_destroy_stream(self.venc_send_stream)


class OpencvWindow:
    """Copies decoded NV12 frames from the device into host numpy buffers
    and forwards them to the display queue."""

    def __init__(self, frame_pool, video_frame, vdec_out_info):
        self.frame_pool = frame_pool
        self.video_frame = video_frame
        self.vdec_out_info = vdec_out_info

    def show(self, frame):
        """Pull *frame* to the host, queue it for display, recycle its buffer.

        Returns:
            int: always 0 (callback convention).
        """
        host = np.empty(frame.size, dtype=np.uint8)
        host_ptr = sdk.lyn_numpy_to_ptr(host)
        common.error_check(
            sdk.lyn_memcpy(
                host_ptr,
                frame.data,
                frame.size,
                sdk.lyn_memcpy_dir_t.ServerToClient,
            ),
            "lyn_memcpy",
        )
        # NV12 layout: full-height luma plus half-height chroma -> H*3/2 rows.
        rows = self.vdec_out_info.height * 3 // 2
        yuv = np.reshape(host, (rows, self.vdec_out_info.width)).astype(np.uint8)
        # EOS and regular frames are queued identically.
        self.video_frame.put([yuv, frame.eos])
        self.frame_pool.push(frame.data)
        return 0


class Recognition:
    def __init__(self):
        """Declare pipeline configuration fields with neutral defaults.

        Callers are expected to assign real values before calling init().
        """
        # device / channel selection
        self.device_id = 0
        self.channel_id = 0
        # input source and result destination
        self.input_file = ""
        self.output_path = ""
        # model and plugin locations
        self.model_path = ""
        self.detect_model_path = ""
        self.recog_model_path = ""
        self.post_plugin_path = ""
        self.osd_plugin_path = ""
        # display / reporting options
        self.show_type = 1
        self.video_frame = ""
        self.print_stats = 0

    def init(self):
        """Create the device context and set up demuxer, decoder, queues and plugins."""
        self.__ctx, ret = sdk.lyn_create_context(self.device_id)
        # A path that exists on disk is a local file; anything else is treated
        # as a stream source that may need re-opening (see decoder_send).
        self.use_local_input = os.path.exists(self.input_file)

        common.error_check(ret, "lyn_create_context")

        self.init_demuxer()
        self.init_decoder()
        self.create_queue()
        self.register_plugin()

    def init_decoder(self):
        """Open the video decoder and allocate the frame buffer pools."""
        sdk.lyn_set_current_context(self.__ctx)
        self.__vdec_attr = sdk.lyn_vdec_attr_t()
        self.__vdec_attr.codec_id = self.codec_para.codec_id
        # Decode to NV12 at the source resolution (no scaling).
        self.__vdec_attr.output_fmt = sdk.lyn_pixel_format_t.LYN_PIX_FMT_NV12
        self.__vdec_attr.scale = sdk.lyn_scale_t.SCALE_NONE
        self.__vdec_hdl, ret = sdk.lyn_vdec_open(self.__vdec_attr)
        common.error_check(ret, "lyn_vdec_open")
        self.vdec_out_info, ret = sdk.lyn_vdec_get_out_info(
            self.codec_para, self.__vdec_attr
        )
        common.error_check(ret, "lyn_vdec_get_out_info")
        # Small pool for raw decoder output; the BOX_SIZE-deep pool holds the
        # per-frame copies batched for recognition (see decoder_recv).
        self.frame_pool = bufferpool.buffer_pool(self.vdec_out_info.predict_buf_size, 5)
        self.frame_pool_copy = bufferpool.buffer_pool(self.vdec_out_info.predict_buf_size, BOX_SIZE)

    def init_demuxer(self):
        """Open the demuxer on the input file/URL and read its codec parameters."""
        self.__demux_hdl, ret = sdk.lyn_demux_open(self.input_file)
        common.error_check(ret, "lyn_demux_open")
        self.codec_para, ret = sdk.lyn_demux_get_codec_para(self.__demux_hdl)
        common.error_check(ret, "lyn_demux_get_codec_para")

    def create_queue(self):
        """Create the blocking queues that link the pipeline stages."""
        # decoder hand-off (send -> recv)
        self.__send_queue, self.__recv_queue = block_queue(), block_queue()
        # detector and recognizer outputs
        self.__detect_result_queue = block_queue()
        self.__recognize_result_queue = block_queue()

    def register_plugin(self):
        """Register the post-process and OSD plugins with the SDK."""
        for attr, path in (
            ("post_plugin", self.post_plugin_path),
            ("osd_plugin", self.osd_plugin_path),
        ):
            handle, ret = sdk.lyn_plugin_register(path)
            common.error_check(ret, "lyn_plugin_register")
            setattr(self, attr, handle)

    def run(self, cancel_flag):
        """Start every pipeline stage on its own daemonless thread.

        Args:
            cancel_flag: shared flag; a truthy .value asks the stages to stop.
        """
        def spawn(target):
            # Every stage receives the same cancel flag.
            return threading.Thread(target=target, args=(cancel_flag,))

        # Decode: packet sender, then frame receiver.
        self.__decode_send_thread = spawn(self.decoder_send)
        self.__decode_send_thread.start()
        self.__decode_recv_thread = spawn(self.decoder_recv)
        self.__decode_recv_thread.start()

        # Detection stage.
        self.__detect_thread = spawn(self.detect)
        self.__detect_thread.start()

        # Recognition stage.
        self.__recognize_thread = spawn(self.recognize)
        self.__recognize_thread.start()

        # Display / save stage.
        self.__generate_result_thread = spawn(self.generate_result)
        self.__generate_result_thread.start()

    def decoder_send(self, cancel_flag):
        """Demux packets and feed them to the decoder (runs on its own thread).

        Handles three special cases:
        * ret == 101003: handled as a stream change — tears down and rebuilds
          the demuxer/decoder and restarts the dependent threads.
        * EOF/read failure on a non-local (stream) input: re-opens the
          demuxer and keeps going.
        * cancel_flag set: forces EOS so the pipeline drains and exits.
        """
        # Bind this thread to the device context and create the send stream.
        sdk.lyn_set_current_context(self.__ctx)
        send_stream, ret = sdk.lyn_create_stream()
        common.error_check(ret, "lyn_create_stream")
        eos, reconnect = False, False
        while not eos:
            # Read one packet from the demuxer.
            pkt, ret = sdk.lyn_demux_read_packet(self.__demux_hdl)
            # common.error_check(ret, "lyn_demux_read_packet")
            eos = pkt.eos
            # NOTE(review): 101003 appears to be the SDK's "stream parameters
            # changed" error code (the branch below rebuilds at a new
            # resolution) — confirm against the SDK reference.
            reconnect = ret == 101003
            if reconnect:
                eos = False
                # Tell decoder_recv to stop, wait for the downstream threads,
                # then rebuild demuxer/decoder for the new stream parameters.
                self.__send_queue.put((eos, reconnect))
                ret = sdk.lyn_vdec_send_packet_async(send_stream, self.__vdec_hdl, pkt)
                self.__decode_recv_thread.join()
                self.__generate_result_thread.join()
                self.frame_pool.free_buffers()
                sdk.lyn_vdec_close(self.__vdec_hdl)
                sdk.lyn_demux_close(self.__demux_hdl)

                self.init_demuxer()
                self.init_decoder()
                print(
                    f"cnannel {self.device_id}_{self.channel_id} changed resolution to: {self.vdec_out_info.width}, {self.vdec_out_info.height}"
                )

                # Restart the receive and result threads against the new decoder.
                self.__decode_recv_thread = threading.Thread(
                    target=self.decoder_recv, args=(cancel_flag,)
                )
                self.__decode_recv_thread.start()
                self.__generate_result_thread = threading.Thread(
                    target=self.generate_result, args=(cancel_flag,)
                )
                self.__generate_result_thread.start()
                continue

            # Stream source hit EOF/read error: retry after a short pause.
            if (eos or ret == lynEEOF) and not self.use_local_input:
                sdk.lyn_demux_close(self.__demux_hdl)
                time.sleep(500.0 / 1000)
                print("demux failed, reconnecting...")
                self.__demux_hdl, ret = sdk.lyn_demux_open(self.input_file)
                common.error_check(ret, "lyn_demux_open")
                eos = False
                continue

            # External cancellation: force EOS so downstream stages drain.
            if cancel_flag.value:
                eos = True

            # Hand the packet to the decoder.
            ret = sdk.lyn_vdec_send_packet_async(send_stream, self.__vdec_hdl, pkt)
            common.error_check(ret, "lyn_vdec_send_packet_async")
            ret = sdk.lyn_synchronize_stream(send_stream)
            common.error_check(ret, "lyn_synchronize_stream")
            # Free the packet memory and notify the receiver thread.
            if not eos:
                sdk.lyn_demux_free_packet(pkt)
            self.__send_queue.put((eos, reconnect))

    def decoder_recv(self, cancel_flag):
        """Receive decoded frames and queue device-side copies for the detector.

        Runs on its own thread; exits on EOS or when decoder_send signals a
        reconnect (it restarts this thread after rebuilding the decoder).
        """
        sdk.lyn_set_current_context(self.__ctx)
        recv_stream, ret = sdk.lyn_create_stream()
        common.error_check(ret, "lyn_create_stream")
        eos, reconnect = False, False
        while not eos:
            # decoder_send posts one (eos, reconnect) pair per packet sent.
            eos, reconnect = self.__send_queue.take()
            if reconnect:
                # Forward a reconnect marker downstream and stop this thread.
                cb_data = recv_cb_data()
                cb_data.reconnect = reconnect
                ret = sdk.lyn_stream_add_callback(
                    recv_stream, put_to_queue, [self.__recv_queue, cb_data]
                )
                self.__send_queue.clear()
                break

            recv_frame = sdk.lyn_frame_t()
            recv_frame.eos = eos
            recv_frame.data = self.frame_pool.pop()
            recv_frame.size = self.vdec_out_info.predict_buf_size

            ret = sdk.lyn_vdec_recv_frame_async(
                recv_stream, self.__vdec_hdl, recv_frame
            )
            common.error_check(ret, "lyn_vdec_recv_frame_async")

            # Copy into one of frame_pool_copy's BOX_SIZE buffers so a full
            # batch of frames can later be run through inference in one shot.
            cb_data = recv_cb_data()
            cb_data.frame.eos = eos
            cb_data.frame.data = self.frame_pool_copy.pop()
            cb_data.frame.size = self.vdec_out_info.predict_buf_size
            cb_data.frame_pool = self.frame_pool_copy
            cb_data.block_queue = self.__recv_queue
            cb_data.video_frame = self.video_frame

            common.error_check(sdk.lyn_memcpy_async(
                recv_stream,
                cb_data.frame.data,
                recv_frame.data,
                self.vdec_out_info.predict_buf_size,
                sdk.lyn_memcpy_dir_t.ServerToServer,
            ), "decoder_recv lyn_memcpy_async")

            # Return the decoder's buffer to its pool once the copy is done.
            common.error_check(sdk.lyn_stream_add_callback(
                recv_stream, free_to_pool_callback, [self.frame_pool, recv_frame.data]
            ))

            # Hand the copy to the detect thread.
            ret = sdk.lyn_stream_add_callback(
                recv_stream, put_to_queue, [self.__recv_queue, cb_data]
            )
            common.error_check(ret, "lyn_stream_add_callback")


    def detect_callback(self, callback_data):
        """Stream callback: copy post-process results to host, queue the clip.

        Args:
            callback_data (list): [detect_frame_clip, pose_info_device_ptr]
                where the second element is the device buffer filled by the
                pose post-process plugin.

        Returns:
            int: always 0 (callback convention).
        """
        # Copy only once per clip: the host-side box info is stored on the
        # clip's first frame entry.
        if not callback_data[0][0].host_box_info:
            size = ctypes.sizeof(LynPoseInfo)
            host_buf_arr = np.zeros(size, dtype=np.uint8)
            host_buf = sdk.lyn_numpy_to_ptr(host_buf_arr)
            ret = sdk.lyn_memcpy(
                host_buf,
                callback_data[1],
                size,
                sdk.lyn_memcpy_dir_t.ServerToClient,
            )
            common.error_check(ret, "detect_callback lyn_memcpy")
            callback_data[0][0].host_box_info = host_buf_arr
        self.__detect_result_queue.put(callback_data[0])
        return 0

    def detect(self, cancel_flag):
        """Detection stage: batch BOX_SIZE frames, run pose detection on the
        clip's middle frame, and forward clip + box results downstream.

        Pipeline per clip: IPE preprocessing -> detection model on the APU ->
        pose post-process plugin; events order the three streams.
        """
        sdk.lyn_set_current_context(self.__ctx)
        # One stream per pipeline stage.
        ipe_stream, ret = sdk.lyn_create_stream()
        common.error_check(ret, "lyn_create_stream")
        apu_stream, ret = sdk.lyn_create_stream()
        common.error_check(ret, "lyn_create_stream")
        post_stream, ret = sdk.lyn_create_stream()
        common.error_check(ret, "lyn_create_stream")
        ipe_event, ret = sdk.lyn_create_event()
        common.error_check(ret, "lyn_create_event")
        apu_event, ret = sdk.lyn_create_event()
        common.error_check(ret, "lyn_create_event")
        detect_model, ret = sdk.lyn_load_model(self.detect_model_path)
        common.error_check(ret, "lyn_load_model")

        # Model geometry (input dims indexed as height=dims[1], width=dims[2],
        # channels=dims[3] — presumably NHWC; confirm against the model).
        model_desc, ret = sdk.lyn_model_get_desc(detect_model)
        common.error_check(ret, "lyn_model_get_desc")
        batch_size = model_desc.inputTensorAttrArray[0].batchSize
        model_width = model_desc.inputTensorAttrArray[0].dims[2]
        model_height = model_desc.inputTensorAttrArray[0].dims[1]

        model_output_width = model_desc.outputTensorAttrArray[0].dims[2]
        model_output_height = model_desc.outputTensorAttrArray[0].dims[1]

        ipe_output_size = (
            model_width * model_height * model_desc.inputTensorAttrArray[0].dims[3]
        )
        apu_output_size = model_desc.outputDataLen
        apu_buffer_pool = bufferpool.buffer_pool(apu_output_size, 1)
        ipe_buffer_pool = bufferpool.buffer_pool(ipe_output_size, 1)

        # Configure the IPE (image preprocessing) for YOLOv8-sized input.
        self.ipeYolov8 = ipeParamYolov8.IpeParamYolov8(model_width, model_height)
        self.ipeYolov8.set_img_info(
            self.vdec_out_info.width,
            self.vdec_out_info.height,
            sdk.lyn_pixel_format_t.LYN_PIX_FMT_NV12,
        )

        # Device buffer receiving the plugin's pose/box output, plus its raw
        # pointer for the plugin parameter struct.
        pose_info_apu_ptr, ret = sdk.lyn_malloc(ctypes.sizeof(LynPoseInfo))
        common.error_check(ret)
        pose_info_apu_ptr_c = pythonapi.PyCapsule_GetPointer(pose_info_apu_ptr, None)

        eos = False
        while not eos:
            reconnect = False
            detect_frame_clip = []
            # Collect one clip of BOX_SIZE frames from the decoder.
            for _ in range(BOX_SIZE):
                cb_data = self.__recv_queue.take()

                eos = cb_data.frame.eos
                if cb_data.reconnect:
                    # Source reconnecting: flush the partial clip downstream
                    # (its last entry tagged reconnect) and drop queued frames.
                    reconnect = True
                    if detect_frame_clip:
                        detect_frame_clip[-1].reconnect = cb_data.reconnect
                    sdk.lyn_stream_add_callback(
                        post_stream, put_to_queue, [self.__detect_result_queue, detect_frame_clip]
                    )
                    self.__recv_queue.clear()
                    detect_frame_clip.clear()
                    break
                else:
                    df = detect_frame()
                    df.frame = cb_data.frame
                    detect_frame_clip.append(df)

                if eos:
                    break

            if reconnect:
                continue
            elif eos:
                # Forward the final (possibly short) clip so downstream drains.
                sdk.lyn_stream_add_callback(
                    post_stream, put_to_queue, [self.__detect_result_queue, detect_frame_clip]
                )
                break

            ipe_buffer = ipe_buffer_pool.pop()
            apu_buffer = apu_buffer_pool.pop()

            # Detection runs only on the clip's middle frame.
            mid_data = detect_frame_clip[BOX_SIZE // 2]

            self.ipeYolov8.calc_param(ipe_stream, mid_data.frame.data, ipe_buffer)

            # APU waits for IPE preprocessing to complete.
            sdk.lyn_record_event(ipe_stream, ipe_event)
            sdk.lyn_stream_wait_event(apu_stream, ipe_event)

            sdk.lyn_execute_model_async(
                apu_stream, detect_model, ipe_buffer, apu_buffer, batch_size
            )

            # Post-process waits for the model to complete.
            sdk.lyn_record_event(apu_stream, apu_event)
            sdk.lyn_stream_wait_event(post_stream, apu_event)

            apu_output_data_c = pythonapi.PyCapsule_GetPointer(apu_buffer, None)

            # Fill the plugin parameter block; letterbox pad/ratio come from
            # the IPE so the plugin can map boxes back to image coordinates.
            post_para = PosePostProcessInfo_t()
            post_para.model_width = model_output_width
            post_para.model_height = model_output_height
            post_para.img_width = self.vdec_out_info.width
            post_para.img_height = self.vdec_out_info.height
            post_para.padx = self.ipeYolov8.m_iPadX
            post_para.pady = self.ipeYolov8.m_iPadY
            post_para.ratio = self.ipeYolov8.m_fRatio
            post_para.output_tensor = apu_output_data_c
            post_para.poseInfo = pose_info_apu_ptr_c

            # Serialize the struct to raw bytes for the plugin call.
            post_para_s = ctypes.string_at(
                ctypes.addressof(post_para), ctypes.sizeof(post_para)
            )
            common.error_check(
                sdk.lyn_plugin_run_async(
                    post_stream,
                    self.post_plugin,
                    "lynPosePostProcess",
                    post_para_s,
                    ctypes.sizeof(PosePostProcessInfo_t),
                )
            )
            # NOTE(review): `ret` is stale here (last assigned by lyn_malloc
            # above); the plugin call's status is already checked by the
            # wrapping error_check on the previous statement.
            common.error_check(ret, "lyn_plugin_run_async")

            # Copy results to host + queue the clip for recognition, then
            # recycle the IPE/APU buffers.
            sdk.lyn_stream_add_callback(
                post_stream, self.detect_callback, [detect_frame_clip, pose_info_apu_ptr]
            )
            sdk.lyn_stream_add_callback(
                post_stream, return_buffer, [ipe_buffer_pool, ipe_buffer, "ipe_buffer"]
            )
            sdk.lyn_stream_add_callback(
                post_stream, return_buffer, [apu_buffer_pool, apu_buffer, "apu_buffer"]
            )

        # Drain all streams, then tear everything down.
        sdk.lyn_synchronize_stream(ipe_stream)
        sdk.lyn_synchronize_stream(apu_stream)
        sdk.lyn_synchronize_stream(post_stream)
        self.ipeYolov8.destory()
        apu_buffer_pool.free_buffers()
        ipe_buffer_pool.free_buffers()
        sdk.lyn_free(pose_info_apu_ptr)
        sdk.lyn_destroy_event(ipe_event)
        sdk.lyn_destroy_event(apu_event)
        sdk.lyn_destroy_stream(ipe_stream)
        sdk.lyn_destroy_stream(apu_stream)
        sdk.lyn_destroy_stream(post_stream)
        sdk.lyn_unload_model(detect_model)

    def put_to_recognize_result_queue(self, d):
        """Copy frame *d* back into the decoder-bound pool, then queue it.

        Original note (translated): the codec is bound to fixed buffer
        addresses (at most 25), so the frame must be copied back into
        frame_pool's reusable addresses before being handed downstream.
        NOTE(review): frame_pool is created with 5 buffers in init_decoder —
        the "25" above may be stale; confirm.
        """
        decoder_frame_pool_data = self.frame_pool.pop()
        common.error_check(sdk.lyn_memcpy_async(
            self.osd_stream,
            decoder_frame_pool_data,
            d.frame.data,
            d.frame.size,
            sdk.lyn_memcpy_dir_t.ServerToServer,
        ), "lyn_memcpy_async to frame_pool")

        # Return the copy-pool buffer once the async copy ahead of it is done.
        common.error_check(sdk.lyn_stream_add_callback(
            self.osd_stream, free_to_pool_callback, [self.frame_pool_copy, d.frame.data]
        ))

        sdk.lyn_synchronize_stream(self.osd_stream)
        # From here on the frame points at the decoder-pool copy.
        d.frame.data = decoder_frame_pool_data

        sdk.lyn_stream_add_callback(
            self.osd_stream, put_to_queue, [self.__recognize_result_queue, d]
        )

    def recognize(self, cancel_flag):
        sdk.lyn_set_current_context(self.__ctx)
        ipe_stream, ret = sdk.lyn_create_stream()
        common.error_check(ret, "lyn_create_stream")
        apu_stream, ret = sdk.lyn_create_stream()
        common.error_check(ret, "lyn_create_stream")
        self.osd_stream, ret = sdk.lyn_create_stream()
        common.error_check(ret, "lyn_create_stream")

        ipe_event, ret = sdk.lyn_create_event()
        common.error_check(ret, "lyn_create_event")
        apu_event, ret = sdk.lyn_create_event()
        common.error_check(ret, "lyn_create_event")
        recognize_model, ret = sdk.lyn_load_model(self.recog_model_path)
        common.error_check(ret, "lyn_load_model")

        # 获取模型信息
        model_desc, ret = sdk.lyn_model_get_desc(recognize_model)
        common.error_check(ret, "lyn_model_get_desc")
        batch_size = model_desc.inputTensorAttrArray[0].batchSize
        
        assert BOX_SIZE == model_desc.inputTensorAttrArray[0].dims[0]
        mid_box_idx = BOX_SIZE // 2

        model_input_size = model_desc.inputDataLen
        model_output_size = model_desc.outputDataLen

        self.ipeSlowfast = ipeParamSlowfast.IpeParamSlowfast(None, None)
        self.ipeSlowfast.set_img_info(
            self.vdec_out_info.width,
            self.vdec_out_info.height,
            sdk.lyn_pixel_format_t.LYN_PIX_FMT_NV12,
            CROP_SIZE
        )

        model_input_img_h = model_desc.inputTensorAttrArray[1].dims[2]
        model_input_img_w = model_desc.inputTensorAttrArray[1].dims[3]
        model_input_img_c = model_desc.inputTensorAttrArray[1].dims[4]

        ipe_output_size = model_input_img_h * model_input_img_w * model_input_img_c

        ipe_buffer_pool = bufferpool.buffer_pool(ipe_output_size, 1)

        input_size0, _ = sdk.lyn_model_get_input_tensor_data_len_by_index(recognize_model, 0)
        input_size1, _ = sdk.lyn_model_get_input_tensor_data_len_by_index(recognize_model, 1)
        input_size2, _ = sdk.lyn_model_get_input_tensor_data_len_by_index(recognize_model, 2)

        input_sizes = [input_size0, input_size1, input_size2]

        model_input_buffer_pool = bufferpool.buffer_pool(model_input_size, 1)
        model_output_buffer_pool = bufferpool.buffer_pool(model_output_size, 1)

        pHostBoxesInfo = Box()
        eos = False

        while True:
            cb_data: list[detect_frame] = self.__detect_result_queue.take()
            if not cb_data:
                continue
            eos = cb_data[-1].frame.eos
            if cb_data[-1].reconnect:
                for data in cb_data:
                    self.put_to_recognize_result_queue(data)
                self.__detect_result_queue.clear()
                continue
            if eos or len(cb_data) < BOX_SIZE:
                for data in cb_data:
                    self.put_to_recognize_result_queue(data)
                break

            host_buf = sdk.lyn_numpy_to_ptr(cb_data[0].host_box_info)
            host_buf_c = pythonapi.PyCapsule_GetPointer(host_buf, None)
            detect_info = ctypes.cast(host_buf_c, ctypes.POINTER(LynPoseInfo)).contents

            if detect_info.boxesNum <= 0:
                for d in cb_data:
                    self.put_to_recognize_result_queue(d)
                continue

            boxes_np = np.array([
                [detect_info.boxes[i].xmin, detect_info.boxes[i].ymin, detect_info.boxes[i].xmax, detect_info.boxes[i].ymax] 
                for i in range(detect_info.boxesNum)
            ], dtype=np.float32)

            scale_boxes(CROP_SIZE, boxes_np, self.vdec_out_info.height, self.vdec_out_info.width)

            boxes_with_index = np.zeros((boxes_np.shape[0], boxes_np.shape[1] + 1), dtype=boxes_np.dtype)
            boxes_with_index[:, 1:] = boxes_np

            boxes_padded = np.zeros((model_desc.inputTensorAttrArray[0].dims[0], model_desc.inputTensorAttrArray[0].dims[1]), dtype=boxes_with_index.dtype)
            n = min(boxes_with_index.shape[0], model_desc.inputTensorAttrArray[0].dims[0])
            boxes_padded[:n] = boxes_with_index[:n]

            buffer_shape = [model_desc.inputTensorAttrArray[0].dims[0], model_input_img_h, model_input_img_w, model_input_img_c]
            host_buffers = np.empty(buffer_shape, dtype=np.uint8)

            ipe_buffer = ipe_buffer_pool.pop()
            for i, data in enumerate(cb_data):
                self.ipeSlowfast.calc_param(ipe_stream, data.frame.data, ipe_buffer)
                common.error_check(sdk.lyn_memcpy_async(
                    ipe_stream,
                    sdk.lyn_numpy_to_ptr(host_buffers[i]),  # 使用预分配数组的第i个位置
                    ipe_buffer,
                    ipe_output_size,
                    sdk.lyn_memcpy_dir_t.ServerToClient,
                ), "ipe_out lyn_memcpy_async")

            common.error_check(sdk.lyn_synchronize_stream(ipe_stream), "recognize synchronize ipe_stream")

            model_inputs = process_cv2_inputs(host_buffers)
            del host_buffers

            input0 = boxes_padded.astype(SDK_DTYPE[model_desc.inputTensorAttrArray[0].dtype])
            input1 = model_inputs[0].astype(SDK_DTYPE[model_desc.inputTensorAttrArray[1].dtype])
            input2 = model_inputs[1].astype(SDK_DTYPE[model_desc.inputTensorAttrArray[2].dtype])

            # input_ptrs = [sdk.lyn_numpy_to_ptr(input0), sdk.lyn_numpy_contiguous_to_ptr(input1)[0], sdk.lyn_numpy_contiguous_to_ptr(input2)[0]]
            contiguous_arr1 = input1.copy() #不连续的内存需要连续
            contiguous_arr2 = input2.copy()
            input_ptrs = [sdk.lyn_numpy_to_ptr(input0), sdk.lyn_numpy_to_ptr(contiguous_arr1), sdk.lyn_numpy_to_ptr(contiguous_arr2)]
            del model_inputs

            model_input_buffer = model_input_buffer_pool.pop()
            target_seek = model_input_buffer

            for i in range(3):
                common.error_check(sdk.lyn_memcpy_async(
                    ipe_stream,
                    target_seek,
                    input_ptrs[i],
                    input_sizes[i],
                    sdk.lyn_memcpy_dir_t.ClientToServer,
                ), f"lyn_memcpy input_ptr{i}")

                target_seek = sdk.lyn_addr_seek(target_seek, input_sizes[i])

            common.error_check(sdk.lyn_synchronize_stream(ipe_stream), "recognize synchronize ipe_stream")
            # del input_ptrs

            model_output_buffer = model_output_buffer_pool.pop()
            sdk.lyn_record_event(ipe_stream, ipe_event)
            sdk.lyn_stream_wait_event(apu_stream, ipe_event)

            common.error_check(
                sdk.lyn_execute_model_async(
                    apu_stream,
                    recognize_model,
                    model_input_buffer,
                    model_output_buffer,
                    batch_size,
                )
            )
            
            model_output_buffer_np = np.zeros(model_output_size, dtype=np.uint8)
        
            common.error_check(sdk.lyn_memcpy_async(
                apu_stream,
                sdk.lyn_numpy_to_ptr(model_output_buffer_np),
                model_output_buffer,
                model_output_size,
                sdk.lyn_memcpy_dir_t.ServerToClient,
            ), f"lyn_memcpy model_output_buffer")

            common.error_check(
                sdk.lyn_stream_add_callback(
                    apu_stream, return_buffers, [ipe_buffer_pool, [ipe_buffer]]
                )
            )
            common.error_check(
                sdk.lyn_stream_add_callback(
                    apu_stream, return_buffers, [model_input_buffer_pool, [model_input_buffer]]
                )
            )
            common.error_check(
                sdk.lyn_stream_add_callback(
                    apu_stream, return_buffers, [model_output_buffer_pool, [model_output_buffer]]
                )
            )

            common.error_check(sdk.lyn_synchronize_stream(apu_stream), "recognize synchronize apu_stream")

            arr_float16 = np.frombuffer(model_output_buffer_np.tobytes(), dtype=np.float16)
            preds = arr_float16[:n * 80].reshape(n, 80)

            max_indices = np.argmax(preds, axis=1)
            pHostBoxesInfo.boxesnum = detect_info.boxesNum
            for j in range(pHostBoxesInfo.boxesnum):
                pHostBoxesInfo.boxes[j].xmin = detect_info.boxes[j].xmin
                pHostBoxesInfo.boxes[j].ymin = detect_info.boxes[j].ymin
                pHostBoxesInfo.boxes[j].xmax = detect_info.boxes[j].xmax
                pHostBoxesInfo.boxes[j].ymax = detect_info.boxes[j].ymax
                pHostBoxesInfo.boxes[j].score = round(float(preds[j][max_indices[j]]), 3) 
                pHostBoxesInfo.boxes[j].id = max_indices[j]
                pHostBoxesInfo.boxes[j].label = labels[str(max_indices[j])].encode("utf-8")

            if self.show_type < 2:
                sdk.lyn_record_event(apu_stream, apu_event)
                sdk.lyn_stream_wait_event(self.osd_stream, apu_event)
                for i in range(BOX_SIZE):
                    if i == mid_box_idx:
                        cb_data[mid_box_idx].host_box_info = pHostBoxesInfo
                        pHostBoxesInfo_np = np.frombuffer(
                            pHostBoxesInfo,
                            dtype=np.uint8,
                            count=ctypes.sizeof(Box) // np.dtype(np.uint8).itemsize,
                        )

                        pHostBoxesInfo_ptr = sdk.lyn_numpy_to_ptr(pHostBoxesInfo_np)
                        pDevBoxesInfo, ret = sdk.lyn_malloc(ctypes.sizeof(Box))
                        common.error_check(
                            sdk.lyn_memcpy_async(
                                self.osd_stream,
                                pDevBoxesInfo,
                                pHostBoxesInfo_ptr,
                                ctypes.sizeof(Box),
                                sdk.lyn_memcpy_dir_t.ClientToServer,
                            )
                        )
                        pDevBoxesInfo_ptr = pythonapi.PyCapsule_GetPointer(pDevBoxesInfo, None)
                        decodeImg_data_ptr = pythonapi.PyCapsule_GetPointer(
                            cb_data[i].frame.data, None
                        )
            
                        osd_para = struct.pack(
                            "1P3i1P4i",
                            pDevBoxesInfo_ptr,
                            self.vdec_out_info.width,
                            self.vdec_out_info.height,
                            sdk.lyn_pixel_format_t.LYN_PIX_FMT_NV12,
                            decodeImg_data_ptr,
                            24,
                            4,
                            2,
                            4,
                        )
                        common.error_check(
                            sdk.lyn_plugin_run_async(
                                self.osd_stream,
                                self.osd_plugin,
                                "lynDrawBoxAndText",
                                osd_para,
                                len(osd_para),
                            )
                        )

                        common.error_check(
                            sdk.lyn_stream_add_callback(
                                self.osd_stream,
                                free_device_memory,
                                pDevBoxesInfo,
                            )
                        )
                    else:
                        cb_data[i].host_box_info = None

                    self.put_to_recognize_result_queue(cb_data[i])

            else:
                for i in range(BOX_SIZE):
                    if i == mid_box_idx:
                        cb_data[mid_box_idx].host_box_info = pHostBoxesInfo
                    else:
                        cb_data[i].host_box_info = None

                    self.put_to_recognize_result_queue(cb_data[i])
            
            common.print_frame_rate(
                f"{self.device_id}_{self.channel_id}", None
            )

        sdk.lyn_synchronize_stream(ipe_stream)
        sdk.lyn_synchronize_stream(apu_stream)
        sdk.lyn_synchronize_stream(self.osd_stream)
        ipe_buffer_pool.free_buffers()
        model_input_buffer_pool.free_buffers()
        model_output_buffer_pool.free_buffers()
        self.ipeSlowfast.destory()

        sdk.lyn_destroy_event(ipe_event)
        sdk.lyn_destroy_event(apu_event)
        sdk.lyn_destroy_stream(ipe_stream)
        sdk.lyn_destroy_stream(apu_stream)
        sdk.lyn_destroy_stream(self.osd_stream)
        sdk.lyn_unload_model(recognize_model)

    def generate_result(self, cancel_flag):
        """Consume recognition results and store or display them.

        Dispatches on ``self.show_type``:
          * 0 — render each frame in an OpenCV window
          * 1 — encode frames into the output video file
          * otherwise — dump per-frame box info as JSON and recycle the
            frame buffer back into the pool

        Runs until an EOS frame arrives, or until a reconnect request is
        seen on the recognize-result queue (which also drains the queue).
        """
        sdk.lyn_set_current_context(self.__ctx)

        # Build the sink matching the requested display mode up front.
        sink = None
        if self.show_type == 1:
            sink = Encoder(
                self.__ctx, self.frame_pool, self.vdec_out_info, self.output_path
            )
        elif self.show_type == 0:
            sink = OpencvWindow(
                self.frame_pool, self.video_frame, self.vdec_out_info
            )

        while True:
            item = self.__recognize_result_queue.take()
            if item.reconnect:
                # Upstream is reconnecting: discard everything still queued.
                self.__recognize_result_queue.clear()
                break
            current = item.frame
            if self.show_type == 0:
                sink.show(current)
            elif self.show_type == 1:
                sink.encode(current)
            else:
                # Persist the box info (if any) as JSON, then hand the
                # frame buffer back to the pool.
                if item.host_box_info:
                    dump_box_json(self.output_path, item.host_box_info)
                del item.host_box_info
                self.frame_pool.push(current.data)
            if current.eos:
                # EOS frames are still displayed/encoded/saved above
                # before the loop terminates.
                break

        if self.show_type == 1:
            sink.venc_recv_pool.free_buffers()
            sdk.lyn_venc_close(sink.venc_handle)
            del sink

    def close(self):
        """Tear down the pipeline: join worker threads, then release SDK
        handles, buffer pools, plugins and the device context."""
        # Wait for each worker thread that is still running; threads that
        # already finished (or were never started) are skipped.
        workers = (
            self.__decode_send_thread,
            self.__decode_recv_thread,
            self.__detect_thread,
            self.__recognize_thread,
            self.__generate_result_thread,
        )
        for worker in workers:
            if worker.is_alive():
                worker.join()

        sdk.lyn_vdec_close(self.__vdec_hdl)
        sdk.lyn_demux_close(self.__demux_hdl)
        self.frame_pool.free_buffers()
        self.frame_pool_copy.free_buffers()
        sdk.lyn_plugin_unregister(self.post_plugin)
        sdk.lyn_plugin_unregister(self.osd_plugin)
        sdk.lyn_destroy_context(self.__ctx)
