import os
import urllib
import traceback
import time
import sys
import numpy as np
import cv2
from rknn.api import RKNN
import cv2,math
from math import ceil
from itertools import product as product
from shapely.geometry import Polygon
import threading
from queue import Queue
from spirems import Subscriber, Publisher, cvimg2sms, sms2cvimg, def_msg, QoS, BaseNode, get_extra_args
from spirems.mod_helper import download_model
import argparse
from typing import Union
import platform
from copy import copy
import json


class RKNN_model_container():
    """Thin wrapper around an RKNN runtime session.

    Loads a pre-converted .rknn model, initializes the runtime (optionally
    on a specific NPU target/device), and exposes run()/release().
    """

    def __init__(self, model_path, target=None, device_id=None) -> None:
        rknn = RKNN()

        # Directly load an already-converted RKNN model (no build step).
        rknn.load_rknn(model_path)

        print('--> Init runtime environment')
        if target is None:
            ret = rknn.init_runtime()
        else:
            ret = rknn.init_runtime(target=target, device_id=device_id)
        if ret != 0:
            print('Init runtime environment failed')
            sys.exit(ret)
        print('done')

        self.rknn = rknn

    def run(self, inputs):
        """Run inference.

        :param inputs: a single array or a list/tuple of input arrays
        :return: list of output arrays, or [] if the runtime was released
        """
        if self.rknn is None:
            print("ERROR: rknn has been released")
            return []

        # Normalize a single input to the list form expected by RKNN.
        if not isinstance(inputs, (list, tuple)):
            inputs = [inputs]

        return self.rknn.inference(inputs=inputs)

    def release(self):
        """Free the underlying RKNN runtime; run() becomes a no-op afterwards."""
        self.rknn.release()
        self.rknn = None


class Letter_Box_Info():
    """Record of one letterbox transform, kept so predictions can later be
    mapped back to the original image coordinates."""

    def __init__(self, shape, new_shape, w_ratio, h_ratio, dw, dh, pad_color) -> None:
        self.origin_shape = shape        # source (h, w) before resize
        self.new_shape = new_shape       # target (h, w) after padding
        self.w_ratio, self.h_ratio = w_ratio, h_ratio  # scale factors
        self.dw, self.dh = dw, dh        # per-side padding (pixels)
        self.pad_color = pad_color       # border fill value


def coco_eval_with_json(anno_json, pred_json):
    """Run COCO bbox evaluation of detections against ground truth and print
    mAP figures.

    :param anno_json: path to a COCO ground-truth annotation JSON file
    :param pred_json: path to a COCO-format detection results JSON file
    """
    # Imported lazily so pycocotools is only required when evaluating.
    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval
    anno = COCO(anno_json)
    pred = anno.loadRes(pred_json)
    # Locals renamed from 'eval'/'map' so the builtins are not shadowed.
    coco_eval = COCOeval(anno, pred, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    mean_ap, map50 = coco_eval.stats[:2]  # (mAP@0.5:0.95, mAP@0.5)

    print('map  --> ', mean_ap)
    print('map50--> ', map50)
    print('map75--> ', coco_eval.stats[2])
    # NOTE(review): with default COCOeval params, stats[-2]/stats[-1] are
    # recall metrics, not mAP@0.85/0.95 — the labels below assume custom
    # iouThrs were configured; verify before trusting these two lines.
    print('map85--> ', coco_eval.stats[-2])
    print('map95--> ', coco_eval.stats[-1])


class COCO_test_helper():
    """Helper for COCO-style detection evaluation.

    Provides letterbox / direct-resize preprocessing, maps predictions back
    into original-image coordinates, accumulates COCO-format records and
    exports them to JSON.

    NOTE(review): the attribute name 'enable_ltter_box' is a typo for
    'enable_letter_box'; it is kept as-is because it is part of the object's
    public state and may be read externally.
    """
    def __init__(self, enable_letter_box = False) -> None:
        # Accumulated COCO-format detection records (one dict per detection).
        self.record_list = []
        self.enable_ltter_box = enable_letter_box
        if self.enable_ltter_box is True:
            # One Letter_Box_Info per preprocessed image; the most recent
            # entry ([-1]) is used when mapping results back.
            self.letter_box_info_list = []
        else:
            self.letter_box_info_list = None

    def letter_box(self, im, new_shape, pad_color=(0,0,0), info_need=False):
        """Resize `im` to fit `new_shape` (h, w) preserving aspect ratio and
        pad the remainder with `pad_color`, split evenly between both sides.

        Returns the padded image; when info_need is True also returns the
        scale ratio and the per-side padding (dw, dh).
        """
        # Resize and pad image while meeting stride-multiple constraints
        shape = im.shape[:2]  # current shape [height, width]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)

        # Scale ratio
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])

        # Compute padding
        ratio = r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding

        dw /= 2  # divide padding into 2 sides
        dh /= 2

        if shape[::-1] != new_unpad:  # resize
            im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
        # The +/-0.1 rounding keeps top+bottom (and left+right) equal to the
        # total padding even when dw/dh end in .5.
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=pad_color)  # add border
        
        if self.enable_ltter_box is True:
            # Same ratio is recorded for both axes (aspect is preserved).
            self.letter_box_info_list.append(Letter_Box_Info(shape, new_shape, ratio, ratio, dw, dh, pad_color))
        if info_need is True:
            return im, ratio, (dw, dh)
        else:
            return im

    def direct_resize(self, im, new_shape, info_need=False):
        """Resize `im` to `new_shape` (h, w) WITHOUT preserving aspect ratio.

        Records independent w/h ratios and zero padding when letterbox
        tracking is enabled. `info_need` is accepted for interface symmetry
        with letter_box() but is currently unused.
        """
        shape = im.shape[:2]
        h_ratio = new_shape[0] / shape[0]
        w_ratio = new_shape[1] / shape[1]
        if self.enable_ltter_box is True:
            self.letter_box_info_list.append(Letter_Box_Info(shape, new_shape, w_ratio, h_ratio, 0, 0, (0,0,0)))
        im = cv2.resize(im, (new_shape[1], new_shape[0]))
        return im

    def get_real_box(self, box, in_format='xyxy'):
        """Map an (N, 4) array of boxes from letterboxed-image coordinates
        back to original-image coordinates, clipping to the image bounds.

        Uses the most recent letterbox record; returns a copy, the input is
        not modified. Only 'xyxy' input format is handled.
        """
        bbox = copy(box)
        if self.enable_ltter_box == True:
        # unletter_box result
            if in_format=='xyxy':
                # x coords: remove horizontal padding, unscale, clip to width.
                bbox[:,0] -= self.letter_box_info_list[-1].dw
                bbox[:,0] /= self.letter_box_info_list[-1].w_ratio
                bbox[:,0] = np.clip(bbox[:,0], 0, self.letter_box_info_list[-1].origin_shape[1])

                bbox[:,1] -= self.letter_box_info_list[-1].dh
                bbox[:,1] /= self.letter_box_info_list[-1].h_ratio
                bbox[:,1] = np.clip(bbox[:,1], 0, self.letter_box_info_list[-1].origin_shape[0])

                bbox[:,2] -= self.letter_box_info_list[-1].dw
                bbox[:,2] /= self.letter_box_info_list[-1].w_ratio
                bbox[:,2] = np.clip(bbox[:,2], 0, self.letter_box_info_list[-1].origin_shape[1])

                bbox[:,3] -= self.letter_box_info_list[-1].dh
                bbox[:,3] /= self.letter_box_info_list[-1].h_ratio
                bbox[:,3] = np.clip(bbox[:,3], 0, self.letter_box_info_list[-1].origin_shape[0])
        return bbox

    def get_real_seg(self, seg):
        """Map a (N, H, W) segmentation mask stack from letterboxed-image
        coordinates back to original-image size.

        NOTE(review): original code carried a '#! fix side effect' marker —
        when both dw and dh are non-zero only one axis is cropped; confirm
        intended behavior.
        """
        dh = int(self.letter_box_info_list[-1].dh)
        dw = int(self.letter_box_info_list[-1].dw)
        origin_shape = self.letter_box_info_list[-1].origin_shape
        new_shape = self.letter_box_info_list[-1].new_shape
        if (dh == 0) and (dw == 0) and origin_shape == new_shape:
            return seg
        elif dh == 0 and dw != 0:
            seg = seg[:, :, dw:-dw] # a[0:-0] = [] — slicing with -0 would drop everything, hence the dw != 0 guard
        elif dw == 0 and dh != 0 : 
            seg = seg[:, dh:-dh, :]
        # Binarize, move channels last for cv2.resize, then restore (N, H, W).
        seg = np.where(seg, 1, 0).astype(np.uint8).transpose(1, 2, 0)
        seg = cv2.resize(seg, (origin_shape[1], origin_shape[0]), interpolation=cv2.INTER_LINEAR)
        if len(seg.shape) < 3:
            # cv2.resize squeezes a single-channel image to 2-D; re-add N axis.
            return seg[None,:,:]
        else:
            return seg.transpose(2,0,1)

    def add_single_record(self, image_id, category_id, bbox, score, in_format='xyxy', pred_masks = None):
        """Append one detection (optionally with an RLE-encoded mask) to the
        record list in COCO result format.

        `bbox` is given in 'xyxy' letterboxed coordinates and is MUTATED in
        place: un-letterboxed (if enabled) and converted to COCO xywh.
        """
        if self.enable_ltter_box == True:
        # unletter_box result
            if in_format=='xyxy':
                bbox[0] -= self.letter_box_info_list[-1].dw
                bbox[0] /= self.letter_box_info_list[-1].w_ratio

                bbox[1] -= self.letter_box_info_list[-1].dh
                bbox[1] /= self.letter_box_info_list[-1].h_ratio

                bbox[2] -= self.letter_box_info_list[-1].dw
                bbox[2] /= self.letter_box_info_list[-1].w_ratio

                bbox[3] -= self.letter_box_info_list[-1].dh
                bbox[3] /= self.letter_box_info_list[-1].h_ratio

        if in_format=='xyxy':
        # change xyxy to xywh
            bbox[2] = bbox[2] - bbox[0]
            bbox[3] = bbox[3] - bbox[1]
        else:
            assert False, "now only support xyxy format, please add code to support others format"
        
        def single_encode(x):
            # RLE-encode one binary mask (Fortran order, as pycocotools expects).
            from pycocotools.mask import encode
            rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
            rle["counts"] = rle["counts"].decode("utf-8")
            return rle

        if pred_masks is None:
            self.record_list.append({"image_id": image_id,
                                    "category_id": category_id,
                                    "bbox":[round(x, 3) for x in bbox],
                                    'score': round(score, 5),
                                    })
        else:
            rles = single_encode(pred_masks)
            self.record_list.append({"image_id": image_id,
                                    "category_id": category_id,
                                    "bbox":[round(x, 3) for x in bbox],
                                    'score': round(score, 5),
                                    'segmentation': rles,
                                    })
    
    def export_to_json(self, path):
        """Write all accumulated records to `path` as a COCO results JSON."""
        with open(path, 'w') as f:
            json.dump(self.record_list, f)

class DetectBox:
    """One oriented detection: class id, confidence, axis-aligned corner
    coordinates (pre-rotation) and the rotation angle in radians.

    NMS marks a suppressed box by setting classId to -1.
    """

    def __init__(self, classId, score, xmin, ymin, xmax, ymax, angle):
        self.classId = classId
        self.score = score
        self.xmin, self.ymin = xmin, ymin
        self.xmax, self.ymax = xmax, ymax
        self.angle = angle

def rotate_rectangle(x1, y1, x2, y2, a):
    """Rotate the axis-aligned rectangle (x1, y1)-(x2, y2) by angle `a`
    (radians) around its center.

    Returns the four rotated corners as int tuples in the order
    (x1,y1) -> (x1,y2) -> (x2,y2) -> (x2,y1).
    """
    cx, cy = (x1 + x2) / 2, (y1 + y2) / 2
    cos_a, sin_a = math.cos(a), math.sin(a)

    def rot(px, py):
        # Rotate one corner about (cx, cy) and truncate to int.
        dx, dy = px - cx, py - cy
        return (int(dx * cos_a - dy * sin_a + cx),
                int(dx * sin_a + dy * cos_a + cy))

    top_left = rot(x1, y1)
    bottom_right = rot(x2, y2)
    bottom_left = rot(x1, y2)
    top_right = rot(x2, y1)
    return [top_left, bottom_left, bottom_right, top_right]

def rotated_rect_to_xywh(rotated_rect):
    """Convert a rotated rect ((center_x, center_y), (width, height), angle)
    to the plain xywh of its axis-aligned bounding rectangle.

    Returns an (x, y, w, h) tuple.
    """
    # boxPoints gives the four rotated corners (4x2 array); boundingRect
    # wraps them in the tightest upright rectangle.
    corners = cv2.boxPoints(rotated_rect)
    return cv2.boundingRect(corners)

def intersection(g, p):
    """IoU of two rotated boxes given as flat 8-value corner arrays
    (x1, y1, ..., x4, y4).

    Returns 0 when either polygon is invalid or the union is empty.
    """
    g = np.asarray(g)
    p = np.asarray(p)
    g = Polygon(g[:8].reshape((4, 2)))
    p = Polygon(p[:8].reshape((4, 2)))
    if not g.is_valid or not p.is_valid:
        return 0
    # g and p are already Polygons — no need to re-wrap them as the
    # original code did.
    inter = g.intersection(p).area
    union = g.area + p.area - inter
    if union == 0:
        return 0
    return inter / union

def NMS(detectResult, nmsThresh):
    """Greedy rotated-box non-maximum suppression.

    Boxes are visited in descending score order; any same-class box whose
    rotated-polygon IoU with a kept box exceeds `nmsThresh` is suppressed
    (its classId is set to -1 in place). Returns the kept boxes.
    """
    kept = []
    ordered = sorted(detectResult, key=lambda box: box.score, reverse=True)

    for i, cand in enumerate(ordered):
        if cand.classId == -1:
            continue  # already suppressed by a higher-scoring box
        kept.append(cand)
        poly_i = np.array(
            rotate_rectangle(cand.xmin, cand.ymin, cand.xmax, cand.ymax, cand.angle)
        ).reshape(-1)
        for other in ordered[i + 1:]:
            if other.classId != cand.classId:
                continue
            poly_j = np.array(
                rotate_rectangle(other.xmin, other.ymin, other.xmax, other.ymax, other.angle)
            ).reshape(-1)
            if intersection(poly_i, poly_j) > nmsThresh:
                other.classId = -1
    return kept

def sigmoid(x):
    """Element-wise logistic function 1 / (1 + e^(-x))."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)

def softmax(x, axis=-1):
    """Numerically stable softmax along `axis`.

    The per-slice maximum is subtracted before exponentiation to avoid
    overflow; the result sums to 1 along `axis`.
    """
    shifted = x - np.max(x, axis=axis, keepdims=True)
    e = np.exp(shifted)
    return e / e.sum(axis=axis, keepdims=True)


def letterbox_resize(image, size, bg_color):
    """Aspect-preserving resize onto a padded canvas of the given size.

    :param image: input image as a NumPy array, or a file path to read
    :param size: target (width, height)
    :param bg_color: fill value for the padded border
    :return: (padded image, scale factor, x offset, y offset)
    """
    if isinstance(image, str):
        image = cv2.imread(image)

    dst_w, dst_h = size
    src_h, src_w, _ = image.shape

    # Single scale factor so the whole image fits inside the target box.
    scale = min(dst_w / src_w, dst_h / src_h)
    scaled_w, scaled_h = int(src_w * scale), int(src_h * scale)

    resized = cv2.resize(image, (scaled_w, scaled_h), interpolation=cv2.INTER_AREA)

    # Fill the canvas with bg_color and center the resized image on it.
    canvas = np.ones((dst_h, dst_w, 3), dtype=np.uint8) * bg_color
    pad_x = (dst_w - scaled_w) // 2
    pad_y = (dst_h - scaled_h) // 2
    canvas[pad_y:pad_y + scaled_h, pad_x:pad_x + scaled_w] = resized
    return canvas, scale, pad_x, pad_y


class YOLOv11ObbNode_Rknn(threading.Thread, BaseNode):
    """SpireMS node that runs YOLOv11-OBB (oriented bounding box) inference.

    Subscribes to an image topic, runs detection on a worker thread
    (RKNN / ONNX / PyTorch backend chosen from the model file extension),
    and publishes oriented-box targets plus a visualization stream.
    """

    def __init__(
        self,
        job_name: str,
        ip: str = '127.0.0.1',
        port: int = 9094,
        param_dict_or_file: Union[dict, str] = None,
        sms_shutdown: bool = True,
        **kwargs
    ):
        threading.Thread.__init__(self)
        # sms_shutdown may arrive as a CLI string; normalize to bool.
        sms_shutdown = True if sms_shutdown in ['True', 'true', '1', True] else False
        BaseNode.__init__(
            self,
            self.__class__.__name__,
            job_name,
            ip=ip,
            port=port,
            param_dict_or_file=param_dict_or_file,
            sms_shutdown=sms_shutdown,
            **kwargs
        )
        # Node parameters (see params_help() output for descriptions).
        self.launch_next_emit = self.get_param("launch_next_emit", True)
        self.specified_input_topic = self.get_param("specified_input_topic", "")
        self.specified_output_topic = self.get_param("specified_output_topic", "")
        self.realtime_det = self.get_param("realtime_det", True)
        self.remote_ip = self.get_param("remote_ip", "127.0.0.1")
        self.remote_port = self.get_param("remote_port", 9094)
        self.confidence = self.get_param("confidence", 0.25)
        self.nms_thresh = self.get_param("nms_thresh", 0.45)
        self.imgsz = self.get_param("imgsz", [640, 640])
        self.dataset_name = self.get_param("dataset_name", "dota_v1.0")
        self.model_path = self.get_param("model_path", "sms::yolo11n-obb.rknn")
        self.target = self.get_param("target", "rk3588")
        self.device_id = self.get_param("device_id", "")
        self.use_shm = self.get_param("use_shm", -1)
        self.g_dataset_categories = self.get_param("/dataset_categories", {})
        self.params_help()

        # Shared memory for image publishing: explicit (1) or auto on Linux (-1).
        self.b_use_shm = False
        if self.use_shm == 1 or (self.use_shm == -1 and platform.system() == 'Linux'):
            self.b_use_shm = True

        # "sms::" prefixed models are fetched from the model store.
        if self.model_path.startswith("sms::"):
            self.local_model_path = download_model(self.__class__.__name__, self.model_path)
            assert self.local_model_path is not None
        else:
            self.local_model_path = self.model_path

        self.dataset_categories = self.g_dataset_categories[self.dataset_name]
        self.model, self.platform = self.setup_model()

        input_url = '/' + job_name + '/sensor/image_raw'
        if len(self.specified_input_topic) > 0:
            input_url = self.specified_input_topic

        output_url = '/' + job_name + '/detector/results'
        if len(self.specified_output_topic) > 0:
            output_url = self.specified_output_topic

        self.job_queue = Queue()
        self.queue_pool.append(self.job_queue)

        self._image_reader = Subscriber(
            input_url, 'std_msgs::Null', self.image_callback,
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._result_writer = Publisher(
            output_url, 'spirecv_msgs::2DTargets',
            ip=self.remote_ip, port=self.remote_port, qos=QoS.Reliability
        )
        self._show_writer = Publisher(
            '/' + job_name + '/detector/image_results', 'memory_msgs::RawImage' if self.b_use_shm else 'sensor_msgs::CompressedImage',
            ip=ip, port=port
        )
        # Only created when the node drives a dataset feeder.
        if self.launch_next_emit:
            self._next_writer = Publisher(
                '/' + job_name + '/launch_next', 'std_msgs::Boolean',
                ip=ip, port=port, qos=QoS.Reliability
            )

        self.start()

    def trans_det_results(self, predbox, aspect_ratio, offset_x, offset_y, h, w, roi=None):
        """Convert NMS-filtered DetectBox results (letterboxed coordinates)
        into a 'spirecv_msgs::2DTargets' message in original-image coords.

        :param predbox: list of DetectBox, or None
        :param aspect_ratio: scale factor used by letterbox_resize
        :param offset_x: horizontal letterbox padding (pixels)
        :param offset_y: vertical letterbox padding (pixels)
        :param h: original image height
        :param w: original image width
        :param roi: optional [x1, y1, x2, y2] crop applied before inference
        """
        sms_results = def_msg('spirecv_msgs::2DTargets')

        sms_results["file_name"] = ""
        sms_results["height"] = h
        sms_results["width"] = w
        sms_results["targets"] = []
        if roi is not None:
            # Report the ROI back as [x, y, w, h].
            sms_results["rois"] = [[roi[0], roi[1], roi[2] - roi[0], roi[3] - roi[1]]]

        if predbox is not None:
            for det in predbox:
                ann = dict()
                ann["category_name"] = self.dataset_categories[det.classId]
                ann["category_id"] = int(det.classId)
                ann["score"] = float(round(det.score, 3))
                # Undo the letterbox: remove padding offset, then unscale.
                xmin = int((det.xmin - offset_x) / aspect_ratio)
                ymin = int((det.ymin - offset_y) / aspect_ratio)
                xmax = int((det.xmax - offset_x) / aspect_ratio)
                ymax = int((det.ymax - offset_y) / aspect_ratio)
                angle = math.degrees(det.angle)
                # Oriented box as [cx, cy, w, h, angle_degrees].
                ann["obb"] = [(xmin + xmax) / 2, (ymax + ymin) / 2, (xmax - xmin), (ymax - ymin)]
                ann["obb"].append(angle)
                # Axis-aligned bbox = bounding rect of the rotated box.
                # BUG FIX: rotated_rect_to_xywh returns a tuple; wrap in
                # list() so the ROI shift below can assign by index.
                ann["bbox"] = list(rotated_rect_to_xywh(
                    ((ann["obb"][0], ann["obb"][1]), (ann["obb"][2], ann["obb"][3]), ann["obb"][4])
                ))
                if roi is not None:
                    # NOTE(review): only "bbox" is shifted back into full-image
                    # coordinates; "obb" stays in ROI coordinates — confirm
                    # this is intended by downstream consumers.
                    ann["bbox"][0] += roi[0]
                    ann["bbox"][1] += roi[1]

                sms_results["targets"].append(ann)

        return sms_results

    def release(self):
        """Shut down the node and close all SpireMS endpoints."""
        BaseNode.release(self)
        self._image_reader.kill()
        self._result_writer.kill()
        self._show_writer.kill()
        # BUG FIX: _next_writer only exists when launch_next_emit is enabled
        # (see __init__); guard so release() cannot raise AttributeError.
        if hasattr(self, '_next_writer'):
            self._next_writer.kill()

    def image_callback(self, msg):
        """Subscriber callback: decode one frame and enqueue it.

        In realtime mode the queue is drained first so only the newest
        frame is processed (older, unprocessed frames are dropped).
        """
        if self.realtime_det:
            if not self.job_queue.empty():
                self.job_queue.queue.clear()
        img = sms2cvimg(msg)
        self.job_queue.put({'msg': msg, 'img': img})

    def run(self):
        """Worker loop: pull frames, run inference + post-processing, publish."""
        while self.is_running():
            msg_dict = self.job_queue.get(block=True)
            if msg_dict is None:
                break  # poison pill -> shut down

            msg, img_src = msg_dict['msg'], msg_dict['img']

            # Optional ROI crop: rois[0] = [x1, y1, x2, y2] in source coords.
            if "rois" in msg and len(msg["rois"]) > 0:
                roi = msg["rois"][0]
                img_infer = img_src[roi[1]: roi[3], roi[0]: roi[2], :]
            else:
                roi = None
                img_infer = img_src

            t1 = time.time()
            file_name = msg['file_name'] if 'file_name' in msg else ''

            # Letterbox to the model input size (pad value 56 = dark gray)
            # and convert BGR -> RGB for the network.
            img, aspect_ratio, offset_x, offset_y = letterbox_resize(img_infer, (self.imgsz[0], self.imgsz[1]), 56)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            # pytorch/onnx expect normalized NCHW float; RKNN takes HWC uint8.
            if self.platform in ['pytorch', 'onnx']:
                input_data = img.transpose((2, 0, 1))
                input_data = input_data.reshape(1, *input_data.shape).astype(np.float32)
                input_data = input_data / 255.
            else:
                input_data = img
            outputs = self.model.run([input_data])
            predbox = self.post_process(outputs)

            res_msg = self.trans_det_results(predbox, aspect_ratio, offset_x, offset_y, img_src.shape[0], img_src.shape[1], roi)
            res_msg['file_name'] = file_name
            res_msg['dataset'] = self.dataset_name
            # Pass bookkeeping fields from the request through, if present.
            for key in ('client_id', 'file_name', 'img_id', 'img_total'):
                if key in msg:
                    res_msg[key] = msg[key]
            res_msg['time_used'] = time.time() - t1

            self._result_writer.publish(res_msg)

            # In dataset (non-realtime) mode, ask the feeder for the next image.
            if 'img_total' in msg and self.launch_next_emit:
                next_msg = def_msg('std_msgs::Boolean')
                next_msg['data'] = True
                self._next_writer.publish(next_msg)
                print('img_id', msg['img_id'])

            # Visualization: shared-memory raw image on Linux, otherwise the
            # incoming message is re-used as the carrier.
            if self.b_use_shm:
                msg = self._show_writer.cvimg2sms_mem(img_src)
            msg['spirecv_msgs::2DTargets'] = res_msg
            self._show_writer.publish(msg)

        self.release()
        print('{} quit!'.format(self.__class__.__name__))

    def setup_model(self):
        """Instantiate the inference backend from the model file extension.

        :return: (model, backend) where backend is 'pytorch'|'rknn'|'onnx'
        """
        model_path = self.local_model_path
        target = self.target
        device_id = self.device_id if len(self.device_id) else None
        # Local named 'backend' (not 'platform') to avoid shadowing the
        # platform module used elsewhere in this class.
        if model_path.endswith('.pt') or model_path.endswith('.torchscript'):
            backend = 'pytorch'
            from py_utils.pytorch_executor import Torch_model_container
            model = Torch_model_container(model_path)
        elif model_path.endswith('.rknn'):
            backend = 'rknn'
            model = RKNN_model_container(model_path, target, device_id)
        elif model_path.endswith('onnx'):
            backend = 'onnx'
            from py_utils.onnx_executor import ONNX_model_container
            model = ONNX_model_container(model_path)
        else:
            assert False, "{} is not rknn/pytorch/onnx model".format(model_path)
        print('Model-{} is {} model, starting val'.format(model_path, backend))
        return model, backend

    def post_process(self, input_data):
        """Decode raw model outputs into an NMS-filtered DetectBox list.

        input_data[:-1] are the per-stride feature maps (80/40/20 grids for
        a 640 input); input_data[-1] is the concatenated angle feature.
        """
        outputs = []
        for x in input_data[:-1]:
            index, stride = 0, 0
            # Map grid size -> stride and this level's offset inside the
            # concatenated angle feature (80x80 cells, then 40x40, then 20x20).
            if x.shape[2] == 20:
                stride = 32
                index = 20 * 4 * 20 * 4 + 20 * 2 * 20 * 2
            if x.shape[2] == 40:
                stride = 16
                index = 20 * 4 * 20 * 4
            if x.shape[2] == 80:
                stride = 8
                index = 0
            # 79 channels = 64 DFL box channels + 15 class scores (DOTA v1.0).
            feature = x.reshape(1, 79, -1)
            outputs = outputs + self.process(feature, x.shape[3], x.shape[2], stride, input_data[-1], index)
        return NMS(outputs, self.nms_thresh)

    def process(self, out, model_w, model_h, stride, angle_feature, index, scale_w=1, scale_h=1):
        """Decode one feature level into DetectBox candidates above the
        confidence threshold.

        :param out: (1, 64 + num_classes, H*W) feature map
        :param model_w: grid width at this stride
        :param model_h: grid height at this stride
        :param stride: pixels per grid cell
        :param angle_feature: flattened angle output; `index` is this
            level's offset into it
        :param scale_w: optional extra x scale applied to the output box
        :param scale_h: optional extra y scale applied to the output box
        """
        class_num = len(self.dataset_categories)
        angle_feature = angle_feature.reshape(-1)
        xywh = out[:, :64, :]
        conf = sigmoid(out[:, 64:, :]).reshape(-1)
        # DFL expectation weights 0..15 — loop-invariant, hoisted.
        dfl_bins = np.arange(16).reshape(1, 1, 16, 1)
        boxes = []
        for ik in range(model_h * model_w * class_num):
            if conf[ik] <= self.confidence:
                continue
            # Recover grid cell (w, h) and class c from the flat index.
            w = ik % model_w
            h = (ik % (model_w * model_h)) // model_w
            c = ik // (model_w * model_h)
            # DFL decode: softmax over 16 bins per side, take the expectation
            # -> (left, top, right, bottom) distances from the cell center.
            xywh_ = xywh[0, :, (h * model_w) + w].reshape(1, 4, 16, 1)
            xywh_ = softmax(xywh_, 2)
            xywh_ = np.sum(np.multiply(dfl_bins, xywh_), axis=2, keepdims=True).reshape(-1)
            xywh_add = xywh_[:2] + xywh_[2:]        # box width/height
            xywh_sub = (xywh_[2:] - xywh_[:2]) / 2  # center offset before rotation
            # Angle decode: (raw - 0.25) * pi, in radians.
            angle_ = (angle_feature[index + (h * model_w) + w] - 0.25) * 3.1415927410125732
            cos_a = math.cos(angle_)
            sin_a = math.sin(angle_)
            # Rotate the center offset into image axes.
            xy_mul1 = xywh_sub[0] * cos_a
            xy_mul2 = xywh_sub[1] * sin_a
            xy_mul3 = xywh_sub[0] * sin_a
            xy_mul4 = xywh_sub[1] * cos_a
            xywh_1 = np.array([
                (xy_mul1 - xy_mul2) + w + 0.5,
                (xy_mul3 + xy_mul4) + h + 0.5,
                xywh_add[0], xywh_add[1]
            ])
            xywh_ = xywh_1 * stride
            xmin = (xywh_[0] - xywh_[2] / 2) * scale_w
            ymin = (xywh_[1] - xywh_[3] / 2) * scale_h
            xmax = (xywh_[0] + xywh_[2] / 2) * scale_w
            ymax = (xywh_[1] + xywh_[3] / 2) * scale_h
            boxes.append(DetectBox(c, conf[ik], xmin, ymin, xmax, ymax, angle_))
        return boxes


if __name__ == '__main__':
    # CLI entry point: parse the standard SpireCV node arguments and start
    # the detection node. Unknown arguments are forwarded to the node via
    # get_extra_args().
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--config',
        type=str,
        default='default_params.json',
        help='SpireCV2 Config (.json)')
    parser.add_argument(
        '--job-name', '-j',
        type=str,
        default='live',
        help='SpireCV Job Name')
    parser.add_argument(
        '--ip',
        type=str,
        default='127.0.0.1',
        help='SpireMS Core IP')
    parser.add_argument(
        '--port',
        type=int,
        default=9094,
        help='SpireMS Core Port')
    args, unknown_args = parser.parse_known_args()
    if not os.path.isabs(args.config):
        # Relative configs are resolved under <repo>/params/spirecv2, found by
        # locating the 'spirecv-pro' directory in this file's absolute path.
        current_path = os.path.abspath(__file__)
        params_dir = os.path.join(current_path[:current_path.find('spirecv-pro') + 11], 'params', 'spirecv2')
        args.config = os.path.join(params_dir, args.config)
    print("--config:", args.config)
    print("--job-name:", args.job_name)
    extra = get_extra_args(unknown_args)

    # The node starts its worker thread in __init__; spin() blocks until shutdown.
    node = YOLOv11ObbNode_Rknn(args.job_name, param_dict_or_file=args.config, ip=args.ip, port=args.port, **extra)
    node.spin()
