﻿# coding=utf-8
# 2023/4/27: 首个记录，基于旧2021/8/12版本
# 2023/5/5: 枚举基于Enum
# 2023/10/11: 补充部分英文注释
# 2023/10/27: 增加雷达径向速度字段。to_general_sample输出TTC等指标值
# 2023/10/30: 增加雷达切向速度字段
# 2023/11/15: 支持v7，增加z轴速度/加速度字段
# 2024/3/19: to_general_sample固定输出v6，增加to_general_sample_v7
# 2025/7/18: 修正significant赋值
# 2025/7/21: 使用类型注解。优化to_general_sample
# 2025/8/7: 新增ObjectClass.WheelBarrow, TruckHead, TrafficSign, TrafficLight

import bi_common as bi
from math import pi, sin, cos, atan2, sqrt
from enum import Enum
from typing import List, Any


class PositionMode(Enum):
    """How an object's reported position (posx/posy) is referenced."""
    CLOSEST_POINT = 1  # Position is the closest point to the subject vehicle / 最近点
    BOX_CENTER = 2  # Position is the center of the bounding box / 框中心


class ObjectClass(Enum):
    """Object classification.

    Values < 10 are major categories; for values >= 10 the tens digit
    encodes the major category (see ObjectInfo.category()).
    """
    GENERAL = 1  # General object (major category) / 一般物体大类
    CAR = 2  # Vehicle (major category) / 车辆大类
    PED = 3  # Pedestrian (major category) / 行人大类
    TRUCK = 4  # Truck (major category) / 货车大类
    BIKE = 5  # Bike (major category) / Bike大类
    RAIL_CAR = 6  # Rail vehicle (major category) / 轨道车辆大类
    SPECIAL = 7  # Special object (major category) / 特殊物体大类
    ROAD_STATIC = 8  # Static object on the road (major category) / 道路内静态物体大类
    SIDE_STATIC = 9  # Static object off the road (major category) / 道路外静态物体大类

    GENERAL_SMALL = 11  # Small general object / 一般小物体
    GENERAL_BIG = 12  # Big general object / 一般大物体
    BARRIER = 13  # General barrier / 一般障碍物

    VAN = 21  # Van / 面包车
    MINIBUS = 22  # Minibus / 小巴
    BUS = 23  # Bus / 大巴
    BATTERY_CART = 24  # Campus battery cart / 园区电瓶车
    TINY_CAR = 25  # Tiny car / 微型车
    SUV = 26  # SUV

    ADULT = 31  # Adult / 成人
    CHILD = 32  # Child / 小孩
    SCOOTER = 33  # Self-balancing scooter / 平衡车
    WHEEL_CHAIR = 34  # Wheelchair / 轮椅
    WHEEL_BARROW = 35  # Hand cart / 手推车

    MINITRUCK = 41  # Small truck / 小卡车
    CONTAINER_TRUCK = 42  # Container truck (tractor head + trailer) / 货柜车（货柜车头 + 拖车）
    SPECIAL_CAR = 43  # Special-purpose vehicle / 特种车辆
    TRAILER = 44  # Trailer / 拖车
    TRUCK_HEAD = 45  # Container truck tractor head / 货柜车头

    MOTORBIKE = 51  # Motorbike / 摩托车
    BICYCLE = 52  # Bicycle / 自行车
    ELECTRIC_BIKE = 53  # Electric bicycle / 电瓶自行车
    TRICYCLE = 54  # Tricycle / 三轮车

    TRAIN = 61  # Train / 火车
    TRAM = 62  # Tram / 有轨电车

    ANIMAL = 71  # Animal / 动物
    BALL = 72  # Ball / 球类
    LITTER = 73  # Litter and debris / 垃圾等杂物

    CONE = 81  # Traffic cone / 锥形路障
    MANHOLE_COVER = 82  # Manhole cover / 井盖
    PATCH = 83  # Road surface patch / 路面补丁
    GANTRY = 84  # Gantry / 龙门架

    POLE = 91  # Pole / 竖杆
    TREE = 92  # Tree / 树木
    VEGETATION = 93  # Vegetation / 灌木
    BUILDING = 94  # Building / 建筑物
    TRAFFIC_SIGN = 95  # Traffic sign / 交通标志
    TRAFFIC_LIGHT = 96  # Traffic light / 交通灯

class ObjectColor:
    """RGB color of an object; r/g/b are only meaningful when valid is True."""

    def __init__(self):
        self.valid: bool = False  # Whether the color is valid / 颜色是否有效
        self.r: int = 0  # Red component of the object's color / 目标物的颜色R分量
        self.g: int = 0  # Green component of the object's color / 目标物的颜色G分量
        self.b: int = 0  # Blue component of the object's color / 目标物的颜色B分量


class ObjectInfo:
    """A single tracked object reported by an object sensor.

    Positions and velocities are expressed in the subject-vehicle frame;
    units are noted on each attribute.
    """

    def __init__(self):
        self.id: int = 0  # ID of the object / 目标物ID
        self.age: int = 0  # Age of the object / 目标物的Age
        self.raw_id: int | None = None  # Raw ID of the object / 目标物的原始ID
        self.raw_age: int | None = None  # Raw age of the object / 目标物的原始Age
        self.confidence: float | None = None  # [%] Confidence (existence) of the object / 目标物的置信度
        self.time_offset: int | None = None  # [us] Time offset (to the frame's time) / 时间偏置(相对于帧的时间戳)
        self.color: ObjectColor = ObjectColor()  # Color of the object / 目标物的颜色
        self.classification: ObjectClass = ObjectClass.GENERAL  # Classification of the object / 目标物的类别
        self.class_confidence: float | None = None  # [%] Confidence (classification) of the object / 目标物的分类置信度
        self.raw_class_id: int | None = None  # Raw classification ID of the object / 目标物的原始类别ID
        self.pos_mode: PositionMode = PositionMode.CLOSEST_POINT  # Position mode of the object / 目标物的位置模式
        self.posx: float = 0.0  # [m] X-axis coordination of the object's position / 目标物的x轴方向位置
        self.posy: float = 0.0  # [m] Y-axis coordination of the object's position / 目标物的y轴方向位置
        self.posz: float = 0.0  # [m] Z-axis coordination of the object's position / 目标物的z轴方向位置
        self.posx_sigma: float | None = None  # [m] X-axis sigma of the object's position / 目标物x轴方向位置的精度
        self.posy_sigma: float | None = None  # [m] Y-axis sigma of the object's position / 目标物y轴方向位置的精度
        self.posz_sigma: float | None = None  # [m] Z-axis sigma of the object's position / 目标物z轴方向位置的精度
        self.cpx: float = 0.0  # [m] X-axis coordination of the object's closest point / 目标物的最近点x轴坐标
        self.cpy: float = 0.0  # [m] Y-axis coordination of the object's closest point / 目标物的最近点y轴坐标
        self.cpd: float = 0.0  # [m] Distance between the object's closest point and subject vehicle's contour / 目标物的最近点与本车轮廓距离
        self.width: float | None = None  # [m] Width of the object's box / 目标物的宽度
        self.length: float | None = None  # [m] Length of the object's box / 目标物的长度
        self.height: float | None = None  # [m] Height of the object's box / 目标物的高度
        self.heading: float | None = None  # [deg] Orientation of the object / 目标物的朝向
        self.vx_rel: float | None = None  # [KPH] X-axis relative velocity of the object / x轴方向相对速度
        self.vx_abs: float | None = None  # [KPH] X-axis absolute velocity of the object / x轴方向绝对速度
        self.vy_rel: float | None = None  # [KPH] Y-axis relative velocity of the object / y轴方向相对速度
        self.vy_abs: float | None = None  # [KPH] Y-axis absolute velocity of the object / y轴方向绝对速度
        self.vz_rel: float | None = None  # [KPH] Z-axis relative velocity of the object / z轴方向相对速度
        self.vz_abs: float | None = None  # [KPH] Z-axis absolute velocity of the object / z轴方向绝对速度
        self.ax_rel: float | None = None  # [m/s²] X-axis relative acceleration of the object / x轴方向相对加速度
        self.ax_abs: float | None = None  # [m/s²] X-axis absolute acceleration of the object / x轴方向绝对加速度
        self.ay_rel: float | None = None  # [m/s²] Y-axis relative acceleration of the object / y轴方向相对加速度
        self.ay_abs: float | None = None  # [m/s²] Y-axis absolute acceleration of the object / y轴方向绝对加速度
        self.az_rel: float | None = None  # [m/s²] Z-axis relative acceleration of the object / z轴方向相对加速度
        self.az_abs: float | None = None  # [m/s²] Z-axis absolute acceleration of the object / z轴方向绝对加速度
        self.yaw_rate: float | None = None  # [deg/s] Yaw rate of the object / 横摆角速度
        self.curvature: float | None = None  # [1/m] Turning curvature of the object / 转弯曲率
        self.contour: List[float] = []  # List of contour points, in order of x1,y1,x2,y2... / 目标物轮廓点的列表，按x1,y1,x2,y2...排列
        self.rcs: float | None = None  # [dB] Radar-cross-section of the radar target / 雷达目标物的雷达散射截面
        self.v_lng: float | None = None  # [m/s] Longitudinal velocity of the radar target / 雷达目标物的径向速度
        self.v_lat: float | None = None  # [m/s] Lateral velocity of the radar target / 雷达目标物的切向速度

    def category(self) -> ObjectClass:
        """Return the major category; the tens digit of the class value encodes it."""
        if self.classification.value < 10:
            return self.classification
        # Floor division instead of int(value / 10): identical result for these
        # positive enum values, without the float round-trip.
        return ObjectClass(self.classification.value // 10)

    def speed(self) -> float | None:
        """Absolute speed [KPH]: 3D when vz_abs is known, else 2D; None if unknown."""
        if self.vx_abs is None or self.vy_abs is None:
            return None
        if self.vz_abs is not None:
            return sqrt(self.vx_abs * self.vx_abs + self.vy_abs * self.vy_abs + self.vz_abs * self.vz_abs)
        return sqrt(self.vx_abs * self.vx_abs + self.vy_abs * self.vy_abs)

    def box_corners(self, coef: float) -> List[float] | None:
        """Return the 8 box outline points connected sequentially, as [x1,y1,...,x8,y8].

        Points alternate edge midpoints and shrunk corners, rotated by heading
        and translated to the box center; "coef" is the reduction ratio of the
        inclined (corner) points, generally 0.9~1. Returns None unless the
        object is in BOX_CENTER mode with width, length and heading available.
        """
        if self.pos_mode != PositionMode.BOX_CENTER or self.width is None or self.length is None or self.heading is None:
            return None
        posx = self.posx
        posy = self.posy
        width = self.width
        length = self.length
        heading = self.heading
        # Outline in the box's local frame (x forward, y left).
        pts = [length / 2, 0,
               coef * length / 2, coef * width / 2,
               0, width / 2,
               -coef * length / 2, coef * width / 2,
               -length / 2, 0,
               -coef * length / 2, -coef * width / 2,
               0, -width / 2,
               coef * length / 2, -coef * width / 2]
        deg2rad = pi / 180
        coshd = cos(heading * deg2rad)
        sinhd = sin(heading * deg2rad)
        # Rotate by heading, then translate to the box center.
        for i in range(0, 8):
            x = coshd * pts[2 * i] - sinhd * pts[2 * i + 1]
            y = sinhd * pts[2 * i] + coshd * pts[2 * i + 1]
            pts[2 * i] = posx + x
            pts[2 * i + 1] = posy + y
        return pts


class ObjectSensorFov:
    """Field-of-view sector of one object sensor, in the subject-vehicle frame."""

    def __init__(self):
        self.position_x: float = 0.0  # [m] X-axis coordination of the FOV's center point / FOV中心点x轴坐标
        self.position_y: float = 0.0  # [m] Y-axis coordination of the FOV's center point / FOV中心的y轴坐标
        self.angle_range: float = 90.0  # [deg] Angle range of the FOV / FOV的角度范围
        self.orientation: float = 0.0  # [deg] Central heading angle of the FOV / FOV中轴线朝向角
        self.distance_range: float = 100.0  # [m] Detect range of the FOV / FOV探测距离范围
        self.blind_range: float = 0.0  # [m] Blind range of the FOV / FOV盲区范围


class ObjectSensorSample:
    """One frame of object-sensor output: tracked objects, FOVs and ego state."""

    def __init__(self):
        self.time: float = 0.0  # [s] Time offset in session / Session内的相对时间
        self.objects: List[ObjectInfo] = []  # List of objects / 目标物列表
        self.cipv_index: int = -1  # Index of CIPV object, -1 indicates not exist / 前向关键目标序号，-1表示不存在
        self.lko_index: int = -1  # Index of left side key object, -1 indicates not exist / 左侧关键目标序号，-1表示不存在
        self.rko_index: int = -1  # Index of right side key object, -1 indicates not exist / 右侧关键目标序号，-1表示不存在
        self.fovs: List[ObjectSensorFov] = []  # List of FOV / FOV列表
        self.vehicle_speed: float | None = None  # [KPH] Subject vehicle's speed / 本车车速
        self.vehicle_curvature: float | None = None  # [1/m] Subject vehicle's turning curvature / 本车曲率
        self.vehicle_width: float | None = None  # [m] Subject vehicle's width / 本车宽度
        self.vehicle_length: float | None = None  # [m] Subject vehicle's length / 本车长度
        self.vehicle_wheel_base: float | None = None  # [m] Subject vehicle's wheel base / 本车轴距
        self.vehicle_front_overhang: float | None = None  # [m] Subject vehicle's front overhang / 本车前悬

    # Calculate first order TTC / 计算一阶TTC [s]
    def cal_ttc1(self, obj: ObjectInfo) -> float | None:
        """Time-to-collision assuming constant relative longitudinal speed."""
        if not isinstance(obj, ObjectInfo):
            return None
        if obj.vx_rel is None:
            return None
        closing_speed = obj.vx_rel / 3.6  # KPH -> m/s
        if obj.cpx > 0:
            # Object ahead: finite TTC only while it closes faster than -0.1 m/s.
            return float('inf') if closing_speed >= -0.1 else -obj.cpx / closing_speed
        if self.vehicle_length is not None and obj.cpx < -self.vehicle_length:
            # Object fully behind the rear bumper, approaching from behind.
            return float('inf') if closing_speed <= 0.1 else (-self.vehicle_length - obj.cpx) / closing_speed
        # Object overlapping the ego footprint longitudinally (or length unknown).
        return None

    # Calculate second order TTC / 计算二阶TTC [s]
    def cal_ttc2(self, obj: ObjectInfo) -> float | None:
        """Time-to-collision assuming constant relative acceleration."""
        if not isinstance(obj, ObjectInfo):
            return None
        if obj.vx_rel is None or obj.ax_rel is None:
            return None
        v = obj.vx_rel / 3.6  # KPH -> m/s
        a = obj.ax_rel
        if obj.cpx > 0:
            # Object ahead of the front bumper.
            if v >= -0.1:
                return float('inf')
            if abs(a) < 0.1:
                # Negligible acceleration: fall back to first-order TTC.
                return -obj.cpx / v
            disc = v * v - 2 * a * obj.cpx
            if disc >= 0:
                return (-v - sqrt(disc)) / a
            return float('inf')
        if self.vehicle_length is not None and obj.cpx < -self.vehicle_length:
            # Object behind the rear bumper.
            dist = -self.vehicle_length - obj.cpx
            if v <= 0.1:
                return float('inf')
            if abs(a) < 0.1:
                return dist / v
            disc = v * v + 2 * a * dist
            if disc >= 0:
                return (-v + sqrt(disc)) / a
            return float('inf')
        return None

    # Calculate time headway / 计算车间时距 [s]
    def cal_thw(self, obj: ObjectInfo) -> float | None:
        """Gap time to a forward object at the current ego speed."""
        if not isinstance(obj, ObjectInfo) or self.vehicle_speed is None:
            return None
        if obj.cpx <= 0:
            # Only defined for objects ahead of the front bumper.
            return None
        ego_speed = self.vehicle_speed / 3.6  # KPH -> m/s
        return float('inf') if ego_speed <= 0.1 else obj.cpx / ego_speed

    # Calculate DCA / 计算避免碰撞最小减速度 [m/s²] reactionTime=0: ODCA, >0: PDCA
    def cal_dca(self, obj: ObjectInfo, reaction_time: float) -> float | None:
        """Minimum ego deceleration to avoid collision with a forward object [m/s²].

        reaction_time = 0 gives ODCA, > 0 gives PDCA: during the reaction time
        the ego keeps its speed while the object moves with constant
        acceleration. Returns 0 or a negative deceleration, -inf when the gap
        is already consumed, or None when required inputs are missing.
        """
        if not isinstance(obj, ObjectInfo) or self.vehicle_speed is None or obj.vx_abs is None or obj.ax_abs is None:
            return None
        vx_ego_m = self.vehicle_speed / 3.6  # ego speed, KPH -> m/s
        vx_abs_m = obj.vx_abs / 3.6  # object absolute speed, KPH -> m/s
        ax_abs = obj.ax_abs
        # Displacement of each party during the reaction time
        # (ego: constant speed; object: constant acceleration).
        ego_dx_reaction = vx_ego_m * reaction_time
        obj_dx_reaction = vx_abs_m * reaction_time + 0.5 * ax_abs * reaction_time * reaction_time
        # Remaining gap after the reaction time, with a 0.1 m safety margin.
        dx_rel_ar = obj.cpx - 0.1 + obj_dx_reaction - ego_dx_reaction
        # States at the end of the reaction time ("_ar" = after reaction).
        ego_vx_abs_ar = vx_ego_m
        obj_vx_abs_ar = vx_abs_m + ax_abs * reaction_time
        obj_vx_rel_ar = vx_abs_m - vx_ego_m
        obj_ax_abs_ar = ax_abs
        if dx_rel_ar <= 0:
            # Gap already consumed: collision unavoidable.
            return float('-inf')
        if obj_ax_abs_ar >= 0:
            if obj_vx_rel_ar >= 0:
                # Object not slower than ego and not decelerating: no braking needed.
                return 0
            else:
                # Ego must shed the relative speed within the remaining gap.
                return min(0, obj_ax_abs_ar - obj_vx_rel_ar * obj_vx_rel_ar / (2 * dx_rel_ar))
        else:
            # Object is decelerating.
            if obj_vx_rel_ar >= 0:
                # NOTE(review): presumably sizes ego braking against the distance
                # available once the decelerating object has stopped — confirm.
                return min(0, ego_vx_abs_ar * ego_vx_abs_ar / (obj_vx_abs_ar * obj_vx_abs_ar / obj_ax_abs_ar - 2 * dx_rel_ar))
            elif obj_vx_rel_ar * obj_vx_rel_ar / (2 * dx_rel_ar) < obj_ax_abs_ar:
                # NOTE(review): this branch is unreachable — the left side is
                # always >= 0 (dx_rel_ar > 0) while obj_ax_abs_ar < 0 here.
                # The condition (or its sign) looks wrong; confirm intent.
                return min(0, ego_vx_abs_ar * ego_vx_abs_ar / (obj_vx_abs_ar * obj_vx_abs_ar / obj_ax_abs_ar - 2 * dx_rel_ar))
            else:
                return min(0, obj_ax_abs_ar - obj_vx_rel_ar * obj_vx_rel_ar / (2 * dx_rel_ar))

    # Calculate overlap / 计算重叠率 [%]
    def cal_overlap(self, obj: ObjectInfo, box_corner_ratio: float = 0.98) -> int | None:
        """Lateral overlap between the predicted ego path and a forward object, in percent.

        Negative values (clamped at -300) indicate lateral separation.
        """
        if not isinstance(obj, ObjectInfo) or self.vehicle_curvature is None or self.vehicle_width is None:
            return None
        ego_width = self.vehicle_width
        ego_curv = self.vehicle_curvature
        front_overhang = self.vehicle_front_overhang if self.vehicle_front_overhang is not None else 0.9
        wheel_base = self.vehicle_wheel_base if self.vehicle_wheel_base is not None else 2.8
        if obj.posx <= 0 or ego_width <= 0:
            return None
        obj_width = 1
        if obj.pos_mode == PositionMode.CLOSEST_POINT:
            # No box available: assume a nominal width per major category.
            nominal_widths = {
                ObjectClass.CAR: 1.9,
                ObjectClass.PED: 0.5,
                ObjectClass.TRUCK: 2.1,
                ObjectClass.BIKE: 0.6,
                ObjectClass.RAIL_CAR: 2.6,
            }
            obj_width = nominal_widths.get(obj.category(), 1)
        else:  # BoxCenter: width from the lateral extent of the outline points
            corners = obj.box_corners(box_corner_ratio)
            if corners is not None:
                ys = [corners[2 * k + 1] for k in range(8)]
                obj_width = max(ys) - min(ys)
        # Lateral offset of the ego path at the object's longitudinal distance.
        dy_ego = 0.5 * ego_curv * obj.posx * obj.posx + ego_curv * (front_overhang + wheel_base) * obj.posx
        dy_obj = obj.posy
        overlap = min(dy_ego + 0.5 * ego_width, dy_obj + 0.5 * obj_width) - max(dy_ego - 0.5 * ego_width, dy_obj - 0.5 * obj_width)
        denom = min(ego_width, obj_width) if overlap >= 0 else ego_width
        return int(max(-3, overlap / denom) * 100)

    # Calculate lateral clearance / 计算横向间距 [m]
    def cal_lc(self, obj: ObjectInfo, box_corner_ratio: float = 0.98) -> float | None:
        """Lateral clearance between the ego side and an object beside/behind it."""
        if not isinstance(obj, ObjectInfo) or self.vehicle_width is None:
            return None
        half_width = self.vehicle_width * 0.5
        if obj.pos_mode == PositionMode.CLOSEST_POINT:
            if obj.posx > 0:
                # Only defined for objects not ahead of the front bumper.
                return None
            return obj.posy - half_width if obj.posy > 0 else -half_width - obj.posy
        # BoxCenter: use the outline extents.
        corners = obj.box_corners(box_corner_ratio)
        if corners is None:
            return None
        xs = [corners[2 * k] for k in range(8)]
        ys = [corners[2 * k + 1] for k in range(8)]
        if min(xs) > 0:
            # Entire box is ahead of the front bumper.
            return None
        if min(ys) > 0:
            return min(ys) - half_width
        if max(ys) < 0:
            return -half_width - max(ys)
        # Box straddles the ego centerline laterally.
        return None

    # Calculate rear clearance / 计算后向间距 [m]
    def cal_rc(self, obj: ObjectInfo, box_corner_ratio: float = 0.98) -> float | None:
        """Longitudinal clearance between the ego rear bumper and an object behind."""
        if not isinstance(obj, ObjectInfo) or self.vehicle_length is None:
            return None
        if obj.pos_mode == PositionMode.CLOSEST_POINT:
            points = [obj.posx, obj.posy]
            count = 1
        else:  # BoxCenter
            points = obj.box_corners(box_corner_ratio)
            count = 8
        if points is None:
            return None
        # Foremost x of the object's point set.
        max_x = max(points[2 * k] for k in range(count))
        if max_x > 0:
            return None
        return -self.vehicle_length - max_x

    # Convert to general sample for output / 转通用样本，用于样本输出
    def to_general_sample(self, channel: int) -> Any:
        """Serialize this frame into the flat "obj-sensor-sample-v6" value array.

        Layout: 16 header slots, 52 slots per object, 6 slots per FOV, then
        2 slots per contour point (x1,y1,x2,y2,...).
        """
        output = bi.agency.create_general_sample()
        output.protocol = "obj-sensor-sample-v6@" + str(channel)
        output.time = self.time
        # Pre-compute per-object contour offsets/sizes in whole points.
        # Floor division fixes the original "/ 2", which produced float counts
        # (and float offsets/sizes in the integer slots) for any contour.
        contour_points = 0
        contour_offsets: List[int] = []
        contour_sizes: List[int] = []
        for obj in self.objects:
            point_count = len(obj.contour) // 2
            contour_offsets.append(contour_points)
            contour_sizes.append(point_count)
            contour_points += point_count

        total_length = 16 + len(self.objects) * 52 + len(self.fovs) * 6 + contour_points * 2
        output.significant = total_length

        # Header (slots 13..15 reserved).
        values: List[float | str | None] = [None] * total_length
        values[0] = len(self.objects)
        values[1] = len(self.fovs)
        values[2] = 0  # trajectory point count (none emitted here)
        values[3] = contour_points
        values[4] = self.cipv_index if self.cipv_index >= 0 else None
        values[5] = self.lko_index if self.lko_index >= 0 else None
        values[6] = self.rko_index if self.rko_index >= 0 else None
        values[7] = self.vehicle_speed
        values[8] = self.vehicle_curvature
        values[9] = self.vehicle_width
        values[10] = self.vehicle_length
        values[11] = self.vehicle_wheel_base
        values[12] = self.vehicle_front_overhang

        for i, obj in enumerate(self.objects):
            base_index = 16 + i * 52
            values[base_index] = obj.id
            values[base_index + 1] = obj.age
            values[base_index + 2] = obj.raw_id
            values[base_index + 3] = obj.raw_age
            values[base_index + 4] = obj.raw_class_id
            values[base_index + 5] = obj.classification.value
            values[base_index + 6] = obj.pos_mode.value
            values[base_index + 7] = obj.posx
            values[base_index + 8] = obj.posy
            values[base_index + 9] = obj.cpx
            values[base_index + 10] = obj.cpy
            values[base_index + 11] = obj.cpd
            values[base_index + 12] = obj.width
            values[base_index + 13] = obj.length
            values[base_index + 14] = obj.heading
            values[base_index + 15] = obj.vx_rel
            values[base_index + 16] = obj.vx_abs
            values[base_index + 17] = obj.vy_rel
            values[base_index + 18] = obj.vy_abs
            values[base_index + 19] = obj.ax_rel
            values[base_index + 20] = obj.ax_abs
            values[base_index + 21] = obj.ay_rel
            values[base_index + 22] = obj.ay_abs
            values[base_index + 23] = obj.color.r if obj.color.valid else None
            values[base_index + 24] = obj.color.g if obj.color.valid else None
            values[base_index + 25] = obj.color.b if obj.color.valid else None
            values[base_index + 26] = obj.time_offset
            values[base_index + 27] = obj.confidence
            values[base_index + 28] = obj.class_confidence
            values[base_index + 29] = obj.height
            values[base_index + 30] = obj.posz
            values[base_index + 31] = obj.posx_sigma
            values[base_index + 32] = obj.posy_sigma
            values[base_index + 33] = obj.posz_sigma
            values[base_index + 34] = self.cal_ttc1(obj)
            values[base_index + 35] = self.cal_ttc2(obj)
            values[base_index + 36] = self.cal_thw(obj)
            values[base_index + 37] = self.cal_dca(obj, 1.0)
            values[base_index + 38] = self.cal_dca(obj, 0.0)
            values[base_index + 39] = self.cal_overlap(obj)
            values[base_index + 40] = self.cal_lc(obj)
            values[base_index + 41] = self.cal_rc(obj)
            values[base_index + 42] = obj.rcs
            values[base_index + 43] = obj.v_lng
            values[base_index + 44] = obj.v_lat
            values[base_index + 45] = obj.yaw_rate
            values[base_index + 46] = obj.curvature
            values[base_index + 47] = obj.speed()
            values[base_index + 48] = 0
            values[base_index + 49] = 0
            values[base_index + 50] = contour_offsets[i]
            values[base_index + 51] = contour_sizes[i]

        fov_base = 16 + len(self.objects) * 52
        for i, fov in enumerate(self.fovs):
            fov_index = fov_base + i * 6
            values[fov_index] = fov.position_x
            values[fov_index + 1] = fov.position_y
            values[fov_index + 2] = fov.orientation
            values[fov_index + 3] = fov.angle_range
            values[fov_index + 4] = fov.distance_range
            values[fov_index + 5] = fov.blind_range

        contour_base = fov_base + len(self.fovs) * 6
        cursor = contour_base
        for obj, size in zip(self.objects, contour_sizes):
            # Write only complete (x, y) pairs so an odd-length contour list
            # cannot overflow the allocated slots.
            for point in obj.contour[:2 * size]:
                values[cursor] = point
                cursor += 1

        output.values = values
        return output

    # Convert to general sample for output / 转通用样本，用于样本输出
    def to_general_sample_v7(self, channel: int) -> Any:
        """Serialize this frame into the flat "obj-sensor-sample-v7" value array.

        Layout: 20 header slots, 72 slots per object, 6 slots per FOV, then
        2 slots per contour point (x1,y1,x2,y2,...).
        """
        output = bi.agency.create_general_sample()
        output.protocol = "obj-sensor-sample-v7@" + str(channel)
        output.time = self.time
        # Pre-compute per-object contour offsets/sizes in whole points
        # (floor division: the original "/ 2" emitted floats into integer slots).
        contour_points = 0
        contour_offsets: List[int] = []
        contour_sizes: List[int] = []
        for obj in self.objects:
            point_count = len(obj.contour) // 2
            contour_offsets.append(contour_points)
            contour_sizes.append(point_count)
            contour_points += point_count

        total_length = 20 + len(self.objects) * 72 + len(self.fovs) * 6 + contour_points * 2
        output.significant = total_length

        # Header (slots 13..19 reserved).
        values: List[float | str | None] = [None] * total_length
        values[0] = len(self.objects)
        values[1] = len(self.fovs)
        values[2] = 0  # trajectory point count (none emitted here)
        values[3] = contour_points
        values[4] = self.cipv_index if self.cipv_index >= 0 else None
        values[5] = self.lko_index if self.lko_index >= 0 else None
        values[6] = self.rko_index if self.rko_index >= 0 else None
        values[7] = self.vehicle_speed
        values[8] = self.vehicle_curvature
        values[9] = self.vehicle_width
        values[10] = self.vehicle_length
        values[11] = self.vehicle_wheel_base
        values[12] = self.vehicle_front_overhang

        for i, obj in enumerate(self.objects):
            base_index = 20 + i * 72
            values[base_index] = obj.id
            values[base_index + 1] = obj.age
            values[base_index + 2] = obj.raw_id
            values[base_index + 3] = obj.raw_age
            values[base_index + 4] = obj.raw_class_id
            values[base_index + 5] = obj.classification.value
            values[base_index + 6] = obj.pos_mode.value
            values[base_index + 7] = obj.posx
            values[base_index + 8] = obj.posy
            values[base_index + 9] = obj.posz
            values[base_index + 10] = obj.posx_sigma
            values[base_index + 11] = obj.posy_sigma
            values[base_index + 12] = obj.posz_sigma
            values[base_index + 13] = obj.cpx
            values[base_index + 14] = obj.cpy
            values[base_index + 15] = obj.cpd
            values[base_index + 16] = obj.width
            values[base_index + 17] = obj.length
            values[base_index + 18] = obj.height
            values[base_index + 19] = obj.heading
            values[base_index + 20] = obj.vx_rel
            values[base_index + 21] = obj.vx_abs
            values[base_index + 22] = obj.vy_rel
            values[base_index + 23] = obj.vy_abs
            values[base_index + 24] = obj.vz_rel
            values[base_index + 25] = obj.vz_abs
            values[base_index + 26] = obj.ax_rel
            values[base_index + 27] = obj.ax_abs
            values[base_index + 28] = obj.ay_rel
            values[base_index + 29] = obj.ay_abs
            values[base_index + 30] = obj.az_rel
            values[base_index + 31] = obj.az_abs
            values[base_index + 32] = obj.color.r if obj.color.valid else None
            values[base_index + 33] = obj.color.g if obj.color.valid else None
            values[base_index + 34] = obj.color.b if obj.color.valid else None
            values[base_index + 35] = obj.time_offset
            values[base_index + 36] = obj.confidence
            values[base_index + 37] = obj.class_confidence
            values[base_index + 38] = self.cal_ttc1(obj)
            values[base_index + 39] = self.cal_ttc2(obj)
            values[base_index + 40] = self.cal_thw(obj)
            values[base_index + 41] = self.cal_dca(obj, 1.0)
            values[base_index + 42] = self.cal_dca(obj, 0.0)
            values[base_index + 43] = self.cal_overlap(obj)
            values[base_index + 44] = self.cal_lc(obj)
            values[base_index + 45] = self.cal_rc(obj)
            values[base_index + 46] = obj.rcs
            values[base_index + 47] = obj.v_lng
            values[base_index + 48] = obj.v_lat
            values[base_index + 49] = obj.yaw_rate
            values[base_index + 50] = obj.curvature
            values[base_index + 51] = obj.speed()
            # Tail of the 72-slot record (slots 52..67 reserved), mirroring the
            # v6 layout where these occupy the last 4 slots of the record.
            # The previous code wrote the contour offset/size at +72/+73,
            # overflowing into the next object's record (or the FOV region).
            values[base_index + 68] = 0
            values[base_index + 69] = 0
            values[base_index + 70] = contour_offsets[i]
            values[base_index + 71] = contour_sizes[i]

        fov_base = 20 + len(self.objects) * 72
        for i, fov in enumerate(self.fovs):
            fov_index = fov_base + i * 6
            values[fov_index] = fov.position_x
            values[fov_index + 1] = fov.position_y
            values[fov_index + 2] = fov.orientation
            values[fov_index + 3] = fov.angle_range
            values[fov_index + 4] = fov.distance_range
            values[fov_index + 5] = fov.blind_range

        contour_base = fov_base + len(self.fovs) * 6
        cursor = contour_base
        for obj, size in zip(self.objects, contour_sizes):
            # Write only complete (x, y) pairs so an odd-length contour list
            # cannot overflow the allocated slots.
            for point in obj.contour[:2 * size]:
                values[cursor] = point
                cursor += 1

        output.values = values
        return output


def _conv_obj_sensor_sample_v5(gs: Any) -> ObjectSensorSample | None:
    """Parse a general sample in the "obj-sensor-sample-v5" layout.

    Layout: 11 header slots, 42 slots per object, 6 slots per FOV, then an
    optional tail of trajectory/contour points (2 slots each). Returns None
    when the value array size does not match either expected size.
    """
    values_count = len(gs.values)
    if values_count < 11:
        return None
    object_count = int(gs.values[0]) if gs.values[0] is not None else 0
    trajectory_count = int(gs.values[1]) if gs.values[1] is not None else 0
    contour_count = int(gs.values[2]) if gs.values[2] is not None else 0
    fov_count = int(gs.values[10]) if gs.values[10] is not None else 0
    size_with_extra = 11 + object_count * 42 + fov_count * 6 + (trajectory_count + contour_count) * 2
    size_without_extra = 11 + object_count * 42 + fov_count * 6
    if values_count != size_with_extra and values_count != size_without_extra:
        return None
    output = ObjectSensorSample()
    output.time = gs.time
    output.cipv_index = int(gs.values[3]) if gs.values[3] is not None else -1
    output.lko_index = int(gs.values[4]) if gs.values[4] is not None else -1
    output.rko_index = int(gs.values[5]) if gs.values[5] is not None else -1
    output.vehicle_speed = float(gs.values[6]) if gs.values[6] is not None else None
    output.vehicle_curvature = float(gs.values[7]) if gs.values[7] is not None else None
    output.vehicle_width = float(gs.values[8]) if gs.values[8] is not None else None
    output.vehicle_length = float(gs.values[9]) if gs.values[9] is not None else None
    # Contour data starts after the trajectory points in the extra tail.
    contour_base = 11 + object_count * 42 + fov_count * 6 + trajectory_count * 2
    for i in range(0, object_count):
        obj = ObjectInfo()
        b = 11 + 42 * i
        obj.id = int(gs.values[b] if gs.values[b] is not None else 0)
        obj.age = int(gs.values[b + 1] if gs.values[b + 1] is not None else 0)
        obj.raw_id = int(gs.values[b + 2]) if gs.values[b + 2] is not None else None
        obj.raw_age = int(gs.values[b + 3]) if gs.values[b + 3] is not None else None
        obj.raw_class_id = int(gs.values[b + 4]) if gs.values[b + 4] is not None else None
        obj.classification = ObjectClass(int(gs.values[b + 5])) if gs.values[b + 5] is not None else ObjectClass.GENERAL
        obj.pos_mode = PositionMode(int(gs.values[b + 6])) if gs.values[b + 6] is not None else PositionMode.CLOSEST_POINT
        obj.posx = float(gs.values[b + 7]) if gs.values[b + 7] is not None else 0.0
        obj.posy = float(gs.values[b + 8]) if gs.values[b + 8] is not None else 0.0
        obj.cpx = float(gs.values[b + 9]) if gs.values[b + 9] is not None else 0.0
        obj.cpy = float(gs.values[b + 10]) if gs.values[b + 10] is not None else 0.0
        obj.cpd = float(gs.values[b + 11]) if gs.values[b + 11] is not None else 0.0
        obj.width = float(gs.values[b + 12]) if gs.values[b + 12] is not None else None
        obj.length = float(gs.values[b + 13]) if gs.values[b + 13] is not None else None
        obj.heading = float(gs.values[b + 14]) if gs.values[b + 14] is not None else None
        obj.vx_rel = float(gs.values[b + 15]) if gs.values[b + 15] is not None else None
        obj.vx_abs = float(gs.values[b + 16]) if gs.values[b + 16] is not None else None
        obj.vy_rel = float(gs.values[b + 17]) if gs.values[b + 17] is not None else None
        obj.vy_abs = float(gs.values[b + 18]) if gs.values[b + 18] is not None else None
        obj.ax_rel = float(gs.values[b + 19]) if gs.values[b + 19] is not None else None
        obj.ax_abs = float(gs.values[b + 20]) if gs.values[b + 20] is not None else None
        obj.ay_rel = float(gs.values[b + 21]) if gs.values[b + 21] is not None else None
        obj.ay_abs = float(gs.values[b + 22]) if gs.values[b + 22] is not None else None
        # Contour offset/size slots (in points) within the extra tail.
        cont_ok = False
        cont_offset = 0
        cont_size = 0
        if gs.values[b + 33] is not None and gs.values[b + 34] is not None:
            cont_offset = int(gs.values[b + 33])
            cont_size = int(gs.values[b + 34])
            cont_ok = True
        obj.time_offset = int(gs.values[b + 35]) if gs.values[b + 35] is not None else None
        if gs.values[b + 36] is not None and gs.values[b + 37] is not None and gs.values[b + 38] is not None:
            obj.color.valid = True
            obj.color.r = int(gs.values[b + 36])
            obj.color.g = int(gs.values[b + 37])
            obj.color.b = int(gs.values[b + 38])
        obj.class_confidence = float(gs.values[b + 39]) if gs.values[b + 39] is not None else None
        obj.posx_sigma = float(gs.values[b + 40]) if gs.values[b + 40] is not None else None
        obj.posy_sigma = float(gs.values[b + 41]) if gs.values[b + 41] is not None else None
        if values_count == size_with_extra and cont_ok:
            # "j" fixes the original code, which reused "i" here and shadowed
            # the outer object index (harmless only by accident of Python's
            # range-iterator semantics).
            for j in range(0, cont_size):
                obj.contour.append(float(gs.values[contour_base + cont_offset * 2 + j * 2]))
                obj.contour.append(float(gs.values[contour_base + cont_offset * 2 + j * 2 + 1]))
        output.objects.append(obj)
    for k in range(0, fov_count):
        fov = ObjectSensorFov()
        b = 11 + object_count * 42 + 6 * k
        fov.position_x = float(gs.values[b])
        fov.position_y = float(gs.values[b + 1])
        fov.orientation = float(gs.values[b + 2])
        fov.angle_range = float(gs.values[b + 3])
        fov.distance_range = float(gs.values[b + 4])
        fov.blind_range = float(gs.values[b + 5])
        output.fovs.append(fov)
    return output


def _conv_obj_sensor_sample_v6(gs: Any) -> ObjectSensorSample | None:
    """Decode a v6 'obj-sensor-sample' general sample into an ObjectSensorSample.

    Record layout: 16 header values, then 52 values per object, 6 values per
    FOV, and optionally 2 values per trajectory/contour point appended at the
    end of the record.

    Returns None when the record is too short or its total size does not match
    the counts declared in the header.
    """
    vals = gs.values
    values_count = len(vals)
    if values_count < 16:
        return None

    # Local converters: the protocol encodes "field not present" as None.
    def opt_int(idx: int) -> int | None:
        return int(vals[idx]) if vals[idx] is not None else None

    def opt_float(idx: int) -> float | None:
        return float(vals[idx]) if vals[idx] is not None else None

    def float_or_zero(idx: int) -> float:
        return float(vals[idx]) if vals[idx] is not None else 0.0

    object_count = int(vals[0]) if vals[0] is not None else 0
    fov_count = int(vals[1]) if vals[1] is not None else 0
    trajectory_count = int(vals[2]) if vals[2] is not None else 0
    contour_count = int(vals[3]) if vals[3] is not None else 0
    size_with_extra = 16 + object_count * 52 + fov_count * 6 + (trajectory_count + contour_count) * 2
    size_without_extra = 16 + object_count * 52 + fov_count * 6
    if values_count != size_with_extra and values_count != size_without_extra:
        return None
    output = ObjectSensorSample()
    output.time = gs.time
    output.cipv_index = int(vals[4]) if vals[4] is not None else -1
    output.lko_index = int(vals[5]) if vals[5] is not None else -1
    output.rko_index = int(vals[6]) if vals[6] is not None else -1
    output.vehicle_speed = opt_float(7)
    output.vehicle_curvature = opt_float(8)
    output.vehicle_width = opt_float(9)
    output.vehicle_length = opt_float(10)
    output.vehicle_wheel_base = opt_float(11)
    output.vehicle_front_overhang = opt_float(12)
    # Contour points are stored after all objects, FOVs and trajectory points.
    contour_base = 16 + object_count * 52 + fov_count * 6 + trajectory_count * 2
    for i in range(object_count):
        obj = ObjectInfo()
        b = 16 + 52 * i
        obj.id = int(vals[b] if vals[b] is not None else 0)
        obj.age = int(vals[b + 1] if vals[b + 1] is not None else 0)
        obj.raw_id = opt_int(b + 2)
        obj.raw_age = opt_int(b + 3)
        obj.raw_class_id = opt_int(b + 4)
        obj.classification = ObjectClass(int(vals[b + 5])) if vals[b + 5] is not None else ObjectClass.GENERAL
        obj.pos_mode = PositionMode(int(vals[b + 6])) if vals[b + 6] is not None else PositionMode.CLOSEST_POINT
        obj.posx = float_or_zero(b + 7)
        obj.posy = float_or_zero(b + 8)
        obj.cpx = float_or_zero(b + 9)
        obj.cpy = float_or_zero(b + 10)
        obj.cpd = float_or_zero(b + 11)
        obj.width = opt_float(b + 12)
        obj.length = opt_float(b + 13)
        obj.heading = opt_float(b + 14)
        obj.vx_rel = opt_float(b + 15)
        obj.vx_abs = opt_float(b + 16)
        obj.vy_rel = opt_float(b + 17)
        obj.vy_abs = opt_float(b + 18)
        obj.ax_rel = opt_float(b + 19)
        obj.ax_abs = opt_float(b + 20)
        obj.ay_rel = opt_float(b + 21)
        obj.ay_abs = opt_float(b + 22)
        # Color is only marked valid when all three channels are present.
        if vals[b + 23] is not None and vals[b + 24] is not None and vals[b + 25] is not None:
            obj.color.valid = True
            obj.color.r = int(vals[b + 23])
            obj.color.g = int(vals[b + 24])
            obj.color.b = int(vals[b + 25])
        obj.time_offset = opt_int(b + 26)
        obj.confidence = opt_float(b + 27)
        obj.class_confidence = opt_float(b + 28)
        obj.height = opt_float(b + 29)
        obj.posz = float_or_zero(b + 30)
        obj.posx_sigma = opt_float(b + 31)
        obj.posy_sigma = opt_float(b + 32)
        obj.posz_sigma = opt_float(b + 33)
        # Slots b+34..b+41 and b+47..b+49 are not decoded here.
        obj.rcs = opt_float(b + 42)
        obj.v_lng = opt_float(b + 43)
        obj.v_lat = opt_float(b + 44)
        obj.yaw_rate = opt_float(b + 45)
        obj.curvature = opt_float(b + 46)
        cont_ok = False
        cont_offset = 0
        cont_size = 0
        if vals[b + 50] is not None and vals[b + 51] is not None:
            cont_offset = int(vals[b + 50])
            cont_size = int(vals[b + 51])
            cont_ok = True
        # Contour data only exists in the "with extra" record layout.
        if values_count == size_with_extra and cont_ok:
            # Distinct index 'j' avoids shadowing the outer object loop's 'i'.
            for j in range(cont_size):
                obj.contour.append(float(vals[contour_base + cont_offset * 2 + j * 2]))
                obj.contour.append(float(vals[contour_base + cont_offset * 2 + j * 2 + 1]))
        output.objects.append(obj)
    for i in range(fov_count):
        fov = ObjectSensorFov()
        b = 16 + object_count * 52 + 6 * i
        fov.position_x = float(vals[b])
        fov.position_y = float(vals[b + 1])
        fov.orientation = float(vals[b + 2])
        fov.angle_range = float(vals[b + 3])
        fov.distance_range = float(vals[b + 4])
        fov.blind_range = float(vals[b + 5])
        output.fovs.append(fov)
    return output


def _conv_obj_sensor_sample_v7(gs: Any) -> ObjectSensorSample | None:
    """Decode a v7 'obj-sensor-sample' general sample into an ObjectSensorSample.

    Record layout: 20 header values, then 72 values per object, 6 values per
    FOV, and optionally 2 values per trajectory/contour point appended at the
    end of the record.  v7 extends v6 with z-axis velocity/acceleration slots.

    Returns None when the record is too short or its total size does not match
    the counts declared in the header.
    """
    vals = gs.values
    values_count = len(vals)
    if values_count < 20:
        return None

    # Local converters: the protocol encodes "field not present" as None.
    def opt_int(idx: int) -> int | None:
        return int(vals[idx]) if vals[idx] is not None else None

    def opt_float(idx: int) -> float | None:
        return float(vals[idx]) if vals[idx] is not None else None

    def float_or_zero(idx: int) -> float:
        return float(vals[idx]) if vals[idx] is not None else 0.0

    object_count = int(vals[0]) if vals[0] is not None else 0
    fov_count = int(vals[1]) if vals[1] is not None else 0
    trajectory_count = int(vals[2]) if vals[2] is not None else 0
    contour_count = int(vals[3]) if vals[3] is not None else 0
    size_with_extra = 20 + object_count * 72 + fov_count * 6 + (trajectory_count + contour_count) * 2
    size_without_extra = 20 + object_count * 72 + fov_count * 6
    if values_count != size_with_extra and values_count != size_without_extra:
        return None
    output = ObjectSensorSample()
    output.time = gs.time
    output.cipv_index = int(vals[4]) if vals[4] is not None else -1
    output.lko_index = int(vals[5]) if vals[5] is not None else -1
    output.rko_index = int(vals[6]) if vals[6] is not None else -1
    output.vehicle_speed = opt_float(7)
    output.vehicle_curvature = opt_float(8)
    output.vehicle_width = opt_float(9)
    output.vehicle_length = opt_float(10)
    output.vehicle_wheel_base = opt_float(11)
    output.vehicle_front_overhang = opt_float(12)
    # Contour points are stored after all objects, FOVs and trajectory points.
    contour_base = 20 + object_count * 72 + fov_count * 6 + trajectory_count * 2
    for i in range(object_count):
        obj = ObjectInfo()
        b = 20 + 72 * i
        obj.id = int(vals[b] if vals[b] is not None else 0)
        obj.age = int(vals[b + 1] if vals[b + 1] is not None else 0)
        obj.raw_id = opt_int(b + 2)
        obj.raw_age = opt_int(b + 3)
        obj.raw_class_id = opt_int(b + 4)
        obj.classification = ObjectClass(int(vals[b + 5])) if vals[b + 5] is not None else ObjectClass.GENERAL
        obj.pos_mode = PositionMode(int(vals[b + 6])) if vals[b + 6] is not None else PositionMode.CLOSEST_POINT
        obj.posx = float_or_zero(b + 7)
        obj.posy = float_or_zero(b + 8)
        obj.posz = float_or_zero(b + 9)
        obj.posx_sigma = opt_float(b + 10)
        obj.posy_sigma = opt_float(b + 11)
        obj.posz_sigma = opt_float(b + 12)
        obj.cpx = float_or_zero(b + 13)
        obj.cpy = float_or_zero(b + 14)
        obj.cpd = float_or_zero(b + 15)
        obj.width = opt_float(b + 16)
        obj.length = opt_float(b + 17)
        obj.height = opt_float(b + 18)
        obj.heading = opt_float(b + 19)
        obj.vx_rel = opt_float(b + 20)
        obj.vx_abs = opt_float(b + 21)
        obj.vy_rel = opt_float(b + 22)
        obj.vy_abs = opt_float(b + 23)
        obj.vz_rel = opt_float(b + 24)
        obj.vz_abs = opt_float(b + 25)
        obj.ax_rel = opt_float(b + 26)
        obj.ax_abs = opt_float(b + 27)
        obj.ay_rel = opt_float(b + 28)
        obj.ay_abs = opt_float(b + 29)
        obj.az_rel = opt_float(b + 30)
        obj.az_abs = opt_float(b + 31)
        # Color is only marked valid when all three channels are present.
        if vals[b + 32] is not None and vals[b + 33] is not None and vals[b + 34] is not None:
            obj.color.valid = True
            obj.color.r = int(vals[b + 32])
            obj.color.g = int(vals[b + 33])
            obj.color.b = int(vals[b + 34])
        obj.time_offset = opt_int(b + 35)
        obj.confidence = opt_float(b + 36)
        obj.class_confidence = opt_float(b + 37)
        # Slots b+38..b+45 and b+51..b+69 are not decoded here.
        obj.rcs = opt_float(b + 46)
        obj.v_lng = opt_float(b + 47)
        obj.v_lat = opt_float(b + 48)
        obj.yaw_rate = opt_float(b + 49)
        obj.curvature = opt_float(b + 50)
        cont_ok = False
        cont_offset = 0
        cont_size = 0
        if vals[b + 70] is not None and vals[b + 71] is not None:
            cont_offset = int(vals[b + 70])
            cont_size = int(vals[b + 71])
            cont_ok = True
        # Contour data only exists in the "with extra" record layout.
        if values_count == size_with_extra and cont_ok:
            # Distinct index 'j' avoids shadowing the outer object loop's 'i'.
            for j in range(cont_size):
                obj.contour.append(float(vals[contour_base + cont_offset * 2 + j * 2]))
                obj.contour.append(float(vals[contour_base + cont_offset * 2 + j * 2 + 1]))
        output.objects.append(obj)
    for i in range(fov_count):
        fov = ObjectSensorFov()
        b = 20 + object_count * 72 + 6 * i
        fov.position_x = float(vals[b])
        fov.position_y = float(vals[b + 1])
        fov.orientation = float(vals[b + 2])
        fov.angle_range = float(vals[b + 3])
        fov.distance_range = float(vals[b + 4])
        fov.blind_range = float(vals[b + 5])
        output.fovs.append(fov)
    return output


def _interpolate_angle(a1: float, w1: float, a2: float, w2: float) -> float | None:
    deg2rad = pi / 180
    x1 = cos(a1 * deg2rad)
    y1 = sin(a1 * deg2rad)
    x2 = cos(a2 * deg2rad)
    y2 = sin(a2 * deg2rad)
    xo = x1 * w1 + x2 * w2
    yo = y1 * w1 + y2 * w2
    if xo == 0 and yo == 0:
        return None
    return atan2(yo, xo) / deg2rad


def _interpolate_obj_sensor_sample(s1: ObjectSensorSample, w1: float, s2: ObjectSensorSample, w2: float) -> ObjectSensorSample:
    """Interpolate two ObjectSensorSample frames with weights w1 and w2.

    Only objects whose id appears in both frames are emitted.  Continuous
    fields are blended as v1*w1 + v2*w2 (skipped when either side is
    missing); discrete fields (class, color, contour, ...) follow the frame
    with the larger weight.  FOVs and static vehicle geometry are copied
    from s1.
    """
    output = ObjectSensorSample()
    # NOTE(review): bi.time is presumably the framework's current sample
    # time, i.e. the interpolation target — confirm against bi_common.
    output.time = bi.time
    output.fovs = s1.fovs
    output.vehicle_width = s1.vehicle_width
    output.vehicle_length = s1.vehicle_length
    output.vehicle_wheel_base = s1.vehicle_wheel_base
    output.vehicle_front_overhang = s1.vehicle_front_overhang
    if s1.vehicle_speed is not None and s2.vehicle_speed is not None:
        output.vehicle_speed = s1.vehicle_speed * w1 + s2.vehicle_speed * w2
    if s1.vehicle_curvature is not None and s2.vehicle_curvature is not None:
        output.vehicle_curvature = s1.vehicle_curvature * w1 + s2.vehicle_curvature * w2

    # A tracked special object (CIPV / LKO / RKO) only survives when both
    # frames point at the same object id.
    def _common_id(i1: int, i2: int) -> int:
        if i1 >= 0 and i2 >= 0 and s1.objects[i1].id == s2.objects[i2].id:
            return s1.objects[i1].id
        return -1

    cipv_id = _common_id(s1.cipv_index, s2.cipv_index)
    lko_id = _common_id(s1.lko_index, s2.lko_index)
    rko_id = _common_id(s1.rko_index, s2.rko_index)

    # Continuous fields blended when present on both sides; a missing value
    # leaves the ObjectInfo default untouched (same as the per-field ifs).
    lerp_attrs = (
        "posx_sigma", "posy_sigma", "posz_sigma",
        "vx_abs", "vy_abs", "vz_abs", "vx_rel", "vy_rel", "vz_rel",
        "ax_abs", "ay_abs", "az_abs", "ax_rel", "ay_rel", "az_rel",
        "width", "length", "height",
        "rcs", "v_lng", "v_lat", "yaw_rate", "curvature",
    )
    index = 0
    for o1 in s1.objects:
        obj_id = o1.id  # renamed from 'id' to avoid shadowing the builtin
        for o2 in s2.objects:
            if o2.id != obj_id:
                continue
            if cipv_id == obj_id:
                output.cipv_index = index
            if lko_id == obj_id:
                output.lko_index = index
            if rko_id == obj_id:
                output.rko_index = index
            obj = ObjectInfo()
            obj.id = obj_id
            obj.age = o1.age
            obj.raw_id = o1.raw_id
            obj.raw_age = o1.raw_age
            # Discrete attributes follow the dominant (heavier) frame.
            dominant = o1 if w1 > w2 else o2
            obj.confidence = dominant.confidence
            if o1.time_offset is not None and o2.time_offset is not None:
                obj.time_offset = int(o1.time_offset * w1 + o2.time_offset * w2)
            obj.color = dominant.color
            obj.classification = dominant.classification
            obj.raw_class_id = dominant.raw_class_id
            obj.class_confidence = dominant.class_confidence
            obj.pos_mode = o1.pos_mode
            obj.posx = o1.posx * w1 + o2.posx * w2
            obj.posy = o1.posy * w1 + o2.posy * w2
            obj.posz = o1.posz * w1 + o2.posz * w2
            obj.cpx = o1.cpx * w1 + o2.cpx * w2
            obj.cpy = o1.cpy * w1 + o2.cpy * w2
            obj.cpd = o1.cpd * w1 + o2.cpd * w2
            for attr in lerp_attrs:
                v1 = getattr(o1, attr)
                v2 = getattr(o2, attr)
                if v1 is not None and v2 is not None:
                    setattr(obj, attr, v1 * w1 + v2 * w2)
            # Heading is circular: blend on the unit circle, not linearly.
            if o1.heading is not None and o2.heading is not None:
                obj.heading = _interpolate_angle(o1.heading, w1, o2.heading, w2)
            obj.contour = dominant.contour
            output.objects.append(obj)
            index += 1
            # Fix: stop after the first match — without this, duplicate ids
            # in s2 would emit duplicate output objects.
            break
    return output


# Query ObjectSensorSample for input / 获取ObjectSensorSample，用于样本输入
def get_obj_sensor_sample(channel: int) -> ObjectSensorSample | None:
    """Fetch and time-interpolate the ObjectSensorSample for a channel.

    Protocol versions are tried from newest (v7) down to oldest (v5); the
    first version present in bi.input_samples is used exclusively.  Returns
    None when no version is available or either endpoint sample fails to
    decode.
    """
    # Newest protocol first; the first one available wins.
    converters = (
        ('obj-sensor-sample-v7@', _conv_obj_sensor_sample_v7),
        ('obj-sensor-sample-v6@', _conv_obj_sensor_sample_v6),
        ('obj-sensor-sample-v5@', _conv_obj_sensor_sample_v5),
    )
    for prefix, convert in converters:
        protocol_id = prefix + str(channel)
        if protocol_id not in bi.input_samples:
            continue
        pair = bi.input_samples[protocol_id]
        s1 = convert(pair.sample1)
        s2 = convert(pair.sample2)
        if s1 is not None and s2 is not None:
            return _interpolate_obj_sensor_sample(s1, pair.weight1, s2, pair.weight2)
        # Matched protocol but a sample failed to decode: do not fall back
        # to an older version (same as the original if/elif chain).
        return None
    return None
