from typing import Callable

import numpy as np
from dora import DoraStatus
from dora_utils import (
    get_extrinsic_matrix,
    get_intrinsic_matrix,
    get_projection_matrix,
    local_points_to_camera_view,
)
from sklearn.neighbors import KNeighborsRegressor
import cv2
import math

import pickle
import open3d
import os

# Depth camera image size (pixels) and horizontal field of view (degrees).
DEPTH_IMAGE_WIDTH = 1920
DEPTH_IMAGE_HEIGHT = 1080
DEPTH_FOV = 90
# Camera pose relative to the vehicle.
# NOTE(review): assumed ordering [x, y, z, pitch, yaw, roll] — confirm
# against get_projection_matrix / get_extrinsic_matrix in dora_utils.
CAMERA_POSITION = np.array([2.0, 0, 1.0, 0, 0, 0])
INTRINSIC_MATRIX = get_intrinsic_matrix(
    DEPTH_IMAGE_WIDTH, DEPTH_IMAGE_HEIGHT, DEPTH_FOV
)

INV_INTRINSIC_MATRIX = np.linalg.inv(INTRINSIC_MATRIX)
# Rotation from Velodyne axes (x forward, y right, z up) to camera axes
# (x right, y down, z forward); see the axis comment in Operator.on_input.
VELODYNE_MATRIX = np.array([[0, 0, 1], [1, 0, 0], [0, -1, 0]])
UNREAL_MATRIX = np.array([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])
INV_UNREAL_MATRIX = np.linalg.inv(UNREAL_MATRIX)
INV_VELODYNE_MATRIX = np.linalg.inv(VELODYNE_MATRIX)

# Minimum detection confidence (sic: "OBSTABLE"); not used in the code
# visible in this chunk — kept for external users.
OBSTABLE_CONFIDENCE_THRESHOLD = 0.5

# Toggle the Kalman-filter-based obstacle motion forecasting.
WITH_KALMAN = True

class Predict_obstacle:
    """Per-label obstacle trackers built on OpenCV Kalman filters.

    Each label maps to a list of trackers.  A new (x, y) measurement is
    assigned to the first tracker whose last point lies within
    `diff_threshold` meters, otherwise a fresh tracker is spawned.  Once a
    tracker has accumulated more than 5 measurements, `predict` rolls the
    filter forward 20 steps to produce a trajectory forecast.
    """

    def __init__(self):
        # label -> list of trackers; each tracker is a dict with keys
        # "filter" (cv2.KalmanFilter), "frames" (measurement count),
        # "update" (seen this frame) and "points" (measurement history).
        self.kf = {}
        # Time step (seconds) of the constant-velocity motion model.
        self.dt = 0.1
        # Max planar distance (meters) to associate a measurement
        # with an existing tracker.
        self.diff_threshold = 0.5
        # Total detection frames processed (incremented by the caller).
        self.frames = 0

    def distance_compute(self, point1, point2):
        """Return True when the planar (x, y) distance between the two
        points exceeds `diff_threshold` (i.e. they do NOT match)."""
        return (
            math.sqrt(
                math.pow(point1[0] - point2[0], 2)
                + math.pow(point1[1] - point2[1], 2)
            )
            > self.diff_threshold
        )

    def _new_filter(self):
        """Build a constant-velocity Kalman filter with state
        (x, y, vx, vy) and measurement (x, y)."""
        kf = cv2.KalmanFilter(4, 2)
        kf.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
        # BUG FIX: the original hard-coded dt=1 in the transition matrix for
        # every tracker created after a label's first one; use self.dt
        # consistently for all trackers.
        kf.transitionMatrix = np.array(
            [[1, 0, self.dt, 0], [0, 1, 0, self.dt], [0, 0, 1, 0], [0, 0, 0, 1]],
            np.float32,
        )
        kf.measurementNoiseCov = np.array([[1, 0], [0, 1]], np.float32) * 1e-3
        return kf

    def create_kf(self, label):
        """Append a fresh tracker for `label`, creating its list if needed."""
        self.kf.setdefault(label, []).append(
            {"filter": self._new_filter(), "frames": 0, "update": False, "points": []}
        )

    def correct(self, kf, coordX, coordY, coordZ):
        """Feed one (x, y) measurement into tracker `kf` and record it."""
        measured = np.array([[np.float32(coordX)], [np.float32(coordY)]])
        kf["filter"].correct(measured)
        kf["frames"] += 1
        kf["points"].append([coordX, coordY, coordZ])
        kf["update"] = True

    def update(self):
        """Drop trackers that received no measurement this frame and
        clear the per-frame `update` flags on the survivors."""
        for label in self.kf:
            kept = []
            for kf in self.kf[label]:
                if kf["update"]:
                    kf["update"] = False
                    kept.append(kf)
            self.kf[label] = kept

    def predict(self, label, coordX, coordY, coordZ):
        """Associate the measurement with a tracker for `label` and return
        a 20-step forecast [[x, y, z, vx, vy], ...] once the tracker is
        warm (> 5 measurements); otherwise advance the filter and return
        None."""
        if label not in self.kf:
            self.create_kf(label)
            kf = self.kf[label][-1]
        else:
            for candidate in self.kf[label]:
                # Reuse the first tracker close enough to the measurement.
                if not self.distance_compute(candidate["points"][-1], [coordX, coordY]):
                    kf = candidate
                    break
            else:
                # No tracker matched: start a new one for this label.
                self.create_kf(label)
                kf = self.kf[label][-1]
        self.correct(kf, coordX, coordY, coordZ)

        if kf["frames"] > 5:
            # z changes slowly; use the mean of the last 3 measured values.
            z_avg = sum(p[2] for p in kf["points"][-3:]) / 3
            predict_points = []
            for _ in range(20):
                predicted = kf["filter"].predict()
                predict_points.append(
                    [
                        float(predicted[0]),
                        float(predicted[1]),
                        z_avg,
                        float(predicted[2]),
                        float(predicted[3]),
                    ]
                )
            return predict_points
        # Still warming up: advance the filter state but emit nothing.
        kf["filter"].predict()
        return None

# ans in VELODYNE axis
def FormCloud2bbox(cloud_new: np.array, cloud_last: np.array):
    """Cluster two stacked lidar sweeps into obstacle bounding boxes.

    Both inputs are (n, 3) point clouds in the Velodyne frame.  Returns an
    (m, 9) array of rows [center(3), min_bound(3), max_bound(3)] in the
    camera frame, or an empty (0, 9) array when no cluster qualifies.
    """
    cloud = np.vstack((cloud_new, cloud_last))
    # Keep forward points (x > 0.1) below 0.5 m that are more than 3 m
    # away in the ground plane.
    distance = np.linalg.norm(cloud[:, :2], axis=1)
    condition = (cloud[:, 0] > 0.1) & (cloud[:, 2] < 0.5) & (distance > 3)
    cloud = cloud[condition]
    # Rotate into the camera frame and apply the sensor offset.
    cloud = np.dot(cloud, VELODYNE_MATRIX) + np.array([0, -1, -2])
    cloud_3d = open3d.geometry.PointCloud()
    cloud_3d.points = open3d.utility.Vector3dVector(cloud[:, :3])
    # Remove the dominant (ground) plane via RANSAC, then cluster the rest.
    plane_model, inliers = cloud_3d.segment_plane(
        distance_threshold=0.1, ransac_n=15, num_iterations=100
    )
    unground_cloud = cloud_3d.select_by_index(inliers, invert=True)
    labels = np.array(unground_cloud.cluster_dbscan(eps=1, min_points=10))
    objects_center = []
    bboxs = []
    for label in np.unique(labels):
        if label == -1:  # DBSCAN noise points
            continue
        cluster = unground_cloud.select_by_index(np.where(labels == label)[0])
        bbox = cluster.get_axis_aligned_bounding_box()
        # Discard implausibly large clusters (> 5 m extent in x or y).
        if (bbox.max_bound[0] - bbox.min_bound[0]) < 5 and (
            bbox.max_bound[1] - bbox.min_bound[1]
        ) < 5:
            objects_center.append(cluster.get_center())
            bboxs.append(bbox)
    # BUG FIX: np.stack raises ValueError on an empty sequence; return an
    # empty (0, 9) result instead of crashing when nothing qualifies.
    if not objects_center:
        return np.empty((0, 9))
    return np.stack(
        [
            np.concatenate([center, box.min_bound, box.max_bound])
            for center, box in zip(objects_center, bboxs)
        ],
        axis=0,
    )


def get_predictions(obstacles, obstacle_with_locations):
    """Build static-obstacle predictions from raw detections.

    Useful when the operator receives detections rather than predictions:
    each detection is paired with its 3D location, keeping only the last
    two detection fields (confidence and label).  Obstacles are assumed
    static.
    """
    return [
        np.append(location, detection[-2:])
        for detection, location in zip(obstacles, obstacle_with_locations)
    ]


class Operator:
    """
    Compute the location of obstacles, given 2D `bbox`, LIDAR point cloud and a position.

    Dora inputs:
      - "lidar_pc": float32 buffer of (n, 3) lidar points in the Velodyne frame.
      - "position": float32 vehicle pose; refreshes the camera extrinsic matrix.
      - "lanes": int32 buffer of (-1, 60, 2) lane pixels, projected to 3D via a
        KNN fit on ground points and emitted as "global_lanes".
      - "obstacles_left": int32 buffer of bounding boxes reshaped to (3, n, 6)
        rows [min_x, max_x, min_y, max_y, confidence, label] for the front,
        right and left cameras; located in 3D and emitted as "obstacles", with
        Kalman forecasts emitted as "obstacles_prediction_res".
    """

    # Rotations from the front-camera frame into the left/right camera frames.
    # Hoisted from on_input so they exist before the first lidar message.
    CENTER2LEFT_MATRIX = np.array([[0, 0, -1], [0, 1, 0], [1, 0, 0]])
    CENTER2RIGHT_MATRIX = np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])

    def __init__(self):
        # Accumulated (previous + current frame) clouds, per camera.
        # BUG FIX: the original assigned `point_cloud_full_front` three times
        # and never initialized the left/right variants, so the obstacle
        # branch could hit an AttributeError before two lidar frames arrived.
        self.point_cloud_full_front = []
        self.point_cloud_full_left = []
        self.point_cloud_full_right = []

        self.camera_point_cloud_full_front = []
        self.camera_point_cloud_full_left = []
        self.camera_point_cloud_full_right = []
        self.count = 0

        # Latest per-camera 3D clouds and their image-plane projections.
        self.point_cloud_front = []
        self.point_cloud_left = []
        self.point_cloud_right = []
        self.camera_point_cloud_front = []
        self.camera_point_cloud_left = []
        self.camera_point_cloud_right = []
        # Ground points kept aside for lane projection.
        self.ground_point_cloud = []
        self.camera_ground_point_cloud = []
        # Previous-frame copies used to build the "full" clouds above.
        self.last_point_cloud_front = []
        self.last_point_cloud_left = []
        self.last_point_cloud_right = []
        self.last_camera_point_cloud_front = []
        self.last_camera_point_cloud_left = []
        self.last_camera_point_cloud_right = []
        self.obstacles = []
        self.obstacles_bbox = []
        self.position = []
        self.lanes = []
        self.kf = Predict_obstacle()
        self.temp = 0
        self.ans = []
        # Raw Velodyne-frame clouds (current and previous sweep).
        self.point_cloud_in_Lidar_axis = []
        self.last_point_cloud_in_Lidar_axis = []

    @staticmethod
    def _points_in_box(cloud, cam_cloud, min_x, max_x, min_y, max_y):
        """Return the 3D points of `cloud` whose image projection in
        `cam_cloud` falls strictly inside the given pixel box."""
        mask = (
            (cam_cloud[:, 0] > min_x)
            & (cam_cloud[:, 0] < max_x)
            & (cam_cloud[:, 1] > min_y)
            & (cam_cloud[:, 1] < max_y)
        )
        return cloud[np.where(mask)]

    @staticmethod
    def _quartile_depth_point(points):
        """Return the point at the first-quartile depth — a noise-robust
        'closest point' estimate for a non-empty (n, 3) array."""
        return points[points[:, 2].argsort()[int(len(points) / 4)]]

    def on_event(
        self,
        dora_event: dict,
        send_output: Callable[[str, bytes], None],
    ) -> DoraStatus:
        """Dispatch dora events; only INPUT events are processed."""
        if dora_event["type"] == "INPUT":
            return self.on_input(dora_event, send_output)
        return DoraStatus.CONTINUE

    def on_input(
        self,
        dora_input: dict,
        send_output: Callable[[str, bytes], None],
    ):
        """Handle one dora input message (see class docstring for ids)."""
        if "lidar_pc" == dora_input["id"]:
            point_cloud = np.frombuffer(dora_input["data"], np.float32)
            point_cloud = point_cloud.reshape((-1, 3))

            # Keep the raw Velodyne-frame sweeps for the lidar-bbox path.
            self.last_point_cloud_in_Lidar_axis = self.point_cloud_in_Lidar_axis
            self.point_cloud_in_Lidar_axis = point_cloud

            # From Velodyne axis to Camera axis
            # from Velodyne axis:
            # x -> forward, y -> right, z -> top
            # to Camera axis:
            # x -> right, y -> bottom, z -> forward
            point_cloud = np.dot(point_cloud, VELODYNE_MATRIX) + np.array([0, -1, -2])

            # Split by viewing direction: forward (z > 1), left (x < -1),
            # right (x > 1).
            point_cloud_raw_front = point_cloud[np.where(point_cloud[:, 2] > 1)]
            point_cloud_raw_left = point_cloud[np.where(point_cloud[:, 0] < -1)]
            point_cloud_raw_right = point_cloud[np.where(point_cloud[:, 0] > 1)]

            # Separate ground points (y >= 1.6, i.e. below the lidar; camera
            # y points down) from the obstacle points.
            # BUG FIX: the original indexed with `above_ground_point_index ==
            # False` — a tuple-to-bool comparison that always selected zero
            # rows, leaving `ground_point_cloud` empty and starving the lane
            # KNN fit of data.
            above_ground_mask = point_cloud_raw_front[:, 1] < 1.6
            self.ground_point_cloud = point_cloud_raw_front[~above_ground_mask]
            point_cloud_raw_front = point_cloud_raw_front[above_ground_mask]
            point_cloud_raw_left = point_cloud_raw_left[point_cloud_raw_left[:, 1] < 1.6]
            point_cloud_raw_right = point_cloud_raw_right[point_cloud_raw_right[:, 1] < 1.6]

            # Express the side clouds in their own camera frames.
            point_cloud_front = point_cloud_raw_front
            point_cloud_left = np.dot(
                point_cloud_raw_left, self.CENTER2LEFT_MATRIX
            ) + np.array([2, 0.42, -0.55])
            point_cloud_right = np.dot(
                point_cloud_raw_right, self.CENTER2RIGHT_MATRIX
            ) + np.array([-2, 0.42, -0.55])

            # Project each cloud onto the image plane.
            camera_point_cloud_front = local_points_to_camera_view(
                point_cloud_front, INTRINSIC_MATRIX
            )
            camera_point_cloud_left = local_points_to_camera_view(
                point_cloud_left, INTRINSIC_MATRIX
            )
            camera_point_cloud_right = local_points_to_camera_view(
                point_cloud_right, INTRINSIC_MATRIX
            )
            self.camera_ground_point_cloud = local_points_to_camera_view(
                self.ground_point_cloud, INTRINSIC_MATRIX
            )

            # Roll the per-camera buffers and stack two frames into "full"
            # clouds to densify the bbox depth lookup.
            if len(point_cloud_front) != 0:
                self.last_point_cloud_front = self.point_cloud_front
                self.last_camera_point_cloud_front = self.camera_point_cloud_front
                self.camera_point_cloud_front = camera_point_cloud_front.T
                self.point_cloud_front = point_cloud_front
                if len(self.last_point_cloud_front) > 0:
                    self.point_cloud_full_front = np.vstack(
                        (self.last_point_cloud_front, self.point_cloud_front)
                    )
                    self.camera_point_cloud_full_front = np.vstack(
                        (self.last_camera_point_cloud_front, self.camera_point_cloud_front)
                    )

            if len(point_cloud_left) != 0:
                self.last_point_cloud_left = self.point_cloud_left
                self.last_camera_point_cloud_left = self.camera_point_cloud_left
                self.camera_point_cloud_left = camera_point_cloud_left.T
                self.point_cloud_left = point_cloud_left
                if len(self.last_point_cloud_left) > 0:
                    self.point_cloud_full_left = np.vstack(
                        (self.last_point_cloud_left, self.point_cloud_left)
                    )
                    self.camera_point_cloud_full_left = np.vstack(
                        (self.last_camera_point_cloud_left, self.camera_point_cloud_left)
                    )

            if len(point_cloud_right) != 0:
                self.last_point_cloud_right = self.point_cloud_right
                self.last_camera_point_cloud_right = self.camera_point_cloud_right
                self.camera_point_cloud_right = camera_point_cloud_right.T
                self.point_cloud_right = point_cloud_right
                if len(self.last_point_cloud_right) > 0:
                    self.point_cloud_full_right = np.vstack(
                        (self.last_point_cloud_right, self.point_cloud_right)
                    )
                    self.camera_point_cloud_full_right = np.vstack(
                        (self.last_camera_point_cloud_right, self.camera_point_cloud_right)
                    )

        elif "position" == dora_input["id"]:
            # Add sensor transform
            self.position = np.frombuffer(dora_input["data"], np.float32)
            # Compose the vehicle pose with the camera mounting offset to get
            # the world-from-camera transform, then its extrinsic matrix.
            vehicle_T_camera = np.identity(4)
            vehicle_T_camera[:3, 3] = CAMERA_POSITION[:3]
            world_T_vehicle = get_projection_matrix(self.position)
            world_T_camera = np.dot(world_T_vehicle, vehicle_T_camera)
            self.extrinsic_matrix = get_extrinsic_matrix(world_T_camera)

        elif "lanes" == dora_input["id"]:
            lanes = np.frombuffer(dora_input["data"], np.int32).reshape((-1, 60, 2))

            # Fit a pixel -> 3D mapping on the ground points, then look up
            # each lane pixel through it.
            knnr = KNeighborsRegressor(n_neighbors=4)
            knnr.fit(self.camera_ground_point_cloud[:, :2], self.ground_point_cloud)

            processed_lanes = []
            for lane in lanes:
                lane_location = np.array(knnr.predict(lane))
                # Homogeneous coordinates, then into the world frame.
                lane_location = np.hstack(
                    (lane_location, np.ones((lane_location.shape[0], 1)))
                )
                lane_location = np.dot(lane_location, self.extrinsic_matrix.T)[:, :3]
                processed_lanes.append(lane_location)
            processed_lanes = np.array(processed_lanes, np.float32).tobytes()

            send_output("global_lanes", processed_lanes, dora_input["metadata"])

        elif "obstacles_left" == dora_input["id"]:
            # One more detection frame for the tracker.
            self.kf.frames += 1
            print(f"start obstacles left ", os.getcwd())
            # Need a pose (for the extrinsic matrix) and at least one lidar
            # frame before obstacles can be located.
            if len(self.position) == 0 or len(self.point_cloud_front) == 0:
                print("none ppppp")
                return DoraStatus.CONTINUE

            try:
                self.obstacles_bbox = np.frombuffer(dora_input["data"], np.int32)
                print("input: ", self.obstacles_bbox)
                # (camera, n_bbox, [min_x, max_x, min_y, max_y, confidence,
                # label]) with camera 0 = front, 1 = right, 2 = left.
                self.obstacles_bbox = self.obstacles_bbox.reshape((3, -1, 6))
                obstacles_with_location = []
                label_list = []
                confidence_list = []

                for obstacle_bb in self.obstacles_bbox[0]:  # front
                    [min_x, max_x, min_y, max_y, confidence, label] = obstacle_bb
                    if label > 6:  # labels above 6 are not obstacles of interest
                        continue
                    center_x = (min_x + max_x) / 2
                    center_y = (min_y + max_y) / 2
                    length_x = abs(max_x - min_x)
                    length_y = abs(max_y - min_y)
                    # Prefer lidar points around the bbox center.
                    # BUG FIX: the original upper y bound was
                    # `center_y - length_y / 4`, an always-empty range that
                    # forced the whole-bbox fallback every time.
                    z_points = self._points_in_box(
                        self.point_cloud_full_front,
                        self.camera_point_cloud_full_front,
                        center_x - length_x / 4,
                        center_x + length_x / 4,
                        center_y - length_y / 4,
                        center_y + length_y / 4,
                    )
                    if len(z_points) <= 2:
                        # Too few center points: fall back to the full bbox.
                        z_points = self._points_in_box(
                            self.point_cloud_full_front,
                            self.camera_point_cloud_full_front,
                            min_x, max_x, min_y, max_y,
                        )
                    if len(z_points) > 0:
                        closest_point = self._quartile_depth_point(z_points)
                        if closest_point[2] > 1000:
                            print("error-------------------------")
                            print(closest_point)
                        obstacles_with_location.append(closest_point)
                        label_list.append(label)
                        confidence_list.append(confidence)

                for obstacle_bb in self.obstacles_bbox[1]:  # right
                    [min_x, max_x, min_y, max_y, confidence, label] = obstacle_bb
                    print("right")
                    if label > 6:
                        continue
                    center_x = (min_x + max_x) / 2
                    center_y = (min_y + max_y) / 2
                    # BUG FIX: upper y bound was `center_y - 0.1` (empty range).
                    z_points_right = self._points_in_box(
                        self.point_cloud_full_right,
                        self.camera_point_cloud_full_right,
                        center_x - 0.1, center_x + 0.1,
                        center_y - 0.1, center_y + 0.1,
                    )
                    if len(z_points_right) <= 2:
                        z_points_right = self._points_in_box(
                            self.point_cloud_full_right,
                            self.camera_point_cloud_full_right,
                            min_x, max_x, min_y, max_y,
                        )
                    # Also consider very close points beside the vehicle.
                    # BUG FIX: the original chained np.where(...) tuples with
                    # `and`, which keeps only the last condition; combine the
                    # three conditions into one boolean mask.
                    near_mask = (
                        (self.point_cloud_full_right[:, 0] < 2)
                        & (self.point_cloud_full_right[:, 0] > -2)
                        & (self.point_cloud_full_right[:, 2] < 2.5)
                    )
                    right_point = self.point_cloud_full_right[near_mask]
                    if len(right_point) > 0:
                        # Undo the right-camera offset/rotation to express the
                        # point in the front-camera frame.
                        right_point = right_point - np.array([-2, 0.42, -0.55])
                        right_point = np.dot(right_point, self.CENTER2LEFT_MATRIX)
                        obstacles_with_location.append(
                            self._quartile_depth_point(right_point)
                        )
                        label_list.append(label)
                        confidence_list.append(confidence)
                    if len(z_points_right) > 0:
                        z_points_right = z_points_right - np.array([-2, 0.42, -0.55])
                        z_points_right = np.dot(z_points_right, self.CENTER2LEFT_MATRIX)
                        closest_point = self._quartile_depth_point(z_points_right)
                        print("right cloest: ", closest_point)
                        obstacles_with_location.append(closest_point)
                        label_list.append(label)
                        confidence_list.append(confidence)

                for obstacle_bb in self.obstacles_bbox[2]:  # left
                    [min_x, max_x, min_y, max_y, confidence, label] = obstacle_bb
                    print("left")
                    if label > 6:
                        continue
                    center_x = (min_x + max_x) / 2
                    center_y = (min_y + max_y) / 2
                    length_x = abs(max_x - min_x)
                    length_y = abs(max_y - min_y)
                    # BUG FIX: upper y bound was `center_y - length_y / 4`.
                    z_points_left = self._points_in_box(
                        self.point_cloud_full_left,
                        self.camera_point_cloud_full_left,
                        center_x - length_x / 4,
                        center_x + length_x / 4,
                        center_y - length_y / 4,
                        center_y + length_y / 4,
                    )
                    if len(z_points_left) <= 2:
                        z_points_left = self._points_in_box(
                            self.point_cloud_full_left,
                            self.camera_point_cloud_full_left,
                            min_x, max_x, min_y, max_y,
                        )
                    # BUG FIX: same `and`-chained np.where as the right camera.
                    near_mask = (
                        (self.point_cloud_full_left[:, 0] < 2)
                        & (self.point_cloud_full_left[:, 0] > -2)
                        & (self.point_cloud_full_left[:, 2] < 2.5)
                    )
                    left_point = self.point_cloud_full_left[near_mask]
                    if len(left_point) > 0:
                        left_point = left_point - np.array([2, 0.42, -0.55])
                        left_point = np.dot(left_point, self.CENTER2RIGHT_MATRIX)
                        obstacles_with_location.append(
                            self._quartile_depth_point(left_point)
                        )
                        label_list.append(label)
                        confidence_list.append(confidence)
                    if len(z_points_left) > 0:
                        z_points_left = z_points_left - np.array([2, 0.42, -0.55])
                        z_points_left = np.dot(z_points_left, self.CENTER2RIGHT_MATRIX)
                        obstacles_with_location.append(
                            self._quartile_depth_point(z_points_left)
                        )
                        label_list.append(label)
                        confidence_list.append(confidence)

                obstacles_prediction_res = []
                obstacle_res = []

                if len(obstacles_with_location) > 0:
                    obstacles_with_location = np.array(obstacles_with_location)
                    # Homogeneous coordinates, then into the world frame.
                    obstacles_with_location = np.hstack(
                        (
                            obstacles_with_location,
                            np.ones((obstacles_with_location.shape[0], 1)),
                        )
                    )
                    obstacles_with_location = np.dot(
                        obstacles_with_location, self.extrinsic_matrix.T
                    )[:, :3]

                    for o in obstacles_with_location:
                        if o[2] > 1000:
                            print("1 error-------------------------")
                            print(o)

                    if WITH_KALMAN:
                        for label, confidence, location in zip(
                            label_list, confidence_list, obstacles_with_location
                        ):
                            obstacle_res.append(np.append(location, label))
                            if label < 6:  # filter other obstacle
                                res = self.kf.predict(
                                    label, location[0], location[1], location[2]
                                )
                                if res is not None:
                                    prediction_res = {
                                        "actor_id": label,
                                        "actor_pos": [i[:3] for i in res],
                                        "actor_velocity": [i[3:] for i in res],
                                    }
                                    obstacles_prediction_res.append(prediction_res)
                                    print("label : ", prediction_res)

                if len(obstacles_with_location) > 0:
                    # NOTE(review): zips the front-camera bboxes against ALL
                    # located obstacles (front + sides) — lengths can differ;
                    # kept as in the original, confirm intended pairing.
                    predictions = get_predictions(
                        self.obstacles_bbox[0], obstacles_with_location
                    )
                    send_output(
                        "obstacles",
                        np.array(predictions, np.float32).tobytes(),
                        dora_input["metadata"],
                    )
                else:
                    send_output(
                        "obstacles", np.array([]).tobytes(), dora_input["metadata"]
                    )

                if len(obstacles_prediction_res) > 0:
                    send_output(
                        "obstacles_prediction_res",
                        pickle.dumps(obstacles_prediction_res),
                        dora_input["metadata"],
                    )
                else:
                    send_output(
                        "obstacles_prediction_res",
                        pickle.dumps(np.array([])),
                        dora_input["metadata"],
                    )
            except Exception as e:
                # Best-effort: keep the pipeline alive on malformed input.
                print("Catch error: ", e)

        return DoraStatus.CONTINUE
        # elif "obstacles_bbox_lidar" == dora_input["id"]:

            # if len(self.position) == 0 or len(self.point_cloud_in_Lidar_axis) == 0:
            #     return DoraStatus.CONTINUE
            #     ans = FormCloud2bbox(self.point_cloud_in_Lidar_axis,self.last_point_cloud_in_Lidar_axis)
            #     send_output("obstacles_bbox_lidar", ans.tobytes(), dora_input["metadata"])
        
            # elif "obstacles_bbox_lidar" == dora_input["id"]:

                # if len(self.position) == 0 or len(self.point_cloud_in_Lidar_axis) == 0:
                #     return DoraStatus.CONTINUE
                #     ans = FormCloud2bbox(self.point_cloud_in_Lidar_axis,self.last_point_cloud_in_Lidar_axis)
                #     send_output("obstacles_bbox_lidar", ans.tobytes(), dora_input["metadata"])
       
"""
elif "obstacles_left" == dora_input["id"]:
            # update whole frames num
            self.kf.frames += 1
            print(f"start obstacles left")
            if len(self.position) == 0 or len(self.point_cloud) == 0 or len(self.point_cloud_full)==0: 
                print("none ppppp")
                return DoraStatus.CONTINUE
            # try:
            #     print(f"self.point_cloud: {self.point_cloud.shape}, {self.point_cloud[0]}")
            #     # (966487, 3), [  6.11811495 -13.43523693  75.94897461]
            # except:
            #     print(f"show self.point_cloud failed!")
            # bbox = np.array([[min_x, max_x, min_y, max_y, confidence, label], ... n_bbox ... ])
            self.obstacles_bbox = np.frombuffer(
                dora_input["data"], np.int32
            ).reshape((3, -1, 6))
            print("obs: ", self.obstacles_bbox)
            
            z_points = []
            obstacles_with_location = []
            for obstacle_bb in self.obstacles_bbox[0]: # front
                [min_x, max_x, min_y, max_y, confidence, label] = obstacle_bb
                print("center")
                z_points = self.point_cloud_full[
                    np.where(
                        (self.camera_point_cloud_full[:, 0] > min_x)  
                        & (self.camera_point_cloud_full[:, 0] < max_x)
                        & (self.camera_point_cloud_full[:, 1] > min_y)
                        & (self.camera_point_cloud_full[:, 1] < max_y)
                    )
                ]
                if len(z_points) > 0:
                    closest_point = z_points[
                        z_points[:, 2].argsort()[int(len(z_points) / 4)]
                    ] # zui jin de shendu
                    if closest_point[2]>1000:
                        print("error-------------------------")
                        print(closest_point)
                    obstacles_with_location.append(closest_point)
            for obstacle_bb in self.obstacles_bbox[1]: # front
                [min_x, max_x, min_y, max_y, confidence, label] = obstacle_bb
                print("right")
                pass
                    # z_points = self.point_cloud_full[
                    #     np.where(
                    #         (self.camera_point_cloud_full_right[:, 0] > min_x)  
                    #         & (self.camera_point_cloud_full_right[:, 0] < max_x)
                    #         & (self.camera_point_cloud_full_right[:, 1] > min_y)
                    #         & (self.camera_point_cloud_full_right[:, 1] < max_y)
                    #     )
                    # ]
            for obstacle_bb in self.obstacles_bbox[2]: # front
                [min_x, max_x, min_y, max_y, confidence, label] = obstacle_bb
                print("left")
                pass
                # z_points = self.point_cloud_full[
                #     np.where(
                #         (self.camera_point_cloud_full_left[:, 0] > min_x)  
                #         & (self.camera_point_cloud_full_left[:, 0] < max_x)
                #         & (self.camera_point_cloud_full_left[:, 1] > min_y)
                #         & (self.camera_point_cloud_full_left[:, 1] < max_y)
                #     )
                # ]
                #return DoraStatus.CONTINUE
                
            obstacles_prediction_res = []
            # print("==================ans: ", len(self.point_cloud_in_Lidar_axis)," ", len(self.last_point_cloud_in_Lidar_axis))
            if  len(self.point_cloud_in_Lidar_axis) != 0 and len(self.last_point_cloud_in_Lidar_axis) != 0:
                self.ans = FormCloud2bbox(self.point_cloud_in_Lidar_axis,self.last_point_cloud_in_Lidar_axis)
                self.ans[:,0:3] = np.dot( self.ans[:,0:3] , self.extrinsic_matrix.T[0:3,0:3] )
                self.ans[:,3:6] = np.dot( self.ans[:,3:6] , self.extrinsic_matrix.T[0:3,0:3] )
                self.ans[:,6:9] = np.dot( self.ans[:,6:9] , self.extrinsic_matrix.T[0:3,0:3] )
                trans = self.extrinsic_matrix[:3,3]
                for i in range(self.ans.shape[1]):
                    self.ans[:, i] = self.ans[:, i] + trans[i % trans.shape[0]]

            # trans = self.extrinsic_matrix.T[:3,3]
            # print("trans ", trans,self.extrinsic_matrix)
            # print("self.ans: ", self.ans[1,:])

            obstacle_res = []
            if len(obstacles_with_location) > 0:
                obstacles_with_location = np.array(obstacles_with_location)
                obstacles_with_location = np.hstack(
                    (
                        obstacles_with_location,
                        np.ones((obstacles_with_location.shape[0], 1)),
                    )
                )
                obstacles_with_location = np.dot(
                    obstacles_with_location, self.extrinsic_matrix.T
                )[:, :3]
                # predict v1
                if WITH_KALMAN:
                    for obstacle, location in zip(self.obstacles_bbox, obstacles_with_location):
                        # if obstacle[-2] < OBSTABLE_CONFIDENCE_THRESHOLD: # Filter out low confidence obstacles
                        #     continue
                        obstacle = np.append(location, obstacle[-2:])
                        min_dis = 1000
                        min_obstacle = [obstacle[0], obstacle[1], obstacle[2]]
                        for pos_idx in range(len(self.ans)): #[]
                            pos1 = self.ans[pos_idx][:3]
                            pos2 = self.ans[pos_idx][3:6]
                            pos3 = self.ans[pos_idx][6:]
                            min_dis_temp = min((pow((obstacle[0] - pos1[0]), 2) + pow((obstacle[1] - pos1[1]), 2) + pow((obstacle[2] - pos1[2]), 2)), 
                            (pow((obstacle[0] - pos2[0]), 2) + pow((obstacle[1] - pos2[1]), 2) + pow((obstacle[2] - pos2[2]), 2)),
                            (pow((obstacle[0] - pos3[0]), 2) + pow((obstacle[1] - pos3[1]), 2) + pow((obstacle[2] - pos3[2]), 2)))
                            if min_dis_temp < min_dis and min_dis_temp < 2:
                                # print("change obstacle -------------------------------------------")
                                min_dis = min_dis_temp
                                min_obstacle = pos1
                        
                        obstacle[:3] = min_obstacle[:3]
                        print(obstacle[-1], " ================================min: ", min_obstacle, [obstacle[0], obstacle[1], obstacle[2]])
                        obstacle_res.append(obstacle)
                        label  = obstacle[-1]
                        if label < 600: # filter other obstacle
                            res = self.kf.predict(obstacle[-1], min_obstacle[0], min_obstacle[1], min_obstacle[2])
                            if res is not None:
                                pos = np.array([i[:3] for i in res])
                                v = np.array([i[3:] for i in res])
                                prediction_res = {
                                    "actor_id" : label,
                                    "actor_pos" : pos,
                                    "actor_velocity" : v
                                }
                                obstacles_prediction_res.append(prediction_res)
                                # print("label : ", label)
                        
                predictions = get_predictions(
                    self.obstacles_bbox, obstacles_with_location
                )
                for p in predictions:
                    if p[2]>1000:
                        print("2 error-------------------------")
                        print(p)
                # predictions_bytes = np.array(obstacle_res, np.float32).tobytes()
                predictions_bytes = np.array(obstacles_with_location, np.float32).tobytes()
                # try:
                #     print(f"predictions_bytes: {type(predictions_bytes)}, {np.array(predictions, np.float32).shape}, {np.array(predictions, np.float32)[0]}")
                #     # <class 'bytes'>, (3, 5), [ 75.762566  115.856514    2.2158468  39.          2.       ]
                # except:
                #     print(f"show predictions failed!")
                send_output(
                    "obstacles", np.array(predictions, np.float32).tobytes(), dora_input["metadata"]
                )
            else:
                send_output(
                    "obstacles", np.array([]).tobytes(), dora_input["metadata"]
                )
            
            if len(obstacles_prediction_res) > 0:
                obstacles_prediction_res_bytes = pickle.dumps(obstacles_prediction_res)
                send_output(
                    "obstacles_prediction_res", obstacles_prediction_res_bytes, dora_input["metadata"]
                )
            else:
                send_output(
                    "obstacles_prediction_res", pickle.dumps(np.array([])), dora_input["metadata"]
                )
"""