from typing import Callable

import numpy as np
from dora import DoraStatus
from dora_utils import (
    get_extrinsic_matrix,
    get_intrinsic_matrix,
    get_projection_matrix,
    local_points_to_camera_view,
)
from sklearn.neighbors import KNeighborsRegressor
import cv2
import math

import pickle
import open3d

# Camera / depth image geometry used to build the intrinsic matrix.
DEPTH_IMAGE_WIDTH = 1920   # image width in pixels
DEPTH_IMAGE_HEIGHT = 1080  # image height in pixels
DEPTH_FOV = 90             # field of view in degrees
# Camera pose relative to the vehicle; only the first 3 values (x, y, z offset)
# are used below — presumably the last 3 are rotation angles, TODO confirm.
CAMERA_POSITION = np.array([2.0, 0, 1.0, 0, 0, 0])
INTRINSIC_MATRIX = get_intrinsic_matrix(
    DEPTH_IMAGE_WIDTH, DEPTH_IMAGE_HEIGHT, DEPTH_FOV
)

INV_INTRINSIC_MATRIX = np.linalg.inv(INTRINSIC_MATRIX)
# Axis-permutation matrix applied as a right-multiplier on row vectors:
# Velodyne axes (x forward, y right, z up) -> camera axes
# (x right, y down, z forward); see the comment in Operator.on_input.
VELODYNE_MATRIX = np.array([[0, 0, 1], [1, 0, 0], [0, -1, 0]])
# NOTE(review): UNREAL_MATRIX / the inverses below are not used in this
# chunk — presumably another axis convention; verify against other callers.
UNREAL_MATRIX = np.array([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])
INV_UNREAL_MATRIX = np.linalg.inv(UNREAL_MATRIX)
INV_VELODYNE_MATRIX = np.linalg.inv(VELODYNE_MATRIX)


class Predict_obstacle:
    """Track obstacles per class label with OpenCV Kalman filters.

    Each label maps to a list of trackers (one per physical obstacle);
    measurements are associated to a tracker by planar (x, y) proximity.
    The filter state is (x, y, vx, vy) with a constant-velocity model.
    """

    def __init__(self):
        # label -> list of tracker dicts:
        #   {"filter": cv2.KalmanFilter, "frames": int measurement count,
        #    "update": bool seen-this-frame flag, "points": [[x, y, z], ...]}
        self.kf = {}
        # Time step of the constant-velocity transition matrix.
        self.dt = 0.1
        # Two measurements closer than this (in x/y) belong to the same tracker.
        self.diff_threshold = 0.5
        # Total frames processed (incremented externally by the caller).
        self.frames = 0

    def distance_compute(self, point1, point2):
        """Return True when the planar (x, y) distance between the points
        exceeds ``self.diff_threshold`` — i.e. they are "far apart"."""
        return (
            math.hypot(point1[0] - point2[0], point1[1] - point2[1])
            > self.diff_threshold
        )

    def _new_filter(self):
        """Build a fresh 4-state (x, y, vx, vy) / 2-measurement Kalman filter."""
        kf = cv2.KalmanFilter(4, 2)
        kf.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
        # BUGFIX: the original hard-coded dt=1 in the transition matrix when a
        # tracker list already existed for the label, but used self.dt (0.1)
        # when it did not — the motion model differed depending on insertion
        # order. Use self.dt consistently.
        kf.transitionMatrix = np.array(
            [[1, 0, self.dt, 0], [0, 1, 0, self.dt], [0, 0, 1, 0], [0, 0, 0, 1]],
            np.float32,
        )
        kf.measurementNoiseCov = np.array([[1, 0], [0, 1]], np.float32) * 1e-3
        return kf

    def create_kf(self, label):
        """Append a new tracker for ``label``, creating its list if needed."""
        self.kf.setdefault(label, []).append(
            {"filter": self._new_filter(), "frames": 0, "update": False, "points": []}
        )

    def correct(self, kf, coordX, coordY, coordZ):
        """Feed the (x, y) measurement to the tracker and record the 3D point."""
        measured = np.array([[np.float32(coordX)], [np.float32(coordY)]])
        kf["filter"].correct(measured)
        kf["frames"] += 1
        kf["points"].append([coordX, coordY, coordZ])
        kf["update"] = True

    def update(self):
        """Drop trackers that received no measurement since the last call and
        clear the ``update`` flag on the survivors."""
        for label in self.kf:
            survivors = []
            for kf in self.kf[label]:
                if kf["update"]:
                    kf["update"] = False
                    survivors.append(kf)
            self.kf[label] = survivors

    def predict(self, label, coordX, coordY, coordZ):
        """Associate the measurement with a tracker for ``label`` and predict.

        Once the matched tracker has more than 5 measurements, roll the filter
        forward 20 steps and return ``[[x, y, z_avg, vx, vy], ...]``; otherwise
        advance the filter one step and return None.
        """
        if label not in self.kf:
            self.create_kf(label)
            kf = self.kf[label][-1]
        else:
            for candidate in self.kf[label]:
                # "not far apart" -> same physical obstacle: reuse its tracker.
                if not self.distance_compute(
                    candidate["points"][-1], [coordX, coordY]
                ):
                    kf = candidate
                    break
            else:
                # No nearby tracker: this is a new obstacle of the same class.
                self.create_kf(label)
                kf = self.kf[label][-1]
        self.correct(kf, coordX, coordY, coordZ)

        if kf["frames"] > 5:
            # z is not part of the filter state; use the mean of the last 3
            # observed heights as a constant for all predicted steps.
            z_avg = sum(p[2] for p in kf["points"][-3:]) / 3
            predict_points = []
            for _ in range(20):
                predicted = kf["filter"].predict()
                predict_points.append(
                    [
                        float(predicted[0]),
                        float(predicted[1]),
                        z_avg,
                        float(predicted[2]),
                        float(predicted[3]),
                    ]
                )
            return predict_points
        # Not enough history yet: still advance the filter state once.
        kf["filter"].predict()
        return None

# ans in VELODYNE axis
def FormCloud2bbox(cloud_new: np.array, cloud_last: np.array):
    """Cluster two consecutive LIDAR sweeps into obstacle bounding boxes.

    Both inputs are (N, 3) point clouds in Velodyne axes. Returns an (M, 9)
    array whose rows are [center(3), bbox_min(3), bbox_max(3)] in camera axes,
    or an empty (0, 9) array when no cluster qualifies.
    """
    cloud = np.vstack((cloud_new, cloud_last))
    # Keep forward points (x > 0.1), below z = 0.5, farther than 3 m in x/y.
    distance = np.linalg.norm(cloud[:, :2], axis=1)
    condition = (cloud[:, 0] > 0.1) & (cloud[:, 2] < 0.5) & (distance > 3)
    cloud = cloud[condition]
    # Velodyne -> camera axes, then shift by the sensor offset.
    cloud = np.dot(cloud, VELODYNE_MATRIX) + np.array([0, -1, -2])
    cloud_3d = open3d.geometry.PointCloud()
    cloud_3d.points = open3d.utility.Vector3dVector(cloud[:, :3])
    # Remove the dominant (ground) plane, then cluster the remainder.
    plane_model, inliers = cloud_3d.segment_plane(
        distance_threshold=0.1, ransac_n=15, num_iterations=100
    )
    unground_cloud = cloud_3d.select_by_index(inliers, invert=True)
    labels = np.array(unground_cloud.cluster_dbscan(eps=1, min_points=10))
    rows = []
    for label in np.unique(labels):
        if label == -1:  # DBSCAN noise
            continue
        cluster = unground_cloud.select_by_index(np.where(labels == label)[0])
        bbox = cluster.get_axis_aligned_bounding_box()
        # Discard clusters wider than 5 m in either of the first two axes —
        # unlikely to be a single obstacle.
        if (bbox.max_bound[0] - bbox.min_bound[0]) < 5 and (
            bbox.max_bound[1] - bbox.min_bound[1]
        ) < 5:
            rows.append(
                np.concatenate(
                    [cluster.get_center(), bbox.min_bound, bbox.max_bound]
                )
            )
    # BUGFIX: np.stack raises ValueError on an empty sequence; return an
    # empty (0, 9) array instead so callers can safely len() / slice it.
    if not rows:
        return np.empty((0, 9))
    return np.stack(rows, axis=0)


def get_predictions(obstacles, obstacle_with_locations):
    """Extracts obstacle predictions out of the message.
    This method is useful to build obstacle predictions when
    the operator directly receives detections instead of predictions.
    The method assumes that the obstacles are static.
    """
    # Pair each detection with its 3D location; keep the detection's last two
    # fields (trailing entries of the bbox row) appended after the location.
    return [
        np.append(location, detection[-2:])
        for detection, location in zip(obstacles, obstacle_with_locations)
    ]


class Operator:
    """
    Compute the location of obstacles, given 2D `bbox`, LIDAR point cloud and a position.

    Inputs (by dora input id):
      - "lidar_pc": raw float32 point cloud, reshaped to (N, 3) Velodyne axes.
      - "position": vehicle pose (float32), used to build the camera extrinsic.
      - "lanes": int32 lane pixel coordinates, (-1, 60, 2).
      - "obstacles_bbox": int32 (-1, 6) rows [min_x, max_x, min_y, max_y,
        confidence, label]; emits "obstacles" and "obstacles_prediction_res".
    """

    def __init__(self):
        # Concatenation of the last two sweeps (camera axes) and their
        # pixel-space projections.
        self.point_cloud_full = []
        self.camera_point_cloud_full = []

        # Current sweep (camera axes), its projection, and the split-off
        # ground points with their projection.
        self.point_cloud = []
        self.camera_point_cloud = []
        self.ground_point_cloud = []
        self.camera_ground_point_cloud = []
        # Previous sweep (camera axes) and its projection.
        self.last_point_cloud = []
        self.last_camera_point_cloud = []
        self.obstacles = []
        self.obstacles_bbox = []
        self.position = []
        self.lanes = []
        # Kalman-filter-based obstacle tracker/predictor.
        self.kf = Predict_obstacle()
        self.temp = 0
        # Latest clustered obstacle boxes from FormCloud2bbox (world axes).
        self.ans = []
        # Raw current/previous sweeps kept in the original LIDAR axes.
        self.point_cloud_in_Lidar_axis = []
        self.last_point_cloud_in_Lidar_axis = []

    def on_event(
        self,
        dora_event: dict,
        send_output: Callable[[str, bytes], None],
    ) -> DoraStatus:
        # Only INPUT events carry data; everything else is a no-op.
        if dora_event["type"] == "INPUT":
            return self.on_input(dora_event, send_output)
        return DoraStatus.CONTINUE

    def on_input(
        self,
        dora_input: dict,
        send_output: Callable[[str, bytes], None],
    ):
        if "lidar_pc" == dora_input["id"]:
            point_cloud = np.frombuffer(dora_input["data"], np.float32)
            point_cloud = point_cloud.reshape((-1, 3))

            # Keep the raw sweep (and the previous one) in LIDAR axes for
            # FormCloud2bbox clustering.
            self.last_point_cloud_in_Lidar_axis = self.point_cloud_in_Lidar_axis
            self.point_cloud_in_Lidar_axis = point_cloud

            # From Velodyne axis to Camera axis
            # from Velodyne axis:
            # x -> forward, y -> right, z -> top
            # to Camera axis:
            # x -> right, y -> bottom, z -> forward
            point_cloud = np.dot(
                point_cloud,
                VELODYNE_MATRIX,
            ) + np.array([0, -1, -2])

            # Forward points only ( forward = z > 0.1 )
            point_cloud = point_cloud[point_cloud[:, 2] > 0.1]

            # Split ground / above-ground points (camera y points down, so
            # ground points are y >= 1.8).
            # BUGFIX: the original indexed with `np.where(...) == False`,
            # which compares a tuple to False (always the scalar False), so
            # `point_cloud[False]` selected an empty slice and the ground
            # points were never captured — breaking the "lanes" KNN fit.
            above_ground = point_cloud[:, 1] < 1.8
            self.ground_point_cloud = point_cloud[~above_ground]
            point_cloud = point_cloud[above_ground]

            # 3D array -> 2D array with index_x -> pixel x, index_y -> pixel_y, value -> z
            camera_point_cloud = local_points_to_camera_view(
                point_cloud, INTRINSIC_MATRIX
            )
            # NOTE(review): unlike `camera_point_cloud` below, this projection
            # is stored without transposition — confirm the shape expected by
            # the `lanes` branch.
            self.camera_ground_point_cloud = local_points_to_camera_view(
                self.ground_point_cloud, INTRINSIC_MATRIX
            )

            if len(point_cloud) != 0:
                # Rotate current -> previous, then stack both sweeps into the
                # "full" clouds used for bbox -> 3D location lookup.
                self.last_point_cloud = self.point_cloud
                self.last_camera_point_cloud = self.camera_point_cloud
                self.camera_point_cloud = camera_point_cloud.T
                self.point_cloud = point_cloud
                if len(self.last_point_cloud) > 0:
                    self.point_cloud_full = np.vstack(
                        (self.last_point_cloud, self.point_cloud)
                    )
                    self.camera_point_cloud_full = np.vstack(
                        (self.last_camera_point_cloud, self.camera_point_cloud)
                    )

        elif "position" == dora_input["id"]:
            # Vehicle pose; compose with the fixed camera offset to obtain
            # the camera extrinsic matrix.
            self.position = np.frombuffer(dora_input["data"], np.float32)
            # consider camera relative position to vehicle
            vehicle_T_camera = np.identity(4)
            vehicle_T_camera[:3, 3] = CAMERA_POSITION[:3]
            world_T_vehicle = get_projection_matrix(self.position)
            world_T_camera = np.dot(world_T_vehicle, vehicle_T_camera)
            # get camera extrinsic matrix
            self.extrinsic_matrix = get_extrinsic_matrix(world_T_camera)

        elif "lanes" == dora_input["id"]:
            lanes = np.frombuffer(dora_input["data"], np.int32).reshape(
                (-1, 60, 2)
            )

            # Regress 3D ground positions from 2D pixel coordinates, trained
            # on the projected ground points of the latest sweep.
            knnr = KNeighborsRegressor(n_neighbors=4)
            knnr.fit(
                self.camera_ground_point_cloud[:, :2], self.ground_point_cloud
            )

            processed_lanes = []
            for lane in lanes:
                lane_location = np.array(knnr.predict(lane))
                # Homogeneous coordinates -> world frame via the extrinsic.
                lane_location = np.hstack(
                    (
                        lane_location,
                        np.ones((lane_location.shape[0], 1)),
                    )
                )
                lane_location = np.dot(lane_location, self.extrinsic_matrix.T)[
                    :, :3
                ]
                processed_lanes.append(lane_location)
            processed_lanes = np.array(processed_lanes, np.float32).tobytes()

            send_output("global_lanes", processed_lanes, dora_input["metadata"])

        elif "obstacles_bbox" == dora_input["id"]:
            # update whole frames num
            self.kf.frames += 1

            # Need a pose, a current sweep, and two stacked sweeps before any
            # 3D location can be computed.
            if (
                len(self.position) == 0
                or len(self.point_cloud) == 0
                or len(self.point_cloud_full) == 0
            ):
                return DoraStatus.CONTINUE

            self.obstacles_bbox = np.frombuffer(
                dora_input["data"], np.int32
            ).reshape((-1, 6))

            obstacles_with_location = []
            for obstacle_bb in self.obstacles_bbox:
                [min_x, max_x, min_y, max_y, confidence, label] = obstacle_bb

                # All accumulated points whose pixel projection falls inside
                # the 2D bounding box.
                z_points = self.point_cloud_full[
                    np.where(
                        (self.camera_point_cloud_full[:, 0] > min_x)
                        & (self.camera_point_cloud_full[:, 0] < max_x)
                        & (self.camera_point_cloud_full[:, 1] > min_y)
                        & (self.camera_point_cloud_full[:, 1] < max_y)
                    )
                ]

                if len(z_points) > 0:
                    # Take the point at the 25th depth percentile as the
                    # obstacle location — "closest depth", robust to outliers.
                    closest_point = z_points[
                        z_points[:, 2].argsort()[int(len(z_points) / 4)]
                    ]
                    obstacles_with_location.append(closest_point)
            obstacles_prediction_res = []
            if (
                len(self.point_cloud_in_Lidar_axis) != 0
                and len(self.last_point_cloud_in_Lidar_axis) != 0
            ):
                # Cluster the two raw sweeps into candidate obstacle boxes,
                # then rotate each (x, y, z) triple into world axes and add
                # the translation column of the extrinsic.
                self.ans = FormCloud2bbox(
                    self.point_cloud_in_Lidar_axis,
                    self.last_point_cloud_in_Lidar_axis,
                )
                rot = self.extrinsic_matrix.T[0:3, 0:3]
                self.ans[:, 0:3] = np.dot(self.ans[:, 0:3], rot)
                self.ans[:, 3:6] = np.dot(self.ans[:, 3:6], rot)
                self.ans[:, 6:9] = np.dot(self.ans[:, 6:9], rot)
                trans = self.extrinsic_matrix[:3, 3]
                for i in range(self.ans.shape[1]):
                    self.ans[:, i] = self.ans[:, i] + trans[i % trans.shape[0]]

            obstacle_res = []
            if len(obstacles_with_location) > 0:
                obstacles_with_location = np.array(obstacles_with_location)
                # Homogeneous coordinates -> world frame via the extrinsic.
                obstacles_with_location = np.hstack(
                    (
                        obstacles_with_location,
                        np.ones((obstacles_with_location.shape[0], 1)),
                    )
                )
                obstacles_with_location = np.dot(
                    obstacles_with_location, self.extrinsic_matrix.T
                )[:, :3]
                # predict v1: snap each camera-derived location to the nearest
                # LIDAR cluster point (squared distance < 2), then feed the
                # Kalman trackers.
                for obstacle, location in zip(
                    self.obstacles_bbox, obstacles_with_location
                ):
                    obstacle = np.append(location, obstacle[-2:])
                    min_dis = 1000
                    min_obstacle = [obstacle[0], obstacle[1], obstacle[2]]
                    for pos_idx in range(len(self.ans)):
                        pos1 = self.ans[pos_idx][:3]   # cluster center
                        pos2 = self.ans[pos_idx][3:6]  # bbox min corner
                        pos3 = self.ans[pos_idx][6:]   # bbox max corner
                        min_dis_temp = min(
                            sum((obstacle[j] - pos1[j]) ** 2 for j in range(3)),
                            sum((obstacle[j] - pos2[j]) ** 2 for j in range(3)),
                            sum((obstacle[j] - pos3[j]) ** 2 for j in range(3)),
                        )
                        if min_dis_temp < min_dis and min_dis_temp < 2:
                            min_dis = min_dis_temp
                            min_obstacle = pos1

                    obstacle[:3] = min_obstacle[:3]
                    obstacle_res.append(obstacle)
                    label = obstacle[-1]
                    if label < 6:  # filter other obstacle
                        res = self.kf.predict(
                            obstacle[-1],
                            min_obstacle[0],
                            min_obstacle[1],
                            min_obstacle[2],
                        )
                        if res is not None:
                            pos = [i[:3] for i in res]
                            v = [i[3:] for i in res]
                            prediction_res = {
                                "actor_id": label,
                                "actor_pos": pos,
                                "actor_velocity": v,
                            }
                            obstacles_prediction_res.append(prediction_res)

                predictions_bytes = np.array(obstacle_res, np.float32).tobytes()

                send_output(
                    "obstacles", predictions_bytes, dora_input["metadata"]
                )
            else:
                send_output(
                    "obstacles", np.array([]).tobytes(), dora_input["metadata"]
                )

            if len(obstacles_prediction_res) > 0:
                obstacles_prediction_res_bytes = pickle.dumps(
                    obstacles_prediction_res
                )
                send_output(
                    "obstacles_prediction_res",
                    obstacles_prediction_res_bytes,
                    dora_input["metadata"],
                )
            else:
                send_output(
                    "obstacles_prediction_res",
                    pickle.dumps(np.array([])),
                    dora_input["metadata"],
                )

        return DoraStatus.CONTINUE
