#!/usr/bin/env python3

import rospy
from sensor_msgs.msg import Image
from nav_msgs.msg import Odometry
from cv_bridge import CvBridge
from vision_msgs.msg import Detection2DArray
import cv2
import socket
import pickle
import numpy as np


class DetectionDrawer:
    """ROS node that annotates YOLO human detections on the front camera
    stream and sends the JPEG-encoded frame plus bounding boxes over UDP.

    It also accumulates (camera pose, bearing) samples so a detected
    target's world position can be recovered by DLT triangulation.
    """

    def __init__(self):
        rospy.init_node('detection_drawer', anonymous=True)
        # Latest world->body transform as a 3x4 matrix [R_wb^T | -R_wb^T t_wb].
        self.T_bw = None
        self.cur_det_timestamp = None
        self.last_det_timestamp = -1
        # (T_cw, bearing) samples accumulated for triangulation.
        self.pose_dst_list = []
        # Latest camera frame / detection list; None until first message.
        self.current_image = None
        self.detections = None
        # Fisheye camera intrinsics K and distortion coefficients (k1..k4).
        self.camera_instrinsic = np.array(
            [[131.62694047, 0, 188.30012764], [0, 131.68686938, 208.27241035], [0, 0, 1]], dtype=np.float32)
        self.distortion = np.array(
            [0.09336196, -0.02524823, -0.00504034,  0.00126214], dtype=np.float32)
        # Fixed body->camera rotation (camera z forward = body x, etc.).
        self.T_cb = np.array([[0, -1, 0], [0, 0, -1], [
                             1, 0, 0]], dtype=np.float32)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber(
            "/oakcam_ffc_4p_ros/front/image", Image, self.image_callback)
        self.detection_sub = rospy.Subscriber(
            "/oakcam_ffc_4p_ros/yolov4_detections", Detection2DArray, self.detection_callback)
        self.odom_sub = rospy.Subscriber(
            "/bfctrl/local_odom", Odometry, self.odom_callback)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 75]
        self.host = "192.168.10.60"
        self.port = 9816

    def quat2rot(self, quat):
        """Convert a unit quaternion (w, x, y, z) to a 3x3 rotation matrix."""
        w, x, y, z = quat
        return np.array([[1-2*y*y-2*z*z, 2*x*y-2*z*w, 2*x*z+2*y*w],
                         [2*x*y+2*z*w, 1-2*x*x-2*z*z, 2*y*z-2*x*w],
                         [2*x*z-2*y*w, 2*y*z+2*x*w, 1-2*x*x-2*y*y]])

    def odom_callback(self, msg: "Odometry"):
        """Cache the inverse (world->body) pose of the latest odometry.

        Builds T_bw = [R_wb^T | -R_wb^T t_wb] so world points can be
        mapped into the body frame with a single 3x4 multiply.
        """
        rot_wb = self.quat2rot(
            [msg.pose.pose.orientation.w, msg.pose.pose.orientation.x,
             msg.pose.pose.orientation.y, msg.pose.pose.orientation.z])
        t_wb = np.array([msg.pose.pose.position.x,
                         msg.pose.pose.position.y, msg.pose.pose.position.z], dtype=np.float32)
        self.T_bw = np.zeros((3, 4), dtype=np.float32)
        self.T_bw[:3, :3] = rot_wb.T
        self.T_bw[:, 3] = -rot_wb.T @ t_wb

    def image_callback(self, data):
        """Store the newest camera frame as a BGR OpenCV image."""
        self.current_image = self.bridge.imgmsg_to_cv2(data, "bgr8")

    def detection_callback(self, data):
        """Store the newest detections and stamp their arrival time."""
        self.cur_det_timestamp = rospy.Time.now().to_sec()
        self.detections = data.detections

    def get_camera_coord(self, detection):
        """Back-project the bbox center to a normalized camera ray (x, y, 1).

        The raw pixel is fisheye-undistorted first, then multiplied by
        K^-1 to obtain a unit-depth bearing in the camera frame.
        """
        # NOTE: the original code had these names swapped — the input pixel
        # is the *distorted* observation; undistortPoints removes distortion.
        distorted_px = np.array(
            [float(detection.bbox.center.x), float(detection.bbox.center.y)],
            dtype=np.float32).reshape(1, 1, 2)
        undistorted_px = cv2.fisheye.undistortPoints(
            distorted_px, self.camera_instrinsic, self.distortion,
            P=self.camera_instrinsic)
        pixel_pt = np.array(
            [undistorted_px[0][0][0], undistorted_px[0][0][1], 1.0],
            dtype=np.float32)
        return np.linalg.inv(self.camera_instrinsic) @ pixel_pt

    def resolve_target_wold_pose(self, detections):
        """Accumulate pose/bearing samples and triangulate the target.

        Samples are kept only while detections arrive less than 0.5 s
        apart; a gap resets the buffer. A new sample is accepted only if
        the camera moved > 3 cm (DLT is ill-conditioned for near-parallel
        rays). With more than 5 samples, the last 5 are triangulated.
        """
        if self.cur_det_timestamp - self.last_det_timestamp < 0.5:
            camera_point = self.get_camera_coord(detections[0])
            # BUGFIX: every stored pose must be camera-from-world. The
            # original stored the raw body pose for the FIRST sample only,
            # which corrupted the first two triangulation rows.
            T_cw = self.T_cb @ self.T_bw
            if not self.pose_dst_list:
                self.pose_dst_list.append((T_cw, camera_point))
            else:
                last_T_cw = self.pose_dst_list[-1][0]
                # Baseline check (rotation preserves norms, so comparing
                # camera-frame translations is equivalent to body-frame).
                dist = np.linalg.norm(last_T_cw[:, 3] - T_cw[:, 3])
                if dist > 0.03:
                    self.pose_dst_list.append((T_cw, camera_point))
                if len(self.pose_dst_list) > 5:
                    self.triangulation(self.pose_dst_list[-5:])
        else:
            self.pose_dst_list = []
        # BUGFIX: always advance the timestamp so the 0.5 s test measures
        # the gap between *consecutive* detections; the original only
        # updated it on reset, wiping the buffer every 0.5 s regardless.
        self.last_det_timestamp = self.cur_det_timestamp

    def triangulation(self, points):
        """Triangulate a 3-D world point by the Direct Linear Transform.

        Args:
            points: sequence of (T_cw, bearing) pairs — T_cw a 3x4
                camera-from-world matrix, bearing a normalized camera ray
                (x, y, 1) as produced by get_camera_coord.

        Returns:
            np.ndarray: the triangulated world point (3,).
        """
        A = np.zeros((2 * len(points), 4), dtype=np.float32)
        for i, (T_cw, bearing) in enumerate(points):
            T1, T2, T3 = T_cw[0, :], T_cw[1, :], T_cw[2, :]
            A[2 * i, :] = T2 - bearing[1] * T3
            A[2 * i + 1, :] = T1 - bearing[0] * T3
        # BUGFIX: np.linalg.svd returns V *transposed* (rows are the right
        # singular vectors), so the least-squares null vector is the last
        # ROW, not the last column as the original code read.
        _, _, Vh = np.linalg.svd(A)
        X = Vh[-1]
        X = X / X[3]  # de-homogenize
        print(f"world point: {X[:3]}")
        return X[:3]

    def get_human_detection(self, detections):
        """Return only the detections whose class id is 0 (presumably the
        'person' class of the YOLO model — TODO confirm label map)."""
        return [d for d in detections if d.results[0].id == 0]

    def UDP_publish(self):
        """Draw human bboxes on the latest frame and UDP-send the JPEG.

        No-op until both an image and a detection message have arrived.
        The payload is a pickle of ("image", jpeg_bytes, [(x, y, w, h), ...]).
        """
        if self.current_image is None or self.detections is None:
            return
        img = self.current_image
        human_detection = self.get_human_detection(self.detections)
        # if len(human_detection) == 1 and self.T_bw is not None:
        #     self.resolve_target_wold_pose(human_detection)
        bbox = []
        for detection in human_detection:
            # bbox center/size -> top-left corner + width/height in pixels.
            x = int(detection.bbox.center.x - detection.bbox.size_x / 2)
            y = int(detection.bbox.center.y - detection.bbox.size_y / 2)
            w = int(detection.bbox.size_x)
            h = int(detection.bbox.size_y)
            cv2.rectangle(img, (x, y),
                          (x + w, y + h), (0, 255, 0), 2)
            bbox.append((x, y, w, h))
        _, img_encoded = cv2.imencode('.jpg', img, self.encode_param)
        serialized_data = pickle.dumps(("image", img_encoded.tobytes(), bbox))
        # Send the image data.
        self.socket.sendto(serialized_data, (self.host, self.port))


if __name__ == '__main__':
    # Spin at roughly 30 Hz, pushing the freshest annotated frame over UDP.
    try:
        node = DetectionDrawer()
        while not rospy.is_shutdown():
            node.UDP_publish()
            rospy.sleep(0.033)
    except rospy.ROSInterruptException:
        # Normal shutdown path (Ctrl-C / node kill) — nothing to clean up.
        pass
