#!/usr/bin/env python3

import rclpy
import cv2
import numpy as np
import manipulation.scripts.constants.detection as const
from cv_bridge import CvBridge
from manipulation.scripts.detection.commans.base_detection_node import BaseDetectionNode
from manipulation.scripts.detection.yolo.yolo_detection import YoloDetection
from scipy.spatial.transform import Rotation
from threading import Thread
from manipulation.scripts.detection.yolo_graspnet.graspnet_and_yolo import GraspNetYoloProcessor
from PIL import Image
import time


class YoloDetectionNode(BaseDetectionNode):
    """Detection node that runs the GraspNet+YOLO pipeline on RGB-D camera
    frames in a background thread, publishes the best grasp pose as TF
    frame "target_0", and publishes an annotated debug image.
    """

    def __init__(self):
        super().__init__('yolo_detection_node')
        # Options: ['best_n', 'best_s', 'best_m', 'best_l', 'best_x']
        self.declare_parameter('yolo_model_name', 'best_s')
        # Options: ['torch', 'onnx', 'tensorrt']
        self.declare_parameter('yolo_engine', 'torch')
        self.model_name = self.get_parameter('yolo_model_name').get_parameter_value().string_value
        self.engine = self.get_parameter('yolo_engine').get_parameter_value().string_value

        # NOTE(review): weight path is hard-coded; consider exposing it as a
        # ROS parameter like the two declared above.
        self.graspnet_processor = GraspNetYoloProcessor(
            yolo_weight_name="/ws_618/src/manipulation/scripts/detection/yolo/models/618_surface_detect_m.pt",
            yolo_conf=0.30,       # minimum YOLO detection confidence
            conf_keep=0.80,       # confidence threshold for keeping detections
            max_area_ratio=0.35,
            min_area_ratio=0.001,
            border_tol=6,
            enlarge_ratio=1.3,
        )

        self.cv_bridge = CvBridge()
        # Daemon thread so the process can exit even while the loop runs.
        self.thread = Thread(target=self.detect, daemon=True)
        self.thread.start()

        # NOTE(review): the standalone YoloDetection model is disabled; the
        # log line below refers to parameters the GraspNet pipeline above
        # does not actually use — confirm whether it should be re-enabled.
        # self.yolo = YoloDetection(model_name=self.model_name, engine=self.engine)
        self.get_logger().info(f"YOLO model initialized with name: {self.model_name}, engine: {self.engine}")

    def detect(self):
        """Detection loop run on the background thread started in __init__.

        Each iteration: publish the ideal camera TF, grab an RGB-D frame,
        run GraspNet+YOLO, and if a grasp is found publish its pose as TF
        "target_0" and an axis-annotated image; otherwise publish the raw
        frame. Exits when rclpy is shut down.

        Reference: src/manipulation/scripts/detection/apriltag/apriltag_node.py
        Original plan (translated): for each target, use YOLO to find the
        top-surface center plus at least 3 known points; convert pixel
        coordinates to camera-frame 3D points with
        self.camera.img_pos2camera_pos, then recover the camera-to-object
        R/t with cv2.solvePnP. For each target produce
        {'child_frame_id': 'target_i', 'R': ..., 't': ...}, update
        self.targets_list, and publish the annotated image on
        self.pub_detected_img.
        """
        self.get_logger().info("Detection loop started")
        # Check rclpy.ok() each iteration so the loop stops on shutdown
        # instead of spinning forever (was `while True`).
        while rclpy.ok():
            self.publish_ideal_camera_tf()
            rgb_img, depth_img = self.get_frame()
            intrinsic = self.camera.K  # 3x3 camera intrinsic matrix

            # gg: grasp group (best grasp first); cloud: scene point cloud.
            gg, cloud = self.graspnet_processor.process(rgb_img, depth_img, intrinsic, show=False)

            if len(gg) != 0:
                render_img = self.graspnet_processor.render_img

                # Pose of the best grasp in the camera frame.
                t = gg.translations[0]
                R = gg.rotation_matrices[0]

                img = self.draw_axis_in_image(render_img, R, t)

                self.targets_list = [{
                    'child_frame_id': "target_0",
                    'R': R,
                    't': t,
                }]
                self.publish_tf_from_detect_info("target_0", {'R': R, 't': t})
            else:
                # No grasp found: publish the raw frame unannotated.
                img = rgb_img

            # Publish the (possibly annotated) image for visualization.
            # NOTE(review): frames come from get_frame(); confirm they are
            # BGR-ordered, otherwise 'bgr8' encoding swaps the channels.
            img_msg = self.cv_bridge.cv2_to_imgmsg(img, encoding='bgr8')
            img_msg.header.stamp = self.get_clock().now().to_msg()
            img_msg.header.frame_id = 'camera'

            self.pub_detected_img.publish(img_msg)
