#!/usr/bin/env python3

import rclpy
import cv2
import numpy as np
from cv_bridge import CvBridge
from manipulation.scripts.detection.commans.base_detection_node import BaseDetectionNode
from manipulation.scripts.detection.yolo.yolo_detection import YoloDetection
from manipulation.scripts.detection.apriltag.apriltag_detection import AprilTagDetection
from scipy.spatial.transform import Rotation
from threading import Thread
from PIL import Image
import time
from manipulation.scripts.detection.yolo.config import Config as conf
from manipulation.scripts.detection.yolo.config import target_center_from_xyxy

class YoloDetectionNode(BaseDetectionNode):
    """Detection node combining AprilTag reference frames with YOLO targets.

    The camera view must contain both AprilTags and the graspable objects.
    An AprilTag on the table provides the reference frame (``refer``); YOLO
    detects each object's center in the image, and the reference frame is
    translated to that point to obtain the object's ``target`` frame. Only
    the detection closest to the origin within the first (x, y) quadrant of
    the reference frame is published as the current target.
    """
    def __init__(self) -> None:
        # Node name must match what BaseDetectionNode expects for topics/TF.
        super().__init__('yolo_detection_node')

        # AprilTag detector used for the table-mounted reference frames.
        self.april_tag_detection = AprilTagDetection()

        # YOLO model for detecting graspable objects in the RGB image.
        self.yolo = YoloDetection(model_name=conf.YOLO_MODEL_NAME)

        # Converts OpenCV images to ROS Image messages for publishing.
        self.cv_bridge = CvBridge()
        # Daemon thread: the detect loop runs forever and dies with the process.
        self.thread = Thread(target=self.detect, daemon=True)
        self.thread.start()

        self.get_logger().info(
            f"YoloDetectionNode initialized with model_name={conf.YOLO_MODEL_NAME}, "
            f"available refer tag ids={conf.REFER_TAG_IDS}, "
            f"refer apriltag size={conf.REFER_TAG_SIZE}m, "
            f"target surface height={conf.TARGET_SURFACE_HEIGHT}m",
        )
    
    def detect(self) -> None:
        """Background loop: detect tags and targets, publish TFs and a debug image.

        Each iteration grabs an RGB + depth frame, locates AprilTag reference
        frames and YOLO bounding boxes, derives the ``target_0`` and
        ``workpiece_N`` frames in the camera frame, publishes all frames via TF,
        and publishes a composited render image. Runs forever on the daemon
        thread started in ``__init__``.
        """
        while True:
            self.publish_ideal_camera_tf()
            rgb_img, depth_img = self.get_frame()
            h, w, _ = rgb_img.shape
            render_img = rgb_img.copy()

            ### AprilTag detects the reference frames; YOLO boxes the targets ###
            at_img, tag_infos = self.april_tag_detection.draw_detected_corners(
                rgb_img=rgb_img,
                K=self.camera.K,
                dist=np.array(self.camera.dist, np.float32),
                draw_text=False,
                tag_size=conf.REFER_TAG_SIZE
            )
            boxes, yolo_img = self.yolo.detect(rgb_img)
            # Flatten each box to a 1-D [x1, y1, x2, y2] array, sorted left-to-right.
            xyxys = [x.xyxy.detach().cpu().numpy().ravel() for x in boxes]
            xyxys = sorted(xyxys, key=lambda xyxy: xyxy[0])

            dist_fn = np.linalg.norm
            frames = []  # all frames to publish (refer, left_refer, right_refer, target_0)
            refer_frame = None  # the grasp reference frame named 'refer'
            right_refer_frame = None  # the reference frame named 'right_refer'
            apriltag_frames = []  # any AprilTag frame (every frame whose name contains 'refer')
            target_center, target_idx = None, -1
            # Find the 'refer' frame in the image (take the leftmost one when several exist).
            for id in tag_infos:
                if id not in conf.REFER_TAG_IDS: continue
                for info in tag_infos[id]:
                    # Project the tag's origin into pixel coordinates.
                    axis_center_pos = np.array([[0,0,0]], np.float32)
                    axis_img_pos = self.camera.world_pos2img_pos(axis_center_pos, info['R'], info['t'])
                    pos = axis_img_pos[0]  # (x, y)
                    now_frame = {
                        'R': info['R'],
                        't': info['t'],
                        'img_pos': pos,  # pixel position of the frame origin in the image
                        'child_frame_id': conf.REFER_TAG_IDS[id],
                    }
                    apriltag_frames.append(now_frame)
                    if conf.REFER_TAG_IDS[id] == 'refer' and (refer_frame is None or pos[0] < refer_frame['img_pos'][0]):
                        refer_frame = now_frame
                    if conf.REFER_TAG_IDS[id] != 'refer':
                        frames.append(now_frame)
                    if conf.REFER_TAG_IDS[id] == 'right_refer':
                        right_refer_frame = now_frame
            if refer_frame is not None:
                frames.append(refer_frame)
                render_img = self.camera.draw_axis(render_img, refer_frame['R'], refer_frame['t'])
                # Pick the detection closest to the refer frame as the current target.
                for i, xyxy in enumerate(xyxys):
                    center = target_center_from_xyxy(xyxy, w, h)
                    cv2.circle(render_img, center, 8, (255, 255, 255), -1)
                    if center[0] < refer_frame['img_pos'][0]: continue  # only consider targets to the right of refer
                    if target_center is None or dist_fn(center - refer_frame['img_pos']) < dist_fn(target_center - refer_frame['img_pos']):
                        target_center = center
                        target_idx = i

                if target_center is not None:
                    # Compute the target frame.
                    tx, ty = target_center
                    rx, ry = refer_frame['img_pos']
                    # world_pos = [[0, 0, 0], [0, conf.REFER_TAG_SIZE, 0]]
                    # img_pos = self.camera.world_pos2img_pos(world_pos, refer_frame['R'], refer_frame['t'])
                    # real_y_per_img_y = conf.REFER_TAG_SIZE / (img_pos[0, 1] - img_pos[1, 1])
                    # delta_depth = (ry - ty) * real_y_per_img_y * np.sin(conf.HEAD_DOWN_THETA)  # depth change from the table tilt; the change is small and the estimate may be inaccurate
                    # t_depth = refer_frame['t'][2] + delta_depth - conf.TARGET_SURFACE_HEIGHT  # this estimate performs worse than the depth image
                    # DEBUG
                    # print(f"read/calc refer depth={depth_img[ry, rx]/1000:4f}/{refer_frame['t'][2]:4f}, target depth={depth_img[ty, tx]/1000:4f}/{t_depth:4f}")
                    # x_world = self.camera.img_pos2world_pos(target_center, refer_frame['R'], refer_frame['t'], xs_depth=[t_depth])[0]  # based on the calculated depth
                    # Using the depth camera is more accurate.
                    # NOTE(review): +45 appears to be an empirical depth offset (presumably mm,
                    # given the /1000 conversions above) found by comparing refer vs. target_0
                    # error — confirm the units and whether it belongs in config.
                    x_world = self.camera.img_pos2world_pos(target_center, refer_frame['R'], refer_frame['t'], depth_img=depth_img+45)[0]  # based on the depth image (comparing refer and target_0 errors showed the depth map needs a little extra depth)
                    target_frame = {
                        'R': refer_frame['R'],
                        # Translate the refer frame by x_world (expressed in refer coordinates,
                        # hence the rotation into the camera frame before adding).
                        't': refer_frame['t'] + (refer_frame['R'] @ x_world.reshape(3, 1)).ravel(),
                        'img_pos': target_center,
                        'child_frame_id': 'target_0',
                    }
                    frames.append(target_frame)
                    render_img = self.camera.draw_axis(render_img, target_frame['R'], target_frame['t'])
                    # NOTE(review): targets_list is only refreshed when a target is found,
                    # so it can hold a stale frame after the target leaves view — confirm intended.
                    self.targets_list = [target_frame]
            
            ### Handle the non-target workpieces; publish workpiece_0,1,2,... ###
            if target_idx != -1:
                # Remove the chosen target so only workpieces remain.
                xyxys.pop(target_idx)
            if len(apriltag_frames):
                apriltag_frames = sorted(apriltag_frames, key=lambda x: x['img_pos'][0])
                workpiece_num = 0
                for i, xyxy in enumerate(xyxys):
                    center = target_center_from_xyxy(xyxy, w, h)
                    near_frame = apriltag_frames[0]
                    # Ignore workpieces to the right of refer and left of right_refer.
                    ll, rr = -np.inf, np.inf
                    if refer_frame is not None: ll = refer_frame['img_pos'][0]
                    if right_refer_frame is not None: rr = right_refer_frame['img_pos'][0]
                    # print(f"workpiece{i}, {ll=}, {rr=}, {xyxy[0]=}")
                    # NOTE(review): the filter uses the box's left edge xyxy[0] rather than
                    # the center x — confirm that is intentional.
                    if (not np.isinf(ll) or not np.isinf(rr)) and ll < xyxy[0] < rr:
                        continue
                    # Anchor the workpiece to the nearest AprilTag frame in pixel space.
                    for frame in apriltag_frames[1:]:
                        if dist_fn(frame['img_pos']-center) < dist_fn(near_frame['img_pos']-center):
                            near_frame = frame
                    # Same empirical +45 depth offset as for the target above.
                    x_world = self.camera.img_pos2world_pos(center, near_frame['R'], near_frame['t'], depth_img=depth_img+45)[0]
                    frame = {
                        'R': near_frame['R'],
                        't': near_frame['t'] + (near_frame['R'] @ x_world.reshape(3, 1)).ravel(),
                        'img_pos': center,
                        'child_frame_id': f'workpiece_{workpiece_num}'
                    }
                    frames.append(frame)
                    workpiece_num += 1
            
            ### Publish the frames' TF information ###
            for frame_info in frames:
                self.publish_tf_from_detect_info(frame_info['child_frame_id'], frame_info)

            ### Image compositing ###
            # Overlay the AprilTag and YOLO annotations onto the render image by
            # copying only the pixels those detectors changed relative to rgb_img.
            render_img[rgb_img != at_img] = at_img[rgb_img != at_img]
            render_img[rgb_img != yolo_img] = yolo_img[rgb_img != yolo_img]
            # Publish on the topic.
            render_msg = self.cv_bridge.cv2_to_imgmsg(render_img, encoding='bgr8')
            render_msg.header.stamp = self.get_clock().now().to_msg()
            render_msg.header.frame_id = 'camera'
            self.pub_detected_img.publish(render_msg)
            # NOTE(review): the string literal below is a no-op expression evaluated
            # every iteration — it looks like leftover implementation notes (in Chinese)
            # and could probably be deleted; left untouched here.
            """
            参考代码: src/manipulation/scripts/detection/apriltag/apriltag_node.py

            对于每个target
            通过YOLO识别出上表面的中心点, 以及世界坐标系下的至少3个点, 
            通过self.camera.img_pos2camera_pos可以直接将像素坐标转换为相机坐标系下的三维坐标,
            通过cv2.solvePnP可以计算出相机坐标系到物体坐标系的旋转矩阵和平移矩阵.

            对每个target计算出
            {
                'child_frame_id': str,  # Frame ID of the detected object, target_0, target_1, ...
                'R': np.ndarray,  # Rotation matrix of the detected object in camera frame
                't': np.ndarray,  # Translation vector of the detected object in camera frame
            }

            1. 更新self.targets_list
            2. 将识别后的图片publish到self.pub_detected_img节点上
            """
