import json
import os

import cv2


class PixelClicker:
    """Interactive OpenCV tool for annotating pick/place pixel targets on an image.

    Mouse controls:
        * Left click   - add the next point in ``pick_order`` (at most 4 points).
        * Right click  - remove the most recently added point.
        * Middle drag  - pan the view.

    Keyboard controls (inside :meth:`run`):
        * ``+`` / ``=`` zoom in, ``-`` zoom out.
        * ``r``         reload the image and reset all points / view state.
        * ``1`` / ``2`` select the task ("pick_place" / "drag_move").
        * ``s``         save the annotated image and write the JSON response.
        * ``ESC``       delete any pending response file and quit.
    """

    def __init__(
        self,
        img_path: str = "/home/knowin/codes/knowin-robot/imgs/current_view.jpg",
        output_path: str = "/home/knowin/codes/knowin-robot/txts/message_response.json",
        annotated_path: str = "/home/knowin/codes/knowin-robot/imgs/annotated_current_view.jpg",
    ):
        """Load the source image and initialise view/annotation state.

        Args:
            img_path: Path of the image to annotate.
            output_path: Path of the JSON message file consumed by the robot.
            annotated_path: Path where the annotated snapshot is saved on 's'.

        Raises:
            FileNotFoundError: If ``img_path`` cannot be read as an image.
        """
        self.img_path = img_path
        self.output_path = output_path
        self.annotated_path = annotated_path
        self.original_image = cv2.imread(self.img_path)
        if self.original_image is None:
            # cv2.imread returns None silently on failure; fail fast here
            # instead of crashing later with an opaque AttributeError.
            raise FileNotFoundError(f"Cannot read image: {self.img_path}")
        self.window_name = "Click order: left_pick, left_place, right_pick, right_place"
        # View transform: display pixel (x, y) maps to original pixel
        # ((x + offset_x) / scale, (y + offset_y) / scale).
        self.scale = 1.0
        self.offset_x = 0
        self.offset_y = 0
        self.dragging = False
        self.last_x, self.last_y = 0, 0
        # tag -> [x, y] in ORIGINAL image coordinates, filled in pick_order.
        self.pixels = {}
        self.camera_xyz = []
        self.gripper_xyz = []
        # BGR marker colors, one per tag.
        self.colors = {
            "left_pick": (0, 255, 255),  # yellow
            "left_place": (0, 0, 255),  # red
            "right_pick": (255, 255, 0),  # cyan
            "right_place": (255, 0, 255),  # magenta
        }

        self.current_task = "pick_place"

        # Left clicks assign tags in this fixed order; right clicks undo in reverse.
        self.pick_order = ["left_pick", "left_place", "right_pick", "right_place"]

    def display_coordinates(self, event, x, y, flags, param):
        """Mouse callback: add/remove points, pan the view, show a live overlay.

        Args:
            event: OpenCV mouse event code (cv2.EVENT_*).
            x, y: Cursor position in DISPLAY (window) coordinates.
            flags, param: Unused, required by the cv2 callback signature.
        """
        # Convert display coordinates back to original-image coordinates
        # (inverse of the crop/resize applied in run()).
        original_x = int((x + self.offset_x) / self.scale)
        original_y = int((y + self.offset_y) / self.scale)

        # Ignore events that map outside the original image bounds.
        if (
            original_x < 0
            or original_x >= self.original_image.shape[1]
            or original_y < 0
            or original_y >= self.original_image.shape[0]
        ):
            return

        if event == cv2.EVENT_LBUTTONDOWN:
            # Left click: add the next marker (up to 4, in pick_order).
            if len(self.pixels) < 4:
                pixel = [original_x, original_y]
                tag = self.pick_order[len(self.pixels)]
                self.pixels[tag] = pixel

                print(
                    f"Add {tag} pixel=({original_x}, {original_y}), "
                    f"color={self.original_image[original_y, original_x]}"
                )

        elif event == cv2.EVENT_RBUTTONDOWN:
            # Right click: remove the most recently added point.
            if self.pixels:
                removed_key = self.pick_order[len(self.pixels) - 1]
                removed = self.pixels.pop(removed_key)
                print(f"del: {removed_key} pixel=({removed[0]}, {removed[1]})")

        elif event == cv2.EVENT_MBUTTONDOWN:
            # Middle button down: start panning.
            self.dragging = True
            self.last_x, self.last_y = x, y

        elif event == cv2.EVENT_MOUSEMOVE:
            # While panning, accumulate the cursor delta into the view offset.
            if self.dragging:
                dx = x - self.last_x
                dy = y - self.last_y
                self.offset_x += dx
                self.offset_y += dy
                self.last_x, self.last_y = x, y

        elif event == cv2.EVENT_MBUTTONUP:
            # Middle button up: stop panning.
            self.dragging = False

        # Best-effort status overlay: displayOverlay is only available with the
        # Qt highgui backend and raises cv2.error otherwise — don't let that
        # kill the callback on every mouse move.
        try:
            cv2.displayOverlay(
                self.window_name,
                f"Pixel: ({original_x}, {original_y}) Task: {self.current_task}",
                100,
            )
        except cv2.error:
            pass

    def run(self):
        """Run the annotation UI loop until ESC is pressed."""

        cv2.namedWindow(self.window_name, cv2.WINDOW_NORMAL)
        cv2.setMouseCallback(self.window_name, self.display_coordinates)

        while True:
            # Draw on a fresh copy each frame so markers can be removed.
            display = self.original_image.copy()
            for tag, (px, py) in self.pixels.items():
                color = self.colors[tag]
                cv2.circle(display, (px, py), 2, color, -1)
                cv2.putText(display, tag, (px + 10, py - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

            # Apply zoom and pan: crop the visible region, then stretch it back
            # to the full window size (inverse mapping lives in the callback).
            h, w = display.shape[:2]
            view_x1 = max(0, int(self.offset_x / self.scale))
            view_y1 = max(0, int(self.offset_y / self.scale))
            view_x2 = min(w, int((self.offset_x + w) / self.scale))
            view_y2 = min(h, int((self.offset_y + h) / self.scale))

            view = display[view_y1:view_y2, view_x1:view_x2]
            view = cv2.resize(view, (w, h))

            cv2.imshow(self.window_name, view)

            key = cv2.waitKey(1) & 0xFF

            if key == 27:  # ESC: remove any stale response file and quit
                if os.path.exists(self.output_path):
                    os.remove(self.output_path)
                break
            elif key == ord("+") or key == ord("="):  # zoom in
                self.scale *= 1.1
            elif key == ord("-"):  # zoom out
                self.scale /= 1.1
            elif key == ord("r"):  # reload image and reset all state
                self.original_image = cv2.imread(self.img_path)
                if self.original_image is None:
                    raise FileNotFoundError(f"Cannot read image: {self.img_path}")
                self.pixels = {}
                self.scale = 1.0
                self.offset_x = 0
                self.offset_y = 0
            elif key == ord("1"):  # task: pick_place
                self.current_task = "pick_place"
            elif key == ord("2"):  # task: drag_move
                self.current_task = "drag_move"
            elif key == ord("s"):  # save annotated image and send the response
                cv2.imwrite(self.annotated_path, display)
                print(f"Saved annotated image to: {self.annotated_path}")
                # Send the message to the robot (same format as the LLM path).
                out_dir = os.path.dirname(self.output_path)
                if out_dir:
                    os.makedirs(out_dir, exist_ok=True)
                with open(self.output_path, "w") as f:
                    response = self.pixels.copy()
                    response["task"] = self.current_task
                    json.dump(response, f)
                print(f"Send txt messages to {self.output_path} ")

        cv2.destroyAllWindows()


if __name__ == "__main__":
    # Launch the interactive pixel-annotation tool with its default paths.
    clicker = PixelClicker()
    clicker.run()
