import asyncio
import logging

import numpy as np

from camera.camera import Camera, CameraBaseTransforamtion
from robots.skilled_piper import SkilledPiper

# from shared_memory import ArmStatus, SharedMemory, TaskCoordinator
from vlms.hkustgz_api import test_api_direct_V2
from vlms.human import get_human_coordinates

log = logging.getLogger(__name__)


def parser_args(skill: str, xyz: np.ndarray, end_xyz: np.ndarray, height: float):
    """Select the positional argument list a skill method expects.

    The VLM pipeline always produces (xyz, end_xyz, height); each arm skill
    consumes only a subset of them, in this fixed order.

    Args:
        skill: Name of the arm skill (a method on ``SkilledPiper``).
        xyz: Primary 3-D target point in the arm's base frame.
        end_xyz: Secondary 3-D target point (only used by two-point skills).
        height: Lift/approach height for the motion.

    Returns:
        The argument list to splat into the skill call; empty for an
        unrecognized skill name.
    """
    if skill == "raise_to":
        return [height]
    if skill == "press_hold":
        return [xyz]
    # Single-point skills: one target plus a height.
    if skill in ("pick_lift", "place_down"):
        return [xyz, height]
    # Two-point skills: start point, end point, and a height.
    if skill in ("pick_place", "line_move"):
        return [xyz, end_xyz, height]
    return []


class ClothesFolder:
    """Coordinates two robot arms and a camera to fold clothes.

    A VLM backend (or a human operator) turns a camera image into pixel
    targets per arm; those are lifted to 3-D points in each arm's base frame
    via per-arm camera calibration, mapped to skill calls, and executed
    concurrently on both arms.
    """

    def __init__(self, left_arm: SkilledPiper, right_arm: SkilledPiper, camera: Camera, vlm_api: str = "human"):
        """Set up arms, camera, calibration, and home both arms.

        Args:
            left_arm: Controller for the left arm.
            right_arm: Controller for the right arm.
            camera: Camera used for imaging and pixel-to-3D lookup.
            vlm_api: Key into ``self.apis`` selecting the target provider.
        """
        self.left_arm = left_arm
        self.right_arm = right_arm
        self.vlm_api = vlm_api
        # Backend name -> callable(img_path) returning per-arm pixel targets.
        self.apis = {"human": get_human_coordinates, "ust": test_api_direct_V2}
        self.camera = camera
        # Camera-to-base calibration, keyed by each arm's CAN bus name.
        self.L_transform = CameraBaseTransforamtion("camera/L515/" + left_arm.can)
        self.R_transform = CameraBaseTransforamtion("camera/L515/" + right_arm.can)

        # Home both arms before accepting any quests.
        self.go_zero()

        # High-level task phrases; presumably prompts for the VLM — TODO confirm.
        self.tasks = [
            "pull to center",
            "flatten the clothes",
            "fold the clothes",
        ]
        self._lock = asyncio.Lock()

    def go_zero(self):
        """Send both arms to their zero pose and block until both finish.

        Returns:
            True only if both arms report their motion completed.
        """
        self.left_arm.go_zero()
        self.right_arm.go_zero()
        return self.left_arm.wait_motion_done() and self.right_arm.wait_motion_done()

    async def get_shared_status(self):
        """Return both arms' status snapshots under the shared lock."""
        async with self._lock:
            return {
                "left_arm": self.left_arm.get_status(),
                "right_arm": self.right_arm.get_status(),
            }

    def _build_arm_quest(self, arm_out: dict, transform):
        """Convert one arm's VLM output into an executable quest entry.

        Args:
            arm_out: One side of the VLM result; expected keys are
                ``action``, ``pixels`` and ``height`` — TODO confirm schema
                against the VLM backends.
            transform: Camera-to-base transform for this arm.

        Returns:
            ``{"action": ..., "args": [...]}`` ready for ``execute``, or
            ``None`` when the VLM requested no action for this arm.
        """
        if not arm_out["action"]:
            return None
        # Lift each pixel to a 3-D camera point, then into the arm base frame.
        camera_3d = [self.camera.get_3d_point_from_pixel(*px) for px in arm_out["pixels"]]
        base_3d = [transform(p) if p is not None else None for p in camera_3d]
        raw_args = base_3d + [arm_out["height"]]
        return {
            "action": arm_out["action"],
            # parser_args trims the raw argument list to what the skill needs;
            # a malformed pixel list raises here and is handled by the caller.
            "args": parser_args(arm_out["action"], *raw_args),
        }

    def run(self):
        """Main planning loop: image -> VLM targets -> quest -> execution.

        Loops forever; invalid or empty VLM outputs are logged and skipped.
        """
        self.go_zero()

        while True:
            if self.vlm_api != "human":
                raise NotImplementedError(f"VLM API {self.vlm_api} not implemented")

            img_path, img = self.camera.save_current_image()
            pixel_out = self.apis[self.vlm_api](img_path)

            quest = {}
            try:
                quest["left_arm"] = self._build_arm_quest(pixel_out["left"], self.L_transform)
                quest["right_arm"] = self._build_arm_quest(pixel_out["right"], self.R_transform)
            except Exception as e:
                # Malformed VLM output (missing keys, wrong arity) — skip it.
                log.warning("pixel_out=%s is invalid! %s", pixel_out, e)
                continue

            if not quest["left_arm"] and not quest["right_arm"]:
                log.warning("pixel_out=%s is empty!", pixel_out)
                continue

            # asyncio.run replaces the deprecated get_event_loop() /
            # run_until_complete pattern: it creates and tears down a fresh
            # event loop for each quest (safe since Python 3.10+, where
            # asyncio.Lock binds to a loop lazily on first use).
            asyncio.run(self.execute(quest))

    async def execute(self, quest: dict):
        """Run the quested skill on each arm concurrently.

        Example::

            quest = {
                "left_arm": {"action": "raise_to", "args": [0.6]},
                "right_arm": {
                    "action": "pick_lift",
                    "args": [np.array([0.408, 0.2, 0.045]), 0.45],
                },
            }

        An arm whose entry is ``None`` stands by.

        Returns:
            True only if every started skill reports success.
        """
        left_task = quest.get("left_arm", None)
        right_task = quest.get("right_arm", None)
        pending = []

        if left_task is not None:
            pending.append(asyncio.create_task(self._async_execute(self.left_arm, left_task)))
        else:
            log.info("Left arm is standing by")
        if right_task is not None:
            pending.append(asyncio.create_task(self._async_execute(self.right_arm, right_task)))
        else:
            log.info("Right arm is standing by")

        results = await asyncio.gather(*pending)
        return all(results)

    async def _async_execute(self, arm: SkilledPiper, task: dict):
        """Run one blocking arm skill in the default thread-pool executor.

        run_in_executor accepts positional arguments only, hence the splat.
        """
        loop = asyncio.get_running_loop()
        # getattr is the idiomatic dynamic method lookup (vs __getattribute__).
        skill = getattr(arm, task["action"])
        return await loop.run_in_executor(None, skill, *task["args"])


if __name__ == "__main__":
    # Surface the module logger's output when run as a script.
    logging.basicConfig(level=logging.INFO)

    left_arm = SkilledPiper("can0", wait_execution=True)
    right_arm = SkilledPiper("can1", wait_execution=True)
    camera = Camera()

    # ClothesFolder.__init__ homes both arms before the loop starts.
    folder = ClothesFolder(left_arm, right_arm, camera)
    folder.run()
