import time
from typing import Sequence

import numpy as np

from camera.camera import Camera, CameraBaseTransforamtion

# from model.transforms import denormalize_end, normalize_joint
from robots.piper import PiperArm
from vlms.human import get_human_coordinates
from vlms.qwen import get_placepick_qwen_pixel


class PickPlaceRobot:
    """Interactive pick-and-place controller.

    A VLM (Qwen) or a human operator selects pick/place pixel targets on the
    current camera image; the pixels are lifted to camera-frame 3D points and
    transformed into the robot base frame, then executed as an
    open-gripper / move / close / raised-transit / release sequence.
    """

    # Height offset added to transit waypoints so the arm travels above
    # obstacles (units follow the arm's base frame — presumably mm, TODO confirm).
    raise_height: float = 100
    # Pause (seconds) before the final descent onto the target.
    exe_delay: float = 0.1

    def __init__(self, arm: PiperArm, camera: Camera, vlm_api: str, fk_model=None):
        """Wire up the arm, camera, pixel-to-base transform and VLM backend.

        Args:
            arm: Piper arm driver used for all motion and gripper commands.
            camera: RGB-D camera; provides image capture and pixel -> 3D lookup.
            vlm_api: Key into ``self.apis`` selecting the target-picking
                backend ("qwen" or "human").
            fk_model: Optional forward-kinematics model, used only for
                diagnostic printouts in :meth:`run`.
        """
        self.arm = arm
        self.camera = camera
        # Calibration is resolved per camera-model / CAN-port pair.
        self.transform = CameraBaseTransforamtion(f"camera/{self.camera.model}/{self.arm.can}")
        self.pick_base_3d = None
        self.place_base_3d = None
        self.vlm_api = vlm_api
        # Dispatch table: backend name -> callable(img_path, task) -> pixel dict
        # with "left_pick" / "left_place" entries.
        self.apis = {"qwen": get_placepick_qwen_pixel, "human": get_human_coordinates}

        # Diagnostics only; see the FK printout in run().
        self.fk_model = fk_model

        # Scripted task descriptions. run() never pops this list, so it only
        # gates the loop (non-empty => loop runs); tasks come from stdin.
        self.tasks = [
            "pick the red cube into the metal bow",
            "pick the white bottle cap into the brown box",
            "pick the straw into the brown box",
            "pick the tissue into the  brown box",
            "pick the small bottle cap into the brown box",
            "pick the paper trash into the  brown box",
            "pick the blue candy into the black box",
        ]

    def raise_and_move_to(self, xyz: Sequence[float]):
        """Move to ``xyz`` through two raised intermediate waypoints.

        The waypoints blend the current effector position with the target
        (90/10 then 10/90) and are lifted by ``raise_height`` so the carried
        object clears obstacles before the final descent.
        """
        current_xyz = self.arm.get_effector_xquat()["xyz"][:3]

        # Blend ratios can be tuned; 0.9/0.1 keeps the first lift near the start.
        mid_point1 = np.array(current_xyz) * 0.9 + np.array(xyz) * 0.1
        mid_point1[2] += self.raise_height
        self.arm.move_to(mid_point1)

        mid_point2 = np.array(current_xyz) * 0.1 + np.array(xyz) * 0.9
        mid_point2[2] += self.raise_height
        self.arm.move_to(mid_point2)
        time.sleep(self.exe_delay)  # let the transit settle before descending
        self.arm.move_to(xyz)

    def run(self):
        """Main interactive loop: read a task, query the VLM, pick and place.

        Exits when the operator submits an empty task line. In "human" mode
        the task is a fixed placeholder, so the loop runs until interrupted.
        """
        self.arm.go_zero()

        while self.tasks:  # never popped -> effectively `while True` until break
            if self.vlm_api == "human":
                # Placeholder: the human annotates target pixels on the image
                # directly, so no textual task is needed.
                task = "human input"
            else:
                task = input("请输入机械臂的动作任务: ")
            if not task:
                break

            print(f"Current task:{task}")
            img_path = self.camera.save_current_image()
            # Ask the selected backend for pick/place pixel coordinates.
            pixel_out = self.apis[self.vlm_api](img_path, task)
            try:
                pick_point_pixel, place_point_pixel = pixel_out["left_pick"], pixel_out["left_place"]
                self.camera.save_annotated_image(pick_point_pixel, place_point_pixel)
                # Pixel -> camera-frame 3D -> robot-base-frame 3D.
                pick_camera_3d = self.camera.get_3d_point_from_pixel(*pick_point_pixel)
                self.pick_base_3d = self.transform(pick_camera_3d)

                place_camera_3d = self.camera.get_3d_point_from_pixel(*place_point_pixel)
                self.place_base_3d = self.transform(place_camera_3d)
            except Exception as exc:  # was a bare `except:`; keep best-effort retry
                print(f"{pixel_out} is not valid! ({exc})")
                continue

            if self.pick_base_3d is None or self.place_base_3d is None:
                continue
            print("Begin to pick the item")

            self.arm.open_gripper()

            # NOTE: these alias the stored arrays — the height clamps below
            # also mutate self.pick_base_3d / self.place_base_3d in place.
            pick_pos = self.pick_base_3d
            place_pos = self.place_base_3d

            pick_pos[2] = max(20, pick_pos[2])  # keep the grasp above the table plane
            place_pos[2] = max(place_pos[2] + 50, 100)  # release above the receptacle

            self.arm.move_to(pick_pos)

            self.arm.close_gripper()

            # Actual post-grasp pose/joints, captured for the diagnostics below.
            pick_real_pos = self.arm.get_effector_xquat()["xyz"][:3]
            pick_real_joints = self.arm.get_joint_status()["joint"]

            # Transit to the place target through raised waypoints, then release.
            self.raise_and_move_to(place_pos)
            self.arm.open_gripper()

            place_actual_pos = self.arm.get_effector_xquat()["xyz"][:3]
            place_real_joints = self.arm.get_joint_status()["joint"]

            # Diagnostics: compare commanded vs. achieved poses at each stage.
            print(f"Pixels: {pick_point_pixel} -> {place_point_pixel}")
            print(f"Camera 3D: {pick_camera_3d} -> {place_camera_3d}")
            print(f"Base 3D: {pick_pos.round(3)} -> {place_pos.round(3)}")
            print(f"Arm move: {pick_real_pos.round(3)} -> {place_actual_pos.round(3)}")

            if self.fk_model is not None:
                # Cross-check the learned FK model against the measured poses.
                fk_pick = self.fk_model(pick_real_joints)
                fk_place = self.fk_model(place_real_joints)
                print(f"FK model: {fk_pick.round(3)} -> {fk_place.round(3)}")

            time.sleep(0.3)  # wait for execution
            self.arm.go_zero()


if __name__ == "__main__":

    import sys

    import hydra
    from omegaconf import DictConfig

    # diffusion partial ik
    with hydra.initialize(version_base=None, config_path="cfg"):
        ej_cfg: DictConfig = hydra.compose(
            config_name="train_diffusion_partial_ik"
        )  # , overrides=["model.denoising_steps=100"]
        ik_model = hydra.utils.instantiate(ej_cfg.model)
        ik_model.load_ckpt(ej_cfg.ckp_name + ".ckp")

        # mlp partial fk
        fk_cfg: DictConfig = hydra.compose(config_name="train_mlp_partial_fk")
        fk_model = hydra.utils.instantiate(fk_cfg.model)
        fk_model.load_ckpt(fk_cfg.ckp_name + ".ckp")

        # set fk_model
        ik_model.fk_model = fk_model.model

    can_port = "can0"
    arm = PiperArm(can_port, ik_model, wait_execution=False)
    camera = Camera()
    self = PickPlaceRobot(arm, camera, sys.argv[1], fk_model=fk_model)
    self.run()


# put the red cube to the top of the top of the brown box

# python pickplace.py human
# python pickplace.py qwen
