import json
import os
import time
from typing import Sequence

import cv2
import numpy as np

from camera.camera import Camera, CameraBaseTransforamtion

# from model.transforms import denormalize_end, normalize_joint
from robots.piper import PiperArm
from vlms.hkustgz_api import test_api_direct_V2
from vlms.human import get_human_coordinates
from vlms.qwen import get_placepick_qwen_pixel


class PickPlaceRobot:
    """Vision-guided pick-and-place controller.

    Each loop iteration captures a camera frame, asks the selected VLM
    backend for a (pick, place) pixel pair, back-projects both pixels to
    3D robot-base coordinates via the camera calibration, and drives the
    Piper arm through a raise -> horizontal travel -> descend -> grasp cycle.
    """

    # Travel height in metres: the effector is lifted to at least this height
    # before horizontal moves so it clears objects and box rims.
    raise_height: float = 0.2
    # raise_height: float = 0.05  # Clothing Folding --harold

    def __init__(self, arm: PiperArm, camera: Camera, vlm_api: str, save_data: bool = True):
        """
        Args:
            arm: Piper arm driver (already connected on its CAN port).
            camera: camera providing RGB frames and pixel -> 3D back-projection.
            vlm_api: backend key -- one of "qwen", "human" or "ust".
            save_data: when True, save each executed frame and its pixel pair
                under ``data/pickplace_data/<api>_<timestamp>/``.
        """
        self.arm = arm
        self.camera = camera
        # Extrinsic calibration is stored per camera model / CAN port pair.
        self.transform = CameraBaseTransforamtion(f"camera/{self.camera.model}/{self.arm.can}")
        self.vlm_api = vlm_api
        self.apis = {"qwen": get_placepick_qwen_pixel, "human": get_human_coordinates, "ust": test_api_direct_V2}
        self.save_data = save_data
        self.data = {"pick_pixel": [], "place_pixel": []}
        if self.save_data:
            self.data_path = f"data/pickplace_data/{self.vlm_api}_{time.strftime('%Y%m%d_%H%M%S')}/"
            # exist_ok avoids the check-then-create race of os.path.exists().
            os.makedirs(self.data_path, exist_ok=True)

        # Preset task prompts kept for quick experiments; note that `run`
        # builds its own instruction string and does not consume this list.
        self.tasks = [
            "pick the red cube into the metal bow",
            "pick the white bottle cap into the brown box",
            "pick the straw into the brown box",
            "pick the tissue into the  brown box",
            "pick the small bottle cap into the brown box",
            "pick the paper trash into the  brown box",
            "pick the blue candy into the black box",
        ]

    def raise_up(self):
        """Lift the effector by ``raise_height`` if it is currently below it."""
        xyz = self.arm.get_effector_xeuler()[:3]
        if xyz[2] < self.raise_height:
            xyz[2] += self.raise_height
            self.arm.move_to(xyz)

    def horizontal_move_to(self, xyz: Sequence[float]):
        """Move to the target's x/y while keeping the current effector height.

        Args:
            xyz: target position; only the first two components are used.
        """
        current_xyz = self.arm.get_effector_xeuler()[:3]
        # np.concatenate (not np.concat, a NumPy>=2.0-only alias) for
        # compatibility with all NumPy versions.
        horizontal_xyz = np.concatenate([xyz[:2], current_xyz[2:]])
        self.arm.move_to(horizontal_xyz)

    def run(self):
        """Run the perception -> plan -> act loop forever.

        Each iteration: capture an image, query the VLM backend for a
        pick/place pixel pair, back-project to base coordinates, then
        execute grasp -> verify grip -> transport -> release. Loops until
        the process is interrupted (the ``__main__`` block catches
        KeyboardInterrupt and saves the collected data).
        """
        self.arm.go_zero()

        task = (
            "Your task is to pick and place the items into the box:\n"
            "Please carefully check the trash, snacks, toys or tools outside of box.\n"
            "If there is no trash, snacks, toys or tools outside of box, return <Task finished: true>, otherwise return <Task finished: false>.\n"
            "Note: \n1. pick and place only ONE item in one step.\n"
            "2. strictly following the OUTPUT FORMAT.\n"
            "3. DO NOT pick up the item that is already in the box.\n"
        )

        pick_base_3d = None
        place_base_3d = None

        while True:
            if self.vlm_api == "human":
                img_path, img = self.camera.save_current_image()
                task = "human input"
            else:
                # Let any previous motion settle before capturing a frame.
                while self.arm.is_in_motion():
                    time.sleep(0.1)
                img_path, img = self.camera.save_current_image()
                print("Saved current image!")
            pixel_out = self.apis[self.vlm_api](img_path, task)

            # .get() tolerates backends that omit the "finished" flag
            # (indexing raised KeyError for such backends).
            if self.vlm_api != "human" and pixel_out.get("finished") is True:
                print("Task finished! standy by...")
                self.arm.go_zero()
                time.sleep(3)
                continue

            print("Need cleaning!")
            try:
                pick_pixel, place_pixel = pixel_out["left_pick"], pixel_out["left_place"]
                self.camera.save_annotated_image(pick_pixel, place_pixel)
                # Pixel -> camera-frame 3D -> robot-base 3D.
                pick_camera_3d = self.camera.get_3d_point_from_pixel(*pick_pixel)
                pick_base_3d = self.transform(pick_camera_3d)

                place_camera_3d = self.camera.get_3d_point_from_pixel(*place_pixel)
                place_base_3d = self.transform(place_camera_3d)
                assert len(pick_base_3d) == 3, f"invalid 3d coordinates {pick_base_3d}"
                assert len(place_base_3d) == 3, f"invalid 3d coordinates {place_base_3d}"
            except Exception:
                # Narrowed from a bare `except:` so Ctrl-C (KeyboardInterrupt)
                # still reaches the caller, whose handler saves the data log.
                print(f"{pixel_out} is not valid!")
                continue

            if pick_base_3d is None or place_base_3d is None:
                continue
            print("Begin to pick the item")

            self.arm.open_gripper()

            pick_pos = pick_base_3d
            place_pos = place_base_3d

            # NOTE(review): original comment said "down 1cm" but the sign
            # raises z by 1 cm -- confirm the base-frame z convention.
            pick_pos[2] = pick_pos[2] + 0.01
            place_pos[2] = place_pos[2] + 0.05  # release 5 cm above the detected place point

            self.raise_up()
            self.horizontal_move_to(pick_pos)
            self.arm.move_to(pick_pos)

            self.arm.close_gripper()

            # Where the arm actually ended up, for the diagnostic log below.
            pick_real_pos = self.arm.get_effector_xeuler()[:3]
            self.raise_up()
            time.sleep(0.2)
            print("Gripping effort: ", self.arm.arm.GetArmGripperMsgs().gripper_state.grippers_effort)
            attempt = 0
            # Retry the grasp (at most once), lowering 5 mm per extra attempt;
            # attempt == 0 on the first retry, so no extra lowering yet.
            while not self.arm.is_gripping_sth() and attempt < 1:
                self.arm.open_gripper()
                # Recompute the pick position from the same pixel.
                pick_camera_3d = self.camera.get_3d_point_from_pixel(*pick_pixel)
                pick_xyz = self.transform(pick_camera_3d)
                pick_pos = np.concatenate([pick_xyz[:2], [pick_xyz[2] - 0.005 * attempt]])
                self.arm.move_to(pick_pos)
                self.arm.close_gripper()
                self.raise_up()
                time.sleep(0.2)
                attempt += 1

            if not self.arm.is_gripping_sth():
                print("Gripping failed!")
                self.raise_up()
                continue

            self.horizontal_move_to(place_pos)
            self.arm.move_to(place_pos)
            self.arm.open_gripper()

            place_actual_pos = self.arm.get_effector_xeuler()[:3]

            print(f"Pixels: {pick_pixel} -> {place_pixel}")
            print(f"Camera 3D: {pick_camera_3d} -> {place_camera_3d}")
            print(f"Base 3D: {pick_pos.round(5)} -> {place_pos.round(5)}")
            print(f"Arm move: {pick_real_pos.round(3)} -> {place_actual_pos.round(3)}")

            time.sleep(0.3)  # wait for execution
            self.raise_up()

            # Log the frame and pixel pair for later dataset building.
            if self.save_data:
                # OpenCV frames are already BGR, which is what imwrite expects.
                cv2.imwrite(self.data_path + f"img_{len(self.data['pick_pixel'])}.jpg", img)
                self.data["pick_pixel"].append(pick_pixel)
                self.data["place_pixel"].append(place_pixel)

            self.arm.go_zero()


if __name__ == "__main__":

    import sys

    # Usage: python pickplace.py <can_port> <vlm_api>
    #   e.g. python pickplace.py can0 human
    #        python pickplace.py can0 qwen
    if len(sys.argv) < 3:
        print("Usage: python pickplace.py <can_port> <vlm_api>")
        print(sys.argv)
        sys.exit(1)

    can_port = sys.argv[1]
    arm = PiperArm(can_port, wait_execution=True)
    arm.gripper_slope = 1
    camera = Camera()
    # Renamed from `self`: a module-level variable called `self` reads as a
    # method receiver and confuses both readers and tooling.
    robot = PickPlaceRobot(
        arm,
        camera,
        sys.argv[2],
        save_data=True,
    )

    try:
        robot.run()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the loop; persist the collected
        # pixel annotations before exiting.
        if robot.save_data and len(robot.data["pick_pixel"]) > 0:
            with open(robot.data_path + "pixels.json", "w") as f:
                json.dump(robot.data, f)