import logging
import os
import re
import time
from typing import Dict, List, Optional, Sequence

import cv2
import dashscope

from camera.camera import Camera, CameraBaseTransforamtion
from robots.piper import PiperArm
from vlms.qwen import get_clothes_qwen_pixel

log = logging.getLogger(__name__)


# NOTE(security): a live-looking API key was hard-coded here. Read it from the
# DASHSCOPE_API_KEY environment variable instead; the original literal is kept
# only as a backward-compatible fallback and should be rotated and removed.
dashscope.api_key = os.environ.get("DASHSCOPE_API_KEY", "sk-4a47da58c2e64a53bc7b94d0892016be")

# Extract "pick ... [x, y]" / "place ... [x, y]" pixel coordinates from the
# VLM's free-form response text (case-insensitive, may span multiple lines).
PICK_RE = re.compile(r"pick[^[]*\[\s*(\d+)\s*,\s*(\d+)\s*\]", re.I | re.S)
PLACE_RE = re.compile(r"place[^[]*\[\s*(\d+)\s*,\s*(\d+)\s*\]", re.I | re.S)


class VlmAPI:
    """Vision-language planner for clothes folding.

    Captures images through a :class:`Camera`, asks a Qwen VLM to choose the
    next folding step, and extracts per-arm pick/place pixel coordinates.
    """

    def __init__(self, camera: Camera):
        # Candidate folding steps presented to the VLM as <TASK_LIST>.
        with open("data/texts/clothes_tasks.txt", "r") as f:
            self.tasks_list = f.read()

        self.camera = camera

        self.base_url = "https://envision-research.hkust-gz.edu.cn/vlm_cloth_api"
        self.api_url = f"{self.base_url}/api/predict"

        # Filled in by get_points_from_img(). After extract_coordinates the
        # per-arm points look like {"pick": [x, y], "place": [x, y]} in
        # image pixel coordinates.
        self.current_task = None
        self.left_points = None
        self.right_points = None
        self.response_text = None

        # Pixel-prediction backend (see vlms.qwen.get_clothes_qwen_pixel).
        self.api = get_clothes_qwen_pixel

    def save_current_image(self):
        """Save the camera's current frame; returns whatever the camera returns
        (presumably the saved image path — see Camera.save_current_image)."""
        return self.camera.save_current_image()

    def save_annotated_image(self, save_file: str = ".cache/imgs/annotated_current_view.jpg"):
        """Draw the planned pick/place points onto the current frame and save it.

        Points equal to [0, 0] (the "not found" sentinel produced by
        extract_coordinates) are skipped via the any() truthiness check.
        """
        current_frame = self.camera.capture_current_frame()
        if self.left_points is not None:
            if any(self.left_points["pick"]):
                current_frame = self.camera.annotate_image(
                    current_frame, self.left_points["pick"], "L_PICK", color=(0, 255, 0)
                )
            if any(self.left_points["place"]):
                current_frame = self.camera.annotate_image(
                    current_frame, self.left_points["place"], "L_PLACE", color=(0, 255, 0)
                )
        if self.right_points is not None:
            if any(self.right_points["pick"]):
                current_frame = self.camera.annotate_image(
                    current_frame, self.right_points["pick"], "R_PICK", color=(255, 0, 0)
                )
            if any(self.right_points["place"]):
                current_frame = self.camera.annotate_image(
                    current_frame, self.right_points["place"], "R_PLACE", color=(255, 0, 0)
                )

        cv2.imwrite(save_file, current_frame)
        log.info(f"Saved annotated image to: {save_file}")

    def get_task(self, img_path: Optional[str] = None) -> str:
        """Ask the VLM to pick the best next folding step from the task list.

        Args:
            img_path: Path to the scene image; a fresh frame is captured and
                saved when None.

        Returns:
            The VLM's chosen task as free-form text (e.g. "Grab the
            shoulders and throw the clothes").
        """
        if img_path is None:
            img_path = self.camera.save_current_image()

        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "The image displays the scene visible to the camera. You are going to fold the given t-shrit. Please select the best current step from the <TASK_LIST> to fold the clothes as fast as possible\n.",
                    },
                    {"type": "image", "image": str(img_path), "min_pixels": 640 * 480, "max_pixels": 1280 * 960},
                    {"type": "text", "text": "<TASK_LIST>: " + self.tasks_list},
                ],
            },
        ]

        # Show the model the expected output format as a final user turn.
        example = "output format:\n" "Grab the shoulders and throw the clothes"
        messages.append({"role": "user", "content": example})
        response = dashscope.MultiModalConversation.call(model="qwen2.5-vl-72b-instruct", messages=messages)
        response_text = response["output"].choices[0].message["content"][0]["text"]
        log.info(f"Planned current task: {response_text}")
        return response_text

    def get_points_from_img(self, img_path: Optional[str] = None):
        """Query the pixel backend for pick/place points for both arms.

        Args:
            img_path: Path to the scene image; a fresh frame is captured and
                saved when None.

        Returns:
            Tuple ``(left_points, right_points, current_task)`` where the
            points are the backend's per-arm action payloads.
        """
        if img_path is None:
            img_path = self.camera.save_current_image()

        # task = self.get_task(img_path)
        # NOTE(review): the VLM-planned task above is bypassed with a fixed
        # sleeve-folding task — presumably for debugging; confirm before
        # relying on the planner in production.
        task = "Fold the sleeves on the left and right sides of the t-shirt"

        response_text = self.api(img_path, task=task)
        self.current_task = response_text["task"]
        self.left_points = response_text["action"]["left_arm"]
        self.right_points = response_text["action"]["right_arm"]
        self.response_text = response_text
        return self.left_points, self.right_points, self.current_task

    @staticmethod
    def extract_coordinates(arm_data) -> Dict[str, List[int]]:
        """Parse pick/place pixel coordinates out of a per-arm response blob.

        Args:
            arm_data: Arbitrary per-arm payload; stringified before matching.

        Returns:
            ``{"pick": [x, y], "place": [x, y]}`` with ``[0, 0]`` used as a
            sentinel when a pattern is not found.
        """
        text_data = str(arm_data)

        pick_match = PICK_RE.search(text_data)
        if pick_match:
            pick_coords = [int(pick_match.group(1)), int(pick_match.group(2))]
        else:
            pick_coords = [0, 0]

        place_match = PLACE_RE.search(text_data)
        if place_match:
            place_coords = [int(place_match.group(1)), int(place_match.group(2))]
        else:
            place_coords = [0, 0]

        return {"pick": pick_coords, "place": place_coords}

class ClothesFolder:
    """Dual-arm clothes-folding controller driven by the VLM planner."""

    # Polling period (seconds) while waiting for arm motion to finish.
    delay_time = 0.01
    # Give up waiting for motion completion after this many seconds.
    timeout_threshold = 5
    # Fixed joint configuration used to raise the arms (units per PiperArm).
    fixed_raise_joint = [-1.23, 96.491, -127.964, -3.408, 50.928, 0.0]

    def __init__(self, left_arm: PiperArm, right_arm: PiperArm, camera: Camera):
        self.left_arm = left_arm
        self.right_arm = right_arm
        self.planner = VlmAPI(camera=camera)
        self.camera = camera
        # Per-arm camera→base calibration, keyed by each arm's CAN bus name.
        self.T_left = CameraBaseTransforamtion("data/calibrations/" + left_arm.can)
        self.T_right = CameraBaseTransforamtion("data/calibrations/" + right_arm.can)

        # Start every session from the zero pose.
        self.go_zero()

    def go_zero(self):
        """Send both arms to their zero pose.

        Returns:
            True when both arms report motion done, False on timeout.
        """
        self.left_arm.go_zero()
        self.right_arm.go_zero()
        return self.__dual_wait_motion_done()

    def dual_move(self, left_xyz: Sequence[float], right_xyz: Sequence[float]):
        """Move each arm to its Cartesian target.

        An all-zero target acts as a sentinel and skips that arm.
        """
        if any(left_xyz):
            self.left_arm.move_to(left_xyz)
        if any(right_xyz):
            self.right_arm.move_to(right_xyz)

    def dual_move_and_grasp(self, left_xyz: Sequence[float], right_xyz: Sequence[float]):
        """Open both grippers, move to the targets, then close to grasp."""
        self.left_arm.open_gripper()
        self.right_arm.open_gripper()

        self.dual_move(left_xyz, right_xyz)

        self.left_arm.close_gripper()
        self.right_arm.close_gripper()

    def dual_move_and_release(self, left_xyz: Sequence[float], right_xyz: Sequence[float]):
        """Move to the targets, then open both grippers to release."""
        self.dual_move(left_xyz, right_xyz)
        self.left_arm.open_gripper()
        self.right_arm.open_gripper()

    def run(self):
        """Plan one fold step from the current camera view and execute it."""
        left, right, task = self.planner.get_points_from_img()
        self.planner.save_annotated_image()

        left_pick = self.xyz_from_pixels(left["pick"], "left")
        left_place = self.xyz_from_pixels(left["place"], "left")
        right_pick = self.xyz_from_pixels(right["pick"], "right")
        right_place = self.xyz_from_pixels(right["place"], "right")

        self.dual_move_and_grasp(left_pick, right_pick)

        # Pass through the midpoint so the cloth is lifted between pick and
        # place. NOTE(review): the arithmetic assumes the calibration
        # transforms return array-like (e.g. numpy) coordinates — confirm
        # CameraBaseTransforamtion's return type.
        left_mid = (left_pick + left_place) / 2
        # BUGFIX: was (right_pick + right_pick) / 2, which pinned the right
        # arm's midpoint at its pick position instead of halfway to place.
        right_mid = (right_pick + right_place) / 2
        self.dual_move(left_mid, right_mid)

        # Place the clothes and let go.
        self.dual_move_and_release(left_place, right_place)

    def xyz_from_pixels(self, xy: Sequence[int], arm: str):
        """Convert an image pixel to the given arm's base-frame coordinates.

        Args:
            xy: Pixel coordinates (x, y) in the camera image.
            arm: Either "left" or "right".

        Raises:
            ValueError: If ``arm`` is not "left" or "right" (previously this
                silently returned None).
        """
        camera_3d = self.camera.get_3d_point_from_pixel(*xy)
        if arm == "left":
            return self.T_left(camera_3d)
        if arm == "right":
            return self.T_right(camera_3d)
        raise ValueError(f"Unknown arm: {arm!r} (expected 'left' or 'right')")

    def __dual_wait_motion_done(self):
        """Poll both arms until neither reports motion.

        Returns:
            True when both arms stopped, False if the wait exceeded
            ``timeout_threshold`` seconds.
        """
        start_t = time.time()
        time.sleep(self.delay_time)
        while self.left_arm.is_in_motion() or self.right_arm.is_in_motion():
            time.sleep(self.delay_time)
            if time.time() - start_t > self.timeout_threshold:
                # Use the module logger (the original print bypassed logging
                # and misspelled "Execution").
                log.warning(f"Execution exceeds timeout threshold={self.timeout_threshold}s")
                return False
        return True


if __name__ == "__main__":
    # Manual/debug entry point: only the Camera import executes. Everything
    # below is commented-out scratch code kept for hardware bring-up tests
    # (camera capture, planner queries, and dual-arm grasp/release moves).
    from camera.camera import Camera

    # camera = Camera()
    # img = camera.save_current_image()

    # self = ClothesPlanner(Camera())
    # self.get_points_from_img()
    # self.save_annotated_image()
    # from model.grip_joint_mapping import EndJointModel
    # self = ClothesFolder(
    #     PiperArm("can_left", end_joint_model=EndJointModel(model_file="data/weights/nn_3d_6d_full_space.ckp"), wait_execution=False),
    #     PiperArm("can_right", end_joint_model=EndJointModel(model_file="data/weights/nn_3d_6d_full_space.ckp"), wait_execution=False),
    #     Camera(),
    # )

    # self.dual_move_and_grasp([518, -20, 0], [518, -20, 0])

    # robot.dual_grasp([518, -20, 0], [518, -20, 0])
    # robot.dual_release([318, 90, 50], [318, -90, 50])