import logging
import os
import re
from typing import Dict, List, Optional, Tuple

import dashscope

# from mpl_toolkits.mplot3d import Axes3D
# from piper_utils.piper_ik_pinocchio import Arm_IK
# NOTE(security): a live API key was hard-coded here. Prefer the
# DASHSCOPE_API_KEY environment variable; the hard-coded fallback is kept
# only for backward compatibility -- rotate this key and remove it.
dashscope.api_key = os.environ.get("DASHSCOPE_API_KEY", "sk-4a47da58c2e64a53bc7b94d0892016be")

log = logging.getLogger(__name__)

# Regular expressions for pulling "[x, y]" coordinate pairs out of the
# model's free-form text response (case-insensitive, spanning line breaks).
PICK_RE = re.compile(r"pick[^[]*\[\s*(\d+)\s*,\s*(\d+)\s*\]", re.I | re.S)
PLACE_RE = re.compile(r"place[^[]*\[\s*(\d+)\s*,\s*(\d+)\s*\]", re.I | re.S)
ANY_RE = re.compile(r"\[\s*(\d+)\s*,\s*(\d+)\s*\]")  # fallback


def extract_coords(text: str) -> Tuple[Optional[Tuple[int, int]], Optional[Tuple[int, int]]]:
    """
    Extract (pick_xy, place_xy) pixel pairs from free-form model output.

    Returns (None, None) when no coordinate pair can be found. Accepts
    variations such as:
        point_2d:[x,y]
        "point_2d": [x, y]
        arbitrary whitespace / line breaks
    """
    pick_match = re.search(r"pick[^[]*\[\s*(\d+)\s*,\s*(\d+)\s*\]", text, re.I | re.S)
    place_match = re.search(r"place[^[]*\[\s*(\d+)\s*,\s*(\d+)\s*\]", text, re.I | re.S)

    if pick_match and place_match:
        # Both labelled pairs found: convert each capture group to int.
        pick_xy = (int(pick_match.group(1)), int(pick_match.group(2)))
        place_xy = (int(place_match.group(1)), int(place_match.group(2)))
        return pick_xy, place_xy

    # Fallback: take the first two "[x, y]" pairs in order of appearance.
    pairs = re.findall(r"\[\s*(\d+)\s*,\s*(\d+)\s*\]", text)
    if len(pairs) >= 2:
        first, second = pairs[0], pairs[1]
        return (int(first[0]), int(first[1])), (int(second[0]), int(second[1]))

    return None, None

# "The image shows the scene visible to the camera. Please select suitable grasping points for the objects so that the robotic arm can successfully pick up and place the items, assisting with household work.\n"

def build_messages(img_path, task) -> List[Dict]:
    """Build the chat message list sent to the Qwen vision-language model.

    The conversation is: a system message, a user message holding the
    instruction text + camera image + task, and a final user message that
    pins the expected output format.

    Args:
        img_path: Path to the camera image (converted to str for the API).
        task: Natural-language task instruction appended after the image.

    Returns:
        A list of role/content dicts in the dashscope chat format.
    """
    instruction = "The image displays the scene visible to the camera. Please select appropriate grasping points (PICK) and placing points (PLACE), so that the robotic arm can successfully move the objects.\n"
    user_content = [
        {"type": "text", "text": instruction},
        {"type": "image", "image": str(img_path), "min_pixels": 640 * 480, "max_pixels": 1280 * 960},
        {"type": "text", "text": task},
    ]
    # Example answer layout the model is asked to follow.
    format_hint = (
        "output format:\n"
        "right_arm or left_arm:{\n"
        "pick:{\n point_2d:[x, y]\n},\n"
        "place:{\n point_2d:[x, y]\n}\n},\n"
        "left_arm or right_arm:do nothing"
    )
    return [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": user_content},
        {"role": "user", "content": format_hint},
    ]


def response(messages: List[dict]) -> str:
    """Call the MultiModalConversation API and return the model's text reply.

    Args:
        messages: Chat messages as produced by build_messages().

    Returns:
        The text content of the first choice of the model output.
        (Fixed: the previous annotation claimed ``dict`` but a ``str`` is
        returned.)

    Raises:
        KeyError / AttributeError / IndexError if the API reply does not
        have the expected structure (e.g. an API error response).
    """
    # Renamed the local variable: it previously shadowed this function's name.
    api_reply = dashscope.MultiModalConversation.call(model="qwen2.5-vl-72b-instruct", messages=messages)
    # The mixed mapping/attribute access mirrors the dashscope reply object.
    return api_reply["output"].choices[0].message["content"][0]["text"]


def get_placepick_qwen_pixel(image_path: str, task: str) -> Tuple[Optional[Tuple[int, int]], Optional[Tuple[int, int]]]:
    """Query Qwen for pick and place pixel coordinates in the given image.

    Best-effort: any API or parsing failure is logged and (None, None) is
    returned instead of raising, so the caller can decide to retry/abort.

    Args:
        image_path: Path of the image to send to the model.
        task: Natural-language description of the pick/place task.

    Returns:
        (pick_xy, place_xy) as (x, y) int tuples, or (None, None) on failure.
    """
    try:
        messages = build_messages(image_path, task)
        response_text = response(messages)
        log.info(f"Qwen响应: {response_text}")

        # Parse the "[x, y]" pairs out of the free-form model answer.
        pick_coord, place_coord = extract_coords(response_text)
        log.info(f"   Pick: {pick_coord}")
        log.info(f"   Place: {place_coord}")

        return pick_coord, place_coord
    except Exception as e:
        # Fixed: errors were logged at INFO with no traceback; log.exception
        # records the stack trace while keeping the best-effort contract.
        log.exception(f"Qwen调用错误: {str(e)}")
        return None, None


def get_clothes_qwen_pixel(image_path: str, task: str) -> Dict[str, Dict[str, Optional[Tuple[int, int]]]]:
    """Query Qwen twice to plan a bimanual clothes-folding move.

    Round 1 asks for the left arm's PICK/PLACE pixel coordinates; round 2
    feeds those back and asks for the right arm's coordinates so the two
    arms can cooperate without colliding.

    Args:
        image_path: Path of the image to send to the model.
        task: Natural-language description of the folding task.

    Returns:
        {"left_arm": {"pick": ..., "place": ...},
         "right_arm": {"pick": ..., "place": ...}}
        where each value is an (x, y) int tuple or None if extraction failed.
        (Fixed: a trailing comma previously wrapped this dict in a 1-tuple,
        and the annotation wrongly promised a Tuple of coordinate pairs.)
    """
    # Fixed: the original evaluated `task + "..."` and discarded the result,
    # so the per-round instruction never reached the model.
    task = task + "\n In the first step, you need to determine the pixels coordinates of PICK and PLACE of the left arm."
    messages = build_messages(image_path, task)
    response_text = response(messages)
    log.info(f"Qwen响应: {response_text}")

    # Left arm coordinates from the first round.
    left_pick, left_place = extract_coords(response_text)
    log.info(f" Left Pick: {left_pick}")
    log.info(f" Left Place: {left_place}")

    # Fixed: same discarded-result bug, plus a "PLACE==" typo in the prompt.
    task = task + f"\n The pixel coordinates of left arm is determined: PICK={left_pick}, PLACE={left_place}. You need to determine the coordinates of the right arm. So that the arms do not collide and they can cooperate to finish the clothes folding task."
    messages = build_messages(image_path, task)
    response_text = response(messages)
    log.info(f"Qwen响应: {response_text}")

    # Right arm coordinates from the second round.
    right_pick, right_place = extract_coords(response_text)
    log.info(f" Right Pick: {right_pick}")
    log.info(f" Right Place: {right_place}")

    return {"left_arm": {"pick": left_pick, "place": left_place}, "right_arm": {"pick": right_pick, "place": right_place}}


if __name__ == "__main__":

    from camera.camera import Camera

    # Capture the current camera view, ask Qwen for pick/place pixels, and
    # save an annotated copy of the image.
    camera = Camera(model="L515")
    camera.save_current_image()
    camera.pick_point_pixel, camera.place_point_pixel = get_placepick_qwen_pixel("current_view.jpg", "pick the pink clip to drop in the paper cup")
    camera.save_annotated_image("annotated_current_view.jpg")