'''
Mobile-phone agent.
    Based on https://github.com/QwenLM/Qwen2.5-VL/blob/main/cookbooks/mobile_agent.ipynb
    Uses the local inference backend from ..infer_vl_qwen2_5vl (get_vlm_infer)
'''
import sys
# D:\code\other\LLMs\llm_py310\python.exe -m pip install git+https://github.com/huggingface/transformers
# D:\code\other\LLMs\llm_py310\python.exe -m pip install qwen-vl-utils
# D:\code\other\LLMs\llm_py310\python.exe -m pip install qwen_agent
# D:\code\other\LLMs\llm_py310\python.exe -m pip install openai
# D:\code\other\LLMs\llm_py310\python.exe -m pip install icecream

from typing import Union, Tuple, List
from qwen_agent.tools.base import BaseTool, register_tool

'''
# Tool prompt (tool description plus the call-argument schema) and the tool dispatch logic.
'''


@register_tool("mobile_use")
class MobileUse(BaseTool):
    """Touchscreen-control tool schema for a mobile device.

    This class defines the tool description / JSON argument schema that gets
    folded into the model prompt, and dispatches validated tool calls to the
    `_<action>` primitives. Concrete device backends must override those
    primitives (they all raise ``NotImplementedError`` here).
    """

    @property
    def description(self):
        # Runtime tool description injected into the system prompt. The
        # advertised resolution must match the (resized) screenshot so the
        # model emits coordinates in the same pixel space.
        return f"""
Use a touchscreen to interact with a mobile device, and take screenshots.
* This is an interface to a mobile device with touchscreen. You can perform actions like clicking, typing, swiping, etc.
* Some applications may take time to start or process actions, so you may need to wait and take successive screenshots to see the results of your actions.
* The screen's resolution is {self.display_width_px}x{self.display_height_px}.
* Make sure to click any buttons, links, icons, etc with the cursor tip in the center of the element. Don't click boxes on their edges unless asked.
""".strip()

    # JSON-schema style declaration of the accepted call arguments
    # (consumed by the function-calling prompt builder).
    parameters = {
        "properties": {
            "action": {
                "description": """
The action to perform. The available actions are:
* `key`: Perform a key event on the mobile device.
    - This supports adb's `keyevent` syntax.
    - Examples: "volume_up", "volume_down", "power", "camera", "clear".
* `click`: Click the point on the screen with coordinate (x, y).
* `long_press`: Press the point on the screen with coordinate (x, y) for specified seconds.
* `swipe`: Swipe from the starting point with coordinate (x, y) to the end point with coordinates2 (x2, y2).
* `type`: Input the specified text into the activated input box.
* `system_button`: Press the system button.
* `open`: Open an app on the device.
* `wait`: Wait specified seconds for the change to happen.
* `terminate`: Terminate the current task and report its completion status.
""".strip(),
                "enum": [
                    "key",
                    "click",
                    "long_press",
                    "swipe",
                    "type",
                    "system_button",
                    "open",
                    "wait",
                    "terminate",
                ],
                "type": "string",
            },
            "coordinate": {
                "description": "(x, y): The x (pixels from the left edge) and y (pixels from the top edge) coordinates to move the mouse to. Required only by `action=click`, `action=long_press`, and `action=swipe`.",
                "type": "array",
            },
            "coordinate2": {
                "description": "(x, y): The x (pixels from the left edge) and y (pixels from the top edge) coordinates to move the mouse to. Required only by `action=swipe`.",
                "type": "array",
            },
            "text": {
                "description": "Required only by `action=key`, `action=type`, and `action=open`.",
                "type": "string",
            },
            "time": {
                "description": "The seconds to wait. Required only by `action=long_press` and `action=wait`.",
                "type": "number",
            },
            "button": {
                "description": "Back means returning to the previous interface, Home means returning to the desktop, Menu means opening the application background menu, and Enter means pressing the enter. Required only by `action=system_button`",
                "enum": [
                    "Back",
                    "Home",
                    "Menu",
                    "Enter",
                ],
                "type": "string",
            },
            "status": {
                "description": "The status of the task. Required only by `action=terminate`.",
                "type": "string",
                "enum": ["success", "failure"],
            },
        },
        "required": ["action"],
        "type": "object",
    }

    def __init__(self, cfg=None):
        """:param cfg: dict with `display_width_px` / `display_height_px`
            (the resolution advertised to the model).
        :raises ValueError: if the screen geometry is missing.
        """
        # Fail fast with a clear message instead of the opaque
        # TypeError/KeyError the bare subscript used to raise.
        if not cfg or "display_width_px" not in cfg or "display_height_px" not in cfg:
            raise ValueError(
                "MobileUse requires cfg with 'display_width_px' and 'display_height_px'"
            )
        self.display_width_px = cfg["display_width_px"]
        self.display_height_px = cfg["display_height_px"]
        super().__init__(cfg)

    def call(self, params: Union[str, dict], **kwargs):
        """Validate `params` and dispatch to the matching `_<action>` handler.

        :param params: JSON string or dict matching `parameters` above.
        :raises ValueError: if `action` is not a supported action name.
        """
        params = self._verify_json_format_args(params)
        action = params["action"]
        # Table dispatch instead of a long if/elif chain; each entry pulls
        # exactly the arguments its handler needs.
        handlers = {
            "key": lambda p: self._key(p["text"]),
            "click": lambda p: self._click(coordinate=p["coordinate"]),
            "long_press": lambda p: self._long_press(coordinate=p["coordinate"], time=p["time"]),
            "swipe": lambda p: self._swipe(coordinate=p["coordinate"], coordinate2=p["coordinate2"]),
            "type": lambda p: self._type(p["text"]),
            "system_button": lambda p: self._system_button(p["button"]),
            "open": lambda p: self._open(p["text"]),
            "wait": lambda p: self._wait(p["time"]),
            "terminate": lambda p: self._terminate(p["status"]),
        }
        if action not in handlers:
            raise ValueError(f"Unknown action: {action}")
        return handlers[action](params)

    # --- Device-specific primitives: implement in a concrete subclass. ---

    def _key(self, text: str):
        raise NotImplementedError()

    def _click(self, coordinate: Tuple[int, int]):
        raise NotImplementedError()

    def _long_press(self, coordinate: Tuple[int, int], time: float):
        # schema declares `time` as "number", so seconds may be fractional
        raise NotImplementedError()

    def _swipe(self, coordinate: Tuple[int, int], coordinate2: Tuple[int, int]):
        raise NotImplementedError()

    def _type(self, text: str):
        raise NotImplementedError()

    def _system_button(self, button: str):
        raise NotImplementedError()

    def _open(self, text: str):
        raise NotImplementedError()

    def _wait(self, time: float):
        raise NotImplementedError()

    def _terminate(self, status: str):
        raise NotImplementedError()


from img2text import inference_with_api
from qwen_agent.llm.fncall_prompts.nous_fncall_prompt import (
    NousFnCallPrompt,
    Message,
    ContentItem,
)
from qwen_vl_utils import smart_resize
import json

from PIL import Image, ImageDraw, ImageFont, ImageColor
import base64

def get_inference_with_api():
    """Return a `vlm_infer(messages) -> [text]` callable backed by DashScope's
    OpenAI-compatible chat-completions endpoint (qwen2.5-vl-72b-instruct).

    The API key is read from the ``DASHSCOPE_API_KEY`` environment variable;
    never hard-code secrets in source.

    :return: function taking a prepared `messages` list and returning the
        reply content as a one-element list (mirrors the local backend's
        batch shape).
    """
    import os
    from openai import OpenAI

    client = OpenAI(
        # SECURITY: an API key used to be hard-coded here and committed to
        # source control — rotate that key and supply a new one via the
        # environment instead.
        api_key=os.getenv("DASHSCOPE_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",  # intl: https://dashscope-intl.aliyuncs.com/compatible-mode/v1
    )

    def inference_with_api(messages):
        """Send `messages` (already base64-inlined, see build_message) and
        return ``[content]``."""
        completion = client.chat.completions.create(
            model="qwen2.5-vl-72b-instruct",
            messages=messages,
        )
        # NOTE(review): the closing </tool_call> tag occasionally comes back
        # garbled in the content — parse_action has a fallback for that.
        return [completion.choices[0].message.content]

    return inference_with_api

def get_mobile_prompt_vlm_infer(is_use_api=False):
    """Build an inference callable that prompts a Qwen2.5-VL model with the
    `mobile_use` tool schema plus a screenshot.

    :param is_use_api: True -> DashScope OpenAI-compatible API backend;
        False -> local model via ``infer_vl_qwen2_5vl.get_vlm_infer``.
    :return: ``mobile_prompt_vlm_infer(system_prompt, user_query,
        screenshot_path) -> str`` returning the raw model output text.
    """
    # HACK: hard-coded local path for the offline backend — make configurable.
    sys.path.append(r'D:\code\other\LLMs\algorithms')
    if is_use_api:
        vlm_infer = get_inference_with_api()
    else:
        from infer_vl_qwen2_5vl import get_vlm_infer
        vlm_infer = get_vlm_infer()

    def build_message(system_prompt: str, user_query: str, screenshot_path: str) -> List[dict]:
        """Build the chat messages (system + user text + screenshot image).

        :param system_prompt: system prompt text.
        :param user_query: user task description.
        :param screenshot_path: path to the current screenshot.
        :return: list of plain message dicts ready for the model / API.
        """
        # Compute the resized geometry the model will actually see, so the
        # tool can advertise a resolution consistent with its coordinates.
        dummy_image = Image.open(screenshot_path)
        resized_height, resized_width = smart_resize(
            dummy_image.height,
            dummy_image.width,
            min_pixels=512 * 28 * 28,
            max_pixels=2048 * 28 * 28,
        )

        mobile_use = MobileUse(
            cfg={"display_width_px": resized_width, "display_height_px": resized_height}
        )
        # NousFnCallPrompt folds the tool schema into the system message.
        message = NousFnCallPrompt().preprocess_fncall_messages(
            messages=[
                Message(role="system", content=[ContentItem(text=system_prompt)]),
                Message(role="user", content=[
                    ContentItem(text=user_query),
                    ContentItem(image=f"file://{screenshot_path}")
                ]),
            ],
            functions=[mobile_use.function],
            lang=None,
        )
        message = [msg.model_dump() for msg in message]  # Message objects -> plain dicts

        if is_use_api:
            # The remote API cannot read local files: inline each image as a
            # base64 data URL and tag every content item with a `type`.
            for mes in message:
                for ct in mes["content"]:
                    if ct.get('image'):
                        image_path = ct["image"][7:]  # strip the "file://" prefix
                        with open(image_path, "rb") as image_file:
                            base64_image = base64.b64encode(image_file.read()).decode("utf-8")
                        # BUGFIX: the registered JPEG MIME type is image/jpeg
                        # (the original sent the non-standard image/jpg).
                        ct["image_url"] = {"url": f"data:image/jpeg;base64,{base64_image}"}
                        ct["type"] = "image_url"
                        ct["min_pixels"] = 512 * 28 * 28
                        ct["max_pixels"] = 2048 * 28 * 28
                        # NOTE(review): the original "image" key is kept; the
                        # API appears to tolerate it — confirm before deleting.
                    elif ct.get('text'):
                        ct["type"] = "text"
                    elif ct.get('image_url'):
                        ct["type"] = "image_url"

        return message

    def mobile_prompt_vlm_infer(system_prompt, user_query, screenshot_path):
        """Run one inference round and return the raw model output text."""
        message = build_message(system_prompt, user_query, screenshot_path)
        output_text = vlm_infer(message)[0]  # backend returns a batch; take the first item

        print('output_text')
        print(output_text)
        return output_text

    return mobile_prompt_vlm_infer


def parse_action(output_text, screenshot_path):
    """Extract the ``<tool_call>`` JSON from the model output and visualize it.

    Opens the screenshot, resizes it with the same pixel budget used when the
    prompt was built (so the model's coordinates line up), and shows the
    image — with a dot on the target point if the action was a click.

    :param output_text: raw model output, expected to contain a <tool_call> block.
    :param screenshot_path: path of the screenshot the model saw.
    :return: None (also returns None early if no tool call could be parsed).
    """
    def draw_point(image: Image.Image, point: list, color=None):
        # Overlay a half-transparent dot (radius = 5% of the short side) at `point`.
        if isinstance(color, str):
            try:
                color = ImageColor.getrgb(color) + (128,)
            except ValueError:
                color = (255, 0, 0, 128)  # unknown color name -> red fallback
        else:
            color = (255, 0, 0, 128)

        overlay = Image.new('RGBA', image.size, (255, 255, 255, 0))
        overlay_draw = ImageDraw.Draw(overlay)
        radius = min(image.size) * 0.05
        x, y = point

        overlay_draw.ellipse(
            [(x - radius, y - radius), (x + radius, y + radius)],
            fill=color
        )
        combined = Image.alpha_composite(image.convert('RGBA'), overlay)
        return combined.convert('RGB')

    # The model wraps the call in <tool_call>...</tool_call>; some outputs end
    # with a garbled closing tag, hence the second attempt. The bare `except:`
    # clauses are narrowed to the two errors these lines can actually raise.
    try:
        action = json.loads(output_text.split('<tool_call>\n')[1].split('\n</tool_call>')[0])
    except (IndexError, json.JSONDecodeError):
        try:
            action = json.loads(output_text.split('<tool_call>\n')[1].split('\n⚗')[0])
        except (IndexError, json.JSONDecodeError):
            return

    dummy_image = Image.open(screenshot_path)
    # BUGFIX: use the same min/max pixel budget as build_message; the original
    # used smart_resize defaults here, which can produce a different size than
    # the one the model was told, misaligning the coordinates.
    resized_height, resized_width = smart_resize(
        dummy_image.height,
        dummy_image.width,
        min_pixels=512 * 28 * 28,
        max_pixels=2048 * 28 * 28,
    )
    display_image = dummy_image.resize((resized_width, resized_height))
    if action['arguments']['action'] == "click":
        # BUGFIX: draw on the *resized* image — the model's coordinates are in
        # the resized space, so drawing on the original-size image (as the
        # original code did) misplaced the marker.
        display_image = draw_point(display_image, action['arguments']['coordinate'], color='green')
    display_image.show()


if __name__ == '__main__':
    # Demo run: ask the model to pick an item on the sample screenshot using
    # the DashScope API backend, then visualize the parsed tool call.
    system_prompt = "You are a helpful assistant."
    user_query = '调用工具，选择花生芝麻汤圆'
    screenshot_path = "./img/mobile_zh_example.jpg"

    infer_fn = get_mobile_prompt_vlm_infer(is_use_api=True)
    model_output = infer_fn(system_prompt, user_query, screenshot_path)
    parse_action(model_output, screenshot_path)
    # test_get_vlm_infer
