'''
Computer-use agent.
    Reference: https://github.com/QwenLM/Qwen2.5-VL/blob/main/cookbooks/mobile_agent.ipynb
    Based on: from ..infer_vl_qwen2_5vl import get_vlm_infer
'''
import sys
import time
# D:\code\other\LLMs\llm_py310\python.exe -m pip install git+https://github.com/huggingface/transformers
# D:\code\other\LLMs\llm_py310\python.exe -m pip install qwen-vl-utils
# D:\code\other\LLMs\llm_py310\python.exe -m pip install qwen_agent
# D:\code\other\LLMs\llm_py310\python.exe -m pip install openai
# D:\code\other\LLMs\llm_py310\python.exe -m pip install icecream
# D:\code\other\LLMs\llm_py310\python.exe -m pip install pyautogui


from typing import Union, Tuple, List

import copy
from qwen_agent.tools.base import BaseTool, register_tool
import pyautogui


'''
# Tool prompt (tool description / usage notes, call-argument schema) and the tool's invocation logic.
'''
@register_tool("computer_use")
class ComputerUse(BaseTool):
    """qwen-agent tool that drives a desktop GUI (mouse/keyboard) via pyautogui.

    Coordinates passed to actions are expected in the resized-screenshot space
    whose size is cfg["display_width_px"] x cfg["display_height_px"]; callers
    compute that size with smart_resize. Only the click actions are implemented
    here -- the remaining actions are NotImplementedError stubs.
    """

    @property
    def description(self):
        # Tool usage instructions that get folded into the system prompt.
        return f"""
Use a mouse and keyboard to interact with a computer, and take screenshots.
* This is an interface to a desktop GUI. You do not have access to a terminal or applications menu. You must click on desktop icons to start applications.
* Some applications may take time to start or process actions, so you may need to wait and take successive screenshots to see the results of your actions. E.g. if you click on Firefox and a window doesn't open, try wait and taking another screenshot.
* The screen's resolution is {self.display_width_px}x{self.display_height_px}.
* Whenever you intend to move the cursor to click on an element like an icon, you should consult a screenshot to determine the coordinates of the element before moving the cursor.
* If you tried clicking on a program or link but it failed to load, even after waiting, try adjusting your cursor position so that the tip of the cursor visually falls on the element that you want to click.
* Make sure to click any buttons, links, icons, etc with the cursor tip in the center of the element. Don't click boxes on their edges unless asked.
""".strip()

    # JSON schema for the tool-call arguments.
    parameters = {
        "properties": {
            "action": {
                "description": """
The action to perform. The available actions are:
* `key`: Performs key down presses on the arguments passed in order, then performs key releases in reverse order.
* `type`: Type a string of text on the keyboard.
* `mouse_move`: Move the cursor to a specified (x, y) pixel coordinate on the screen.
* `click`: Click the left mouse button.
* `left_click`: Click the left mouse button.
* `left_click_drag`: Click and drag the cursor to a specified (x, y) pixel coordinate on the screen.
* `right_click`: Click the right mouse button.
* `middle_click`: Click the middle mouse button.
* `double_click`: Double-click the left mouse button.
* `scroll`: Performs a scroll of the mouse scroll wheel.
* `wait`: Wait specified seconds for the change to happen.
* `terminate`: Terminate the current task and report its completion status.
""".strip(),
                "enum": [
                    "key",
                    "type",
                    "mouse_move",
                    # BUGFIX: a missing comma previously merged "click" and
                    # "left_click" into the single literal "clickleft_click",
                    # dropping both valid values from the schema.
                    "click",
                    "left_click",
                    "left_click_drag",
                    "right_click",
                    "middle_click",
                    "double_click",
                    "scroll",
                    "wait",
                    "terminate",
                ],
                "type": "string",
            },
            "keys": {
                "description": "Required only by `action=key`.",
                "type": "array",
            },
            "text": {
                "description": "Required only by `action=type`.",
                "type": "string",
            },
            "coordinate": {
                "description": "(x, y): The x (pixels from the left edge) and y (pixels from the top edge) coordinates to move the mouse to. Required only by `action=mouse_move` and `action=left_click_drag`.",
                "type": "array",
            },
            "pixels": {
                "description": "The amount of scrolling to perform. Positive values scroll up, negative values scroll down. Required only by `action=scroll`.",
                "type": "number",
            },
            "time": {
                "description": "The seconds to wait. Required only by `action=wait`.",
                "type": "number",
            },
            "status": {
                "description": "The status of the task. Required only by `action=terminate`.",
                "type": "string",
                "enum": ["success", "failure"],
            },
        },
        "required": ["action"],
        "type": "object",
    }

    def __init__(self, cfg=None):
        # NOTE(review): cfg is effectively mandatory despite the None default;
        # passing None raises TypeError on the subscripts below.
        self.display_width_px = cfg["display_width_px"]
        self.display_height_px = cfg["display_height_px"]
        super().__init__(cfg)

    def call(self, params: Union[str, dict], **kwargs):
        """Dispatch one tool call.

        :param params: dict holding "action" plus that action's required
            arguments (see `parameters`).
        :raises ValueError: for an unknown action.
        """
        # params = self._verify_json_format_args(params)
        action = params["action"]

        if action == "double_click":
            return self._double_click(params["coordinate"])
        elif action in ("left_click", "click"):  # both aliases are a plain left click
            return self._left_click(params["coordinate"])
        elif action == "key":
            return self._key(params["keys"])
        elif action == "type":
            return self._type(params["text"])
        elif action == "mouse_move":
            return self._mouse_move(params["coordinate"])
        elif action == "left_click_drag":
            return self._left_click_drag(params["coordinate"])
        elif action == "scroll":
            return self._scroll(params["pixels"])
        elif action == "wait":
            return self._wait(params["time"])
        elif action == "terminate":
            return self._terminate(params["status"])
        else:
            raise ValueError(f"Invalid action: {action}")

    def _double_click(self, coordinate: Tuple[int, int]):
        # Left double-click at screen coordinate (x, y).
        x, y = coordinate
        pyautogui.doubleClick(x=x, y=y)

    def _left_click(self, coordinate: Tuple[int, int]):
        # Single left click at screen coordinate (x, y).
        # (The original comment wrongly described this as a double click.)
        x, y = coordinate
        pyautogui.leftClick(x=x, y=y)

    # --- Actions declared in the schema but not implemented yet. ---

    def _mouse_click(self, button: str):
        raise NotImplementedError()

    def _key(self, keys: List[str]):
        raise NotImplementedError()

    def _type(self, text: str):
        raise NotImplementedError()

    def _mouse_move(self, coordinate: Tuple[int, int]):
        raise NotImplementedError()

    def _left_click_drag(self, coordinate: Tuple[int, int]):
        raise NotImplementedError()

    def _scroll(self, pixels: int):
        raise NotImplementedError()

    def _wait(self, time: int):
        raise NotImplementedError()

    def _terminate(self, status: str):
        raise NotImplementedError()


from img2text import inference_with_api
from qwen_agent.llm.fncall_prompts.nous_fncall_prompt import (
    NousFnCallPrompt,
    Message,
    ContentItem,
)
from qwen_vl_utils import smart_resize
import json

from PIL import Image, ImageDraw, ImageFont, ImageColor
import base64

def get_inference_with_api():
    """Return a callable that queries qwen2.5-vl-72b-instruct through the
    DashScope OpenAI-compatible endpoint.

    The returned function accepts a fully-built OpenAI-format message list and
    returns a single-element list containing the model's text, mirroring the
    batched output shape of the local vlm_infer.
    """
    from openai import OpenAI

    # SECURITY NOTE(review): a real API key is hard-coded in source. Move it to
    # an environment variable (e.g. DASHSCOPE_API_KEY) and rotate this key.
    client = OpenAI(
        api_key='sk-38bdbf76aba641dfb1a671c7259d6dd5',
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1", # "https://dashscope-intl.aliyuncs.com/compatible-mode/v1",
    )

    def inference_with_api(messages):
        """Send `messages` as-is and return [text] to match the batch interface."""
        completion = client.chat.completions.create(
            model="qwen2.5-vl-72b-instruct",
            messages=messages,
        )
        # NOTE(review): the closing </tool_call> tag sometimes comes back
        # garbled from the API -- parse_computer_use_action has a fallback.
        return [completion.choices[0].message.content]

    return inference_with_api

def get_mobile_prompt_vlm_infer(is_use_api=False):
    """Build and return `mobile_prompt_vlm_infer(user_query, screenshot_path, output_text=None)`.

    The returned closure keeps a growing message history so the conversation
    can continue across agent steps.

    :param is_use_api: True -> call the DashScope API; False -> run the local
        qwen2.5-vl model from infer_vl_qwen2_5vl.
    """
    sys.path.append(r'D:\code\other\LLMs\algorithms')
    if is_use_api:
        vlm_infer = get_inference_with_api()
    else:
        from infer_vl_qwen2_5vl import get_vlm_infer
        vlm_infer = get_vlm_infer()

    messages_qwen = []  # conversation history; appended to on every call

    def build_message(user_query: str, screenshot_path: str, output_text=None) -> List[dict]:
        """Build (or extend) the message list for one inference step.

        :param user_query: user instruction for this step.
        :param screenshot_path: path to the current screenshot file.
        :param output_text: previous model output (currently unused).
        :return: the full message list (the shared `messages_qwen` object).
        """
        # Compute the resized screenshot dimensions the model will see; the
        # tool advertises this resolution so predicted coordinates match it.
        dummy_image = Image.open(screenshot_path)
        resized_height, resized_width = smart_resize(
            dummy_image.height,
            dummy_image.width,
            min_pixels=512 * 28 * 28,
            max_pixels=2048 * 28 * 28,
        )
        computer_use = ComputerUse(
            cfg={"display_width_px": resized_width, "display_height_px": resized_height}
        )

        if not messages_qwen:
            # First turn: NousFnCallPrompt folds the tool spec into the system prompt.
            messagex = NousFnCallPrompt().preprocess_fncall_messages(
                messages=[
                    Message(role="system", content=[ContentItem(text="You are a helpful assistant.")]),
                    Message(role="user", content=[
                        ContentItem(text=user_query),
                        ContentItem(image=f"file://{screenshot_path}"),
                    ]),
                ],
                functions=[computer_use.function],
                lang=None,
            )
        else:
            # Follow-up turns: append a plain user message with the new screenshot.
            messagex = [
                Message(role="user", content=[
                    ContentItem(text=user_query),
                    ContentItem(image=f"file://{screenshot_path}"),
                ]),
            ]
        messages_qwen.extend(msg.model_dump() for msg in messagex)

        if is_use_api:
            # Convert file:// image references to OpenAI-style base64 data URLs.
            for mes in messages_qwen:
                for ct in mes["content"]:
                    if ct.get('image'):
                        image_path = ct["image"][7:]  # strip the "file://" prefix
                        with open(image_path, "rb") as image_file:
                            base64_image = base64.b64encode(image_file.read()).decode("utf-8")
                        # BUGFIX: take the real extension ('png'/'jpg'); the old
                        # split('.')[-2] picked the file stem and produced a
                        # malformed "data:image/<path>" MIME type.
                        img_format = image_path.split('.')[-1]
                        ct["image_url"] = {"url": f"data:image/{img_format};base64,{base64_image}"}
                        ct["type"] = "image_url"
                        ct["min_pixels"] = 512 * 28 * 28
                        ct["max_pixels"] = 2048 * 28 * 28
                        # NOTE(review): the "image" key is intentionally kept,
                        # so earlier entries are re-encoded on every call.
                    elif ct.get('text'):
                        ct["type"] = "text"
                    elif ct.get('image_url'):
                        ct["type"] = "image_url"

        return messages_qwen

    def mobile_prompt_vlm_infer(user_query, screenshot_path, output_text=None):
        """Run one step: build messages, log them (base64 payloads blanked),
        call the model, and return its raw text output."""
        message = build_message(user_query, screenshot_path, output_text=output_text)

        # Pretty-print the conversation with image payloads stripped for readability.
        show_message_list = copy.deepcopy(message)
        for show_message_dict in show_message_list:
            for content_dict in show_message_dict['content']:
                if 'image_url' in content_dict:
                    content_dict['image_url'] = {}
        print(json.dumps(show_message_list, indent=4, ensure_ascii=False))

        output_text = vlm_infer(message)[0]  # single item of the batch
        print('output_text')
        print(output_text)
        return output_text

    return mobile_prompt_vlm_infer


def parse_computer_use_action(output_text, screenshot_path, is_sim=True):
    """Parse the model's <tool_call> JSON from `output_text`, then either
    visualize the action on the screenshot (is_sim=True) or execute it for
    real through ComputerUse/pyautogui (is_sim=False).

    :param output_text: raw model output containing a <tool_call> block.
    :param screenshot_path: screenshot the action refers to.
    :param is_sim: simulate (display) instead of executing.
    :return: None; also returns None early when no tool call can be parsed.
    """

    def draw_point(image, point, color=None):
        """Return an RGB copy of `image` (PIL.Image) with a translucent dot at `point`."""
        if isinstance(color, str):
            try:
                color = ImageColor.getrgb(color) + (128,)  # 50% opacity
            except ValueError:
                color = (255, 0, 0, 128)
        else:
            color = (255, 0, 0, 128)

        overlay = Image.new('RGBA', image.size, (255, 255, 255, 0))
        overlay_draw = ImageDraw.Draw(overlay)
        radius = min(image.size) * 0.05
        x, y = point
        overlay_draw.ellipse(
            [(x - radius, y - radius), (x + radius, y + radius)],
            fill=color,
        )
        combined = Image.alpha_composite(image.convert('RGBA'), overlay)
        return combined.convert('RGB')

    # Extract the JSON payload between <tool_call> tags. Some responses come
    # back with a garbled closing tag, hence the second delimiter fallback.
    try:
        action = json.loads(output_text.split('<tool_call>\n')[1].split('\n</tool_call>')[0])
    except (IndexError, json.JSONDecodeError):
        try:
            action = json.loads(output_text.split('<tool_call>\n')[1].split('\n⚗')[0])
        except (IndexError, json.JSONDecodeError):
            return None

    if is_sim:
        dummy_image = Image.open(screenshot_path)
        # CONSISTENCY FIX: use the same resize bounds as message building so
        # the displayed image matches the model's coordinate space (the
        # original call relied on smart_resize defaults here).
        resized_height, resized_width = smart_resize(
            dummy_image.height,
            dummy_image.width,
            min_pixels=512 * 28 * 28,
            max_pixels=2048 * 28 * 28,
        )
        display_image = dummy_image.resize((resized_width, resized_height))
        if action['arguments']['action'] in ("left_click", "double_click", "click"):
            # BUGFIX: draw on the resized image -- the model's coordinates are
            # in resized space, not on the original-size screenshot.
            display_image = draw_point(display_image, action['arguments']['coordinate'], color='green')
        display_image.show()
    else:
        # Execute the action for real.
        dummy_image = Image.open(screenshot_path)
        resized_height, resized_width = smart_resize(
            dummy_image.height,
            dummy_image.width,
            min_pixels=512 * 28 * 28,
            max_pixels=2048 * 28 * 28,
        )
        computer_use = ComputerUse(
            cfg={"display_width_px": resized_width, "display_height_px": resized_height}
        )
        # NOTE(review): predicted coordinates are in the resized space, but
        # pyautogui clicks in physical screen pixels -- if the screenshot was
        # actually downscaled, a rescale step is likely needed here. Confirm.
        computer_use.call(action['arguments'])

def test_computer_agent_qwen2_5vl_agent():
    """ReAct-style agent loop: screenshot (Observe) -> VLM (Think) -> tool call (Act).

    Runs up to `total_steps` iterations. With is_sim=False each step captures a
    fresh full-screen screenshot and actually executes the predicted action via
    pyautogui; with is_sim=True a fixed screenshot is reused and actions are
    only visualized.
    """
    is_sim = False
    # Task instruction for the first step (Chinese: "use the tool to empty the
    # recycle bin").
    user_query = '调用工具，清空回收站, '  # TASK
    mobile_prompt_vlm_infer = get_mobile_prompt_vlm_infer(is_use_api=True)

    total_steps = 10
    for tao_step in range(total_steps):
        print('-' * 100)
        print(f'tao_step:{tao_step}')
        if not is_sim:
            import pyautogui
            screenshot_path = rf"D:\code\other\LLMs\algorithms\remote_infer\img\test_screenshot{tao_step}.png"
            time.sleep(1)  # give the previous action time to take effect on screen
            screenshot = pyautogui.screenshot()  # full-screen PIL image (e.g. 1920x1080)
            screenshot.save(screenshot_path)
        else:
            screenshot_path = r"D:\code\other\LLMs\algorithms\remote_infer\img\test_screenshot1.png"
        if tao_step > 0:
            # Follow-up prompt (Chinese: "previous step finished; based on the
            # new screenshot give the next action as tool-call JSON only").
            user_query = '上一步已经执行完毕，请结合图像结果 给出下一步操作，直接给出工具调用的json格式，不要给出其他信息'
        output_text = mobile_prompt_vlm_infer(user_query, screenshot_path)  # Think
        parse_computer_use_action(output_text, screenshot_path, is_sim=is_sim)  # Act

def test_():
    """Manual pyautogui smoke test: double-click a point, then capture and
    display a full-screen screenshot."""
    import pyautogui

    # Other handy primitives for reference:
    #   pyautogui.click(x=500, y=300)          left click at (500, 300)
    #   pyautogui.rightClick(x=500, y=300)
    #   pyautogui.moveTo(500, 300, duration=1) move there over 1 second
    pyautogui.doubleClick(x=800, y=300)  # left double-click at (800, 300)

    screenshot = pyautogui.screenshot()  # full-screen PIL image (e.g. 1920x1080)
    screenshot.show()

if __name__ == '__main__':
    # Runs the end-to-end desktop-agent loop (takes real screenshots and
    # executes real mouse actions via pyautogui).
    test_computer_agent_qwen2_5vl_agent()
    # test_()