import os
import sys
import time
import copy
import shutil

from mobileagent.api import inference_chat, generate_image_caption
from mobileagent.controller import get_screenshot, tap, slide, type, back, home
from mobileagent.prompt import get_action_prompt, get_reflect_prompt, get_memory_prompt, get_process_prompt
from mobileagent.chat import init_action_chat, init_reflect_chat, init_memory_chat, add_response, add_response_two_image

####################################### Edit your Setting #########################################
# Maximum number of decision steps before the run is aborted as failed.
retries = 8

# Language of the operational hint injected into every prompt: "en" or "zh".
prompt_lang = "en"  # zh

# Your ADB path (the adb executable used to drive the device).
adb_path = r"..\platform-tools\adb.exe"

# Base URL of the OpenAI-compatible endpoint serving the GPT-4o decision/reflection models.
API_url = "http://10.107.105.207:48030/v1"

# API token for the endpoint above ("EMPTY" for servers that do not check auth).
token = "EMPTY"

# Choose between "api" and "local". api: use the qwen api. local: use the local qwen checkpoint.
caption_call_method = "api"

# Caption model. With the api method choose "qwen-vl-plus" or "qwen-vl-max";
# with the local method choose "qwen-vl-chat" or "qwen-vl-chat-int4".
caption_model = "qwen2-vl-72b-instruct"

# If you choose the api caption call method, input your Qwen api key here.
qwen_api = ""

# Operational knowledge appended to prompts to help the agent operate more accurately.
if prompt_lang == "zh":
    add_info = "如果你想要点击一个应用的图标，可以使用动作\"Open app\"。如果想要退出一个应用，可以使用动作\"Home\""
else:
    add_info = "If you want to tap an icon of an app, use the action \"Open app\". If you want to exit an app, use the action \"Home\""

# Reflection setting: disabling the reflection agent improves speed but may reduce the success rate.
reflection_switch = True

# Memory setting: disabling the memory unit improves speed but may reduce the success rate.
memory_switch = False
###################################################################################################

class MobileAgent:
    """Drives an Android device over ADB to fulfil a natural-language
    instruction, using a GPT-4o decision agent plus optional reflection,
    memory and planning agents.

    The working screenshots for the current and previous step are kept under
    ``<output_base_path>/screenshot`` (or ``./screenshot`` when no base path
    is given).
    """

    # Class-level defaults; every instance overwrites these in __init__.
    output_base_path = ''
    screenshot_path = ''
    screenshot_file = ''
    last_screenshot_file = ''

    def __init__(self, output_base_path: str = ''):
        """Prepare the screenshot working directory.

        Args:
            output_base_path: Optional directory under which the
                ``screenshot`` working folder is created.
        """
        self.output_base_path = output_base_path
        self.screenshot_path = os.path.join(output_base_path, "screenshot") if output_base_path else "screenshot"
        self.screenshot_file = os.path.join(self.screenshot_path, "screenshot.jpg")
        self.last_screenshot_file = os.path.join(self.screenshot_path, "last_screenshot.jpg")

        # makedirs (not mkdir) so a nested output_base_path works even when
        # intermediate directories are missing; exist_ok avoids the
        # check-then-create race of an explicit os.path.exists() test.
        os.makedirs(self.screenshot_path, exist_ok=True)

    @staticmethod
    def get_perception_infos(adb_path: str, screenshot_file: str) -> tuple:
        """Capture a screenshot via ADB and run BLIP captioning on it.

        The screenshot is retried once after a short pause if the first
        attempt raises; a second failure propagates to the caller.

        Returns:
            Whatever ``generate_image_caption`` returns — callers unpack it
            as ``(perception_infos, width, height)``.
        """
        try:
            get_screenshot(adb_path, screenshot_file)
        except Exception as e:
            print(str(e))
            time.sleep(1)
            print("Try get_screenshot again")
            get_screenshot(adb_path, screenshot_file)

        return generate_image_caption(screenshot_file, 'BLIP')

    def run(self, instruction: str) -> str:
        """Execute *instruction* on the device, one decision step at a time.

        Each step asks GPT-4o for the next action, performs it through ADB,
        then (when enabled) reflects on the before/after screenshots and
        updates the running plan. The loop ends on an explicit "Stop"/"Fail"
        action or after ``retries`` steps.

        Args:
            instruction: The natural-language task to carry out.

        Returns:
            The full transcript (decision / memory / reflection / planning
            outputs) joined with newlines.
        """
        result = []

        error_flag = False
        thought_history = []
        summary_history = []
        action_history = []
        summary = ""
        action = ""
        completed_requirements = ""
        memory = ""
        insight = ""

        step = 0  # decision-step counter (renamed from `iter`, which shadowed the builtin)
        while True:
            step += 1
            # With the reflection agent disabled, the post-action UI must be
            # re-perceived here so the next decision sees the current screen;
            # on the very first step a perception pass is always needed.
            if step == 1 or (step != 1 and not reflection_switch):
                perception_infos, width, height = self.get_perception_infos(adb_path, self.screenshot_file)

                # Detect the soft keyboard: look for an "ADB Keyboard" element
                # in the bottom 10% of the screen.
                keyboard = False
                keyboard_height_limit = 0.9 * height
                for perception_info in perception_infos:
                    if perception_info['coordinates'][1] < keyboard_height_limit:
                        continue
                    if 'ADB Keyboard' in perception_info['text']:
                        keyboard = True
                        break

            # --- Decision: ask GPT-4o for the next thought / operation / action ---
            prompt_action = get_action_prompt(instruction, perception_infos, width, height, keyboard, summary_history,
                                              action_history, summary, action, add_info, error_flag,
                                              completed_requirements, memory)
            chat_action = init_action_chat()
            chat_action = add_response("user", prompt_action, chat_action, self.screenshot_file)

            output_action = inference_chat(chat_action, 'gpt-4o', API_url, token)
            thought = output_action.split("### Thought ###")[-1].split("### Action ###")[0].replace("\n", " ").replace(
                ":", "").replace("  ", " ").strip()
            summary = output_action.split("### Operation ###")[-1].replace("\n", " ").replace("  ", " ").strip()
            action = output_action.split("### Action ###")[-1].split("### Operation ###")[0].replace("\n", " ").replace(
                "  ", " ").strip()
            chat_action = add_response("assistant", output_action, chat_action)
            status = "#" * 50 + " Decision " + "#" * 50
            print(status)
            print(output_action)
            print('#' * len(status))
            result.append(status)
            result.append(output_action)
            result.append('#' * len(status))

            # --- Memory: optionally distil "important content" for later steps ---
            if memory_switch:
                prompt_memory = get_memory_prompt(insight)
                chat_action = add_response("user", prompt_memory, chat_action)
                output_memory = inference_chat(chat_action, 'gpt-4o', API_url, token)
                chat_action = add_response("assistant", output_memory, chat_action)
                status = "#" * 50 + " Memory " + "#" * 50
                print(status)
                print(output_memory)
                print('#' * len(status))
                result.append(status)
                result.append(output_memory)
                result.append('#' * len(status))
                output_memory = output_memory.split("### Important content ###")[-1].split("\n\n")[0].strip() + "\n"
                if "None" not in output_memory and output_memory not in memory:
                    memory += output_memory

            # --- Execute the chosen action through ADB ---
            if "Open app" in action:
                app_name = action.split("(")[-1].split(")")[0]
                # Tap a fixed 70 px above the matched label, aiming at the icon
                # that sits above it — TODO confirm the offset across DPIs.
                for perception_info in perception_infos:
                    if perception_info['text'].startswith('text: '):
                        text = perception_info['text'].split(':', 1)[1].strip()
                        if app_name == text:
                            tap(adb_path, perception_info['coordinates'][0], perception_info['coordinates'][1] - 70)
                            break

            elif "Tap" in action:
                coordinate = action.split("(")[-1].split(")")[0].split(", ")
                x, y = int(coordinate[0]), int(coordinate[1])
                # Log which perceived element (if any) lives at the tap point.
                for perception_info in perception_infos:
                    if perception_info['coordinates'][0] == x and perception_info['coordinates'][1] == y:
                        print("Tap {}".format(perception_info['text']))
                tap(adb_path, x, y)

            elif "Swipe" in action:
                coordinate1 = action.split("Swipe (")[-1].split("), (")[0].split(", ")
                coordinate2 = action.split("), (")[-1].split(")")[0].split(", ")
                x1, y1 = int(coordinate1[0]), int(coordinate1[1])
                x2, y2 = int(coordinate2[0]), int(coordinate2[1])
                slide(adb_path, x1, y1, x2, y2)

            elif "Type" in action:
                if "(text)" not in action:
                    text = action.split("(")[-1].split(")")[0]
                else:
                    text = action.split(" \"")[-1].split("\"")[0]
                type(adb_path, text)
                back(adb_path)  # dismiss the keyboard after typing

            elif "Back" in action:
                back(adb_path)

            elif "Home" in action:
                home(adb_path)

            elif "Stop" in action:
                print("Step result: {}".format('Succeed'))
                result.append("Step result: {}".format('Succeed'))
                break

            elif "Fail" in action:
                print("Step result: {}".format('Failed'))
                result.append("Step result: {}".format('Failed'))
                break

            time.sleep(2)  # give the UI time to settle before re-capturing

            # Keep the pre-action state for the reflection comparison.
            last_perception_infos = copy.deepcopy(perception_infos)
            last_keyboard = keyboard
            if os.path.exists(self.last_screenshot_file):
                os.remove(self.last_screenshot_file)
            os.rename(self.screenshot_file, self.last_screenshot_file)

            if reflection_switch:
                # --- Reflection: compare before/after screens and grade the action ---
                perception_infos, width, height = self.get_perception_infos(adb_path, self.screenshot_file)

                keyboard = False
                for perception_info in perception_infos:
                    if perception_info['coordinates'][1] < keyboard_height_limit:
                        continue
                    if 'ADB Keyboard' in perception_info['text']:
                        keyboard = True
                        break

                prompt_reflect = get_reflect_prompt(instruction, last_perception_infos, perception_infos, width, height,
                                                    last_keyboard, keyboard, summary, action, add_info)
                chat_reflect = init_reflect_chat()
                chat_reflect = add_response_two_image("user", prompt_reflect, chat_reflect,
                                                      [self.last_screenshot_file, self.screenshot_file])

                output_reflect = inference_chat(chat_reflect, 'gpt-4o', API_url, token)
                # The model sometimes emits "### Answer" instead of "### Answer ###",
                # so parse leniently: take the last token after the word "Answer".
                reflect = output_reflect.split("Answer")[-1].strip().split()[-1]
                chat_reflect = add_response("assistant", output_reflect, chat_reflect)
                status = "#" * 50 + " Reflection " + "#" * 50
                print(status)
                print(output_reflect)
                print('#' * len(status))
                result.append(status)
                result.append(output_reflect)
                result.append('#' * len(status))

                if 'A' in reflect:
                    # A = action succeeded: record it and update the plan.
                    thought_history.append(thought)
                    summary_history.append(summary)
                    action_history.append(action)

                    prompt_planning = get_process_prompt(instruction, thought_history, summary_history, action_history,
                                                         completed_requirements, add_info)
                    chat_planning = init_memory_chat()
                    chat_planning = add_response("user", prompt_planning, chat_planning)
                    output_planning = inference_chat(chat_planning, 'gpt-4-turbo', API_url, token)
                    chat_planning = add_response("assistant", output_planning, chat_planning)
                    status = "#" * 50 + " Planning " + "#" * 50
                    print(status)
                    print(output_planning)
                    print('#' * len(status))
                    result.append(status)
                    result.append(output_planning)
                    result.append('#' * len(status))
                    completed_requirements = output_planning.split("### Completed contents ###")[-1].replace("\n",
                                                                                                             " ").strip()

                    error_flag = False

                elif 'B' in reflect:
                    # B = wrong action: flag the error and undo with Back.
                    error_flag = True
                    back(adb_path)

                elif 'C' in reflect:
                    # C = action had no effect: flag the error, stay on screen.
                    error_flag = True

                else:
                    print('Not expected reflect')

            else:
                # With reflection disabled, still capture the post-action
                # screen as this step's result screenshot.
                get_screenshot(adb_path, self.screenshot_file)

                thought_history.append(thought)
                summary_history.append(summary)
                action_history.append(action)

                prompt_planning = get_process_prompt(instruction, thought_history, summary_history, action_history,
                                                     completed_requirements, add_info)
                chat_planning = init_memory_chat()
                chat_planning = add_response("user", prompt_planning, chat_planning)
                output_planning = inference_chat(chat_planning, 'gpt-4-turbo', API_url, token)
                chat_planning = add_response("assistant", output_planning, chat_planning)
                status = "#" * 50 + " Planning " + "#" * 50
                print(status)
                print(output_planning)
                print('#' * len(status))
                result.append(status)
                result.append(output_planning)
                result.append('#' * len(status))
                completed_requirements = output_planning.split("### Completed contents ###")[-1].replace("\n",
                                                                                                         " ").strip()

            os.remove(self.last_screenshot_file)

            # Abort after `retries` steps to bound the run length.
            if step >= retries:
                print('尝试了{}次，强制退出！'.format(retries))
                print("Step result: {}".format('Failed'))
                result.append('尝试了{}次，强制退出！'.format(retries))
                result.append("Step result: {}".format('Failed'))
                break

        return '\n'.join(result)

if __name__ == '__main__':
    # Validate the CLI: exactly one argument (the instruction) is required;
    # previously a missing argument crashed with a bare IndexError.
    if len(sys.argv) < 2:
        print("Usage: python {} <instruction>".format(sys.argv[0]))
        sys.exit(1)
    ma = MobileAgent()
    ma.run(sys.argv[1])