from openai import OpenAI
import base64
import json
import re
# from requests.packages.urllib3.util.retry import Retry
from urllib3.util import Retry
import requests
from requests.adapters import HTTPAdapter
from PIL import Image
from io import BytesIO
import imghdr
import os

# OpenAI secret key.
# SECURITY(review): a live-looking API key is hard-coded in source control.
# This credential should be revoked and loaded from an environment variable
# (the OpenAI() client reads OPENAI_API_KEY automatically); note this
# `secret_key` variable is not referenced anywhere in this file.
secret_key = "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"


# In-context (few-shot) example images used by get_instruction_first.
# Paths are relative to the process working directory — TODO confirm the
# script is always launched from the repository root.
img1 = "./gpt/assets/husky1.jpg"
img2 = "./gpt/assets/coffee.jpg"
img3 = "./gpt/assets/ski.jpg"
img4 = "./gpt/assets/husky.jpg"


def download_image(image_url, save_path):
    """Download an image from ``image_url``, resize it to 512x512 and save it.

    A file extension inferred from the URL text (default ``.webp``) is
    appended to ``save_path``.

    Args:
        image_url: Direct URL of the image to fetch.
        save_path: Destination path *without* extension.

    Returns:
        True if the image was downloaded, validated and saved; False otherwise.
    """
    # Infer the extension from the URL text; fall back to webp.
    # NOTE(review): this scans the whole URL (query string included), matching
    # the original behaviour, but can misclassify unusual URLs.
    if "jpg" in image_url:
        file_extension = ".jpg"
    elif "png" in image_url:
        file_extension = ".png"
    elif "jpeg" in image_url:
        file_extension = ".jpeg"
    else:
        file_extension = ".webp"
    save_path = save_path + file_extension

    try:
        # Retry transient server errors. The request MUST go through the
        # session — the original code called requests.get directly, which
        # silently bypassed the mounted retry adapter.
        session = requests.Session()
        retries = Retry(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])
        session.mount('https://', HTTPAdapter(max_retries=retries))
        # SECURITY(review): verify=False disables TLS certificate validation;
        # kept for compatibility with the original behaviour — consider
        # removing it once the target hosts are confirmed to have valid certs.
        response = session.get(image_url, stream=True, timeout=10, verify=False)
        response.raise_for_status()

        # Skip anything the server does not declare as an image.
        # (.get() may return None for a missing header — guard against that.)
        content_type = response.headers.get('Content-Type') or ''
        if 'image' not in content_type:
            print(f"跳过非图片链接: {image_url}")
            return False

        # Decode the payload with PIL to confirm it really is an image.
        img = Image.open(BytesIO(response.content))

        # Force 512x512; aspect ratio is intentionally not preserved.
        img = img.resize((512, 512), Image.LANCZOS)

        # JPEG cannot store an alpha/palette channel; convert before saving.
        if file_extension in (".jpg", ".jpeg") and img.mode != "RGB":
            img = img.convert("RGB")

        img.save(save_path)

        # Re-open and verify the written file to catch truncation/corruption.
        # Replaces the deprecated ``imghdr`` check (removed in Python 3.13).
        try:
            with Image.open(save_path) as saved:
                saved.verify()
        except Exception:
            print(f"下载图片格式不正确: {image_url}")
            os.remove(save_path)
            return False

        print(f"图片已保存：{save_path}")
        return True

    except Exception as e:
        print(f"下载链接响应出错 {image_url}. 错误信息: {e}")
        return False


def get_search_keywords(query):
    """Expand ``query`` into progressively simplified search variants via GPT.

    The system prompt instructs gpt-4o-mini to output a numbered list such as
    ``1. "Cat on a windowsill."`` ... ``5. "Cat."``; the numbered,
    double-quoted items are then extracted with a regex.

    Args:
        query: The original search text to expand.

    Returns:
        List of variant strings (numbering, quotes and trailing period
        stripped); empty if the model did not follow the output format.
    """
    client = OpenAI()
    print("################################################")
    print("请求LLM中......    输入查询：" + query)
    print("################################################")
    completion = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {
                "role": "system",
                "content": [
                    {
                        "type": "text",
                        "text": "Rewrite the given text by simplifying and altering its components while maintaining "
                                "the essence of the subject. Each rewritten version should progressively simplify the "
                                "content, ending with the main subject or a general version of it.\n## Input\n- A "
                                "sentence describing a scene or activity.\n## Output\n- A list of progressively "
                                "simplified versions of the input sentence, ending with the main subject or a general "
                                "version of it.\n## Examples\ninput: \"A cat sitting on a windowsill looking at the "
                                "rain.\"\noutput:\n1. \"Cat on a windowsill observing the rain.\"\n2. \"Cat looking "
                                "at the rain.\"\n3. \"Animal on a windowsill.\"\n4. \"Cat on a windowsill.\"\n5. "
                                "\"Cat.\"\ninput: \"Children playing soccer in the park on a sunny "
                                "day.\"\noutput:\n1. \"Children playing in the park.\"\n2. \"Kids playing soccer on a "
                                "sunny day.\"\n3. \"Youngsters in the park.\"\n4. \"Children in the park.\"\n5. "
                                "\"Children.\""
                    }
                ]
            },
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": query
                    }
                ]
            }
        ],
    )

    message = completion.choices[0].message.content
    print("GPT: " + message)
    # Match numbered items wrapped in double quotes ending with a period,
    # e.g. `1. "Husky on the snow."` captures `Husky on the snow`.
    pattern = r'\d+\.\s*"(.*?)\."\s*'
    # re.findall already returns a list — no wrapping comprehension needed.
    result = re.findall(pattern, message, re.MULTILINE)
    print("################################################")
    print("获取到扩展后的检索语句:")
    for sentence in result:
        print(sentence)
    print("################################################")
    return result


def filter_image(image, text):
    """Ask GPT-4o whether an image is usable (no watermark / title text).

    Args:
        image: Path to the image file to screen.
        text: Accompanying text sent alongside the image.

    Returns:
        True if the model answered ``<result>yes</result>``; False if it
        answered no, or if the reply contained no ``<result>`` tag at all
        (the original version implicitly returned None in that case).
    """
    client = OpenAI()
    print("################################################")
    print("image: " + image)
    print("text: " + text)
    print("请求LLM中......    ")
    print("################################################")
    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {
                "role": "system",
                "content": [
                    {
                        "type": "text",
                        "text": "Filter images by passing them and accompanying text, then evaluate their suitability "
                                "based on specific criteria.\n\n- Describe the image to capture its main elements and "
                                "context, using <description></description> inclusion.\n- Evaluate the image for the "
                                "following conditions:\n1. The image contains a watermark.\n2. The image contains "
                                "obvious title text. Return "
                                "<result>no</result> if the image meets one or more of the above conditions, "
                                "otherwise return <result>yes</result>"
                    }
                ]
            },
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{encode_image(image)}"
                        }
                    },
                    {
                        "type": "text",
                        "text": text
                    }
                ]
            }
        ],
    )

    message = completion.choices[0].message.content
    pattern = r'<result>(.*?)</result>'
    matches = re.search(pattern, message)
    if not matches:
        # The model ignored the required output format — treat as rejected
        # instead of silently returning None as before.
        print("No <result> tag in LLM reply for " + image)
        return False
    # Case-insensitive compare also accepts "YES"/"yES" etc.
    if matches.group(1).lower() == "yes":
        print("===============" + image + "通过LLM筛选" + "=================")
        return True
    print("===============" + image + "这是一张坏图片" + "===============")
    return False


def encode_image(image_path):
    """Return the file at ``image_path`` as a base64 string (UTF-8 decoded)."""
    with open(image_path, "rb") as fh:
        raw_bytes = fh.read()
    return base64.b64encode(raw_bytes).decode('utf-8')


def get_instruction_first(target_image, attention_heatmap_image, input_text):
    """Ask GPT-4o for first-stage editing instructions (background only).

    The request embeds four in-context examples (img1-img4) demonstrating how
    to compare the image background with the background described in the
    text, answering inside an <instruction> tag.

    Args:
        target_image: Path of the image to analyze.
        attention_heatmap_image: Not used in this function body — TODO confirm
            whether callers still need to pass it.
        input_text: Text description the image should match.

    Returns:
        List of strings captured from <instruction>...</instruction> tags in
        the model reply (may be empty if the model ignored the format).
    """
    client = OpenAI()
    print("################################################")
    print("target_image: " + target_image)
    print("input_text: " + input_text)
    print("请求LLM中......    ")
    print("################################################")
    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {
                "role": "system",
                "content": [
                    {
                        "type": "text",
                        "text": "# Role:\nYou are a master image editor.\n## Objective:\n- Analyze the received image "
                                "and text to determine the context they each express.\n- Based on the analysis "
                                "results, generate editing instructions to match the image background with the text "
                                "background.\n## Skills:\n- Image analysis ability to identify and understand the "
                                "background in the image.\n- Text parsing ability to accurately understand the "
                                "background information described in the text.\n## Workflow:\n1. **Image "
                                "Analysis**:\n- Carefully observe the received image to identify and determine the "
                                "background in the image.\n2. **Text Analysis**:\n- Read and analyze the provided "
                                "text to determine whether the text clearly describes the background. If so, "
                                "determine the background content described in the text.\n3. **Comparative "
                                "Analysis**:\n- Compare the image background and the text background to identify the "
                                "differences between them.\n4. **Instruction Generation**:\n- If the text background "
                                "does not match the image background, generate specific editing instructions to make "
                                "the image background match the background described in the text.\n- If the text does "
                                "not clearly give the background or the text background matches the image background, "
                                "the instruction is \"no editing required\".\n## Constraints:\n- Generated "
                                "instructions must only target the background of the image and must not modify any "
                                "other entities in the image, including adding or altering elements related to "
                                "entities.\n- Instructions must be wrapped in the <instruction>"
                                "tag, keep it concise, and minimize the number of instructions.\n## Output format:\n- "
                                "Instructions should be clear, concise, and wrapped in the <instruction> tag.\n- If "
                                "no editing is required, output <instruction>No editing required</instruction>."
                    }
                ]
            },
            # Few-shot example 1: background mismatch (beach -> snow).
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{encode_image(img1)}"
                        }
                    },
                    {
                        "type": "text",
                        "text": "Husky on the snow wearing a straw hat."
                    }
                ]
            },
            {
                "role": "assistant",
                "content": [
                    {
                        "type": "text",
                        "text": "Image Background: Beach.\nText Background: Snow.\n<instruction>Change the background to a snowy landscape.</instruction>"
                    }
                ]
            },
            # Few-shot example 2: background mismatch (table -> grass).
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{encode_image(img2)}"
                        }
                    },
                    {
                        "type": "text",
                        "text": "A cup of coffee on the grass."
                    }
                ]
            },
            {
                "role": "assistant",
                "content": [
                    {
                        "type": "text",
                        "text": "Image Background: Wooden table.\nText Background: Grass.\n<instruction>Change the background to grass.</instruction>"
                    }
                ]
            },
            # Few-shot example 3: backgrounds already match -> no edit.
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{encode_image(img3)}"
                        }
                    },
                    {
                        "type": "text",
                        "text": "A corgi skiing on the snow"
                    }
                ]
            },
            {
                "role": "assistant",
                "content": [
                    {
                        "type": "text",
                        "text": "Image Background: Snow.\nText Background: Snow.\n<instruction>no editing required.</instruction>"
                    }
                ]
            },
            # Few-shot example 4: text gives no background -> no edit.
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{encode_image(img4)}"
                        }
                    },
                    {
                        "type": "text",
                        "text": "Husky is lying down and thinking."
                    }
                ]
            },
            {
                "role": "assistant",
                "content": [
                    {
                        "type": "text",
                        "text": "Image Background: Snow.\nText Background: Not clearly given.\n<instruction>no editing required.</instruction>"
                    }
                ]
            },
            # The actual query: target image + target text.
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{encode_image(target_image)}"
                        }
                    },
                    {
                        "type": "text",
                        "text": input_text
                    }
                ]
            }
        ],
    )

    # Parse the reply: collect every <instruction>...</instruction> payload.
    pattern = r"<instruction>(.*?)</instruction>"
    results = re.findall(pattern, completion.choices[0].message.content)
    print("################################################")
    print("第一阶段编辑指令：")
    print(results)
    print("################################################")
    return results


def get_instruction_second(target_image, attention_heatmap_image, input_text):
    """Ask GPT-4o for second-stage editing instructions (entity elements only).

    Unlike get_instruction_first, the prompt forbids background changes and
    uses no few-shot examples.

    Args:
        target_image: Path of the image to analyze.
        attention_heatmap_image: Not used in this function body — TODO confirm
            whether callers still need to pass it.
        input_text: Text description the image should match.

    Returns:
        List of strings captured from <instruction>...</instruction> tags in
        the model reply (may be empty if the model ignored the format).
    """
    client = OpenAI()
    print("################################################")
    print("target_image: " + target_image)
    print("input_text: " + input_text)
    print("请求LLM中......    ")
    print("################################################")
    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {
                "role": "system",
                "content": [
                    {
                        "type": "text",
                        "text": "# Role:\nYou are an image editing master.\n## Objective:\n- Analyze the received "
                                "image and text to determine the context they each express.\n- Based on the analysis "
                                "results, generate editing instructions to match image entity elements with text "
                                "entity elements.\n## Skills:\n- Image analysis ability to identify and understand "
                                "entity elements in images.\n- Text parsing ability to accurately understand the "
                                "entity element information described in the text.\n## Workflow:\n1. **Image "
                                "Analysis**:\n- Carefully observe the received image to identify and determine the "
                                "entity elements in the image.\n2. **Text Analysis**:\n- Read and analyze the "
                                "provided text to determine whether the text clearly describes the entity element "
                                "information. If so, determine the entity element content described in the text.\n3. "
                                "**Comparative Analysis**:\n- Compare image entity elements with text entity elements "
                                "to identify the differences between them.\n4. **Instruction Generation**:\n- If the "
                                "text entity elements do not match the image entity elements, generate specific "
                                "editing instructions to match the image entity elements with the entity elements "
                                "described in the text.\n- If the text does not explicitly give a property element, "
                                "or the text property element matches the image property element, the instruction is "
                                "\"no editing required\".\n- Make sure all editing instructions strictly avoid "
                                "suggesting changes to the image background.\n## Constraints:\n- The generated "
                                "instructions must only target the image property element and must not modify the "
                                "image background.\n- The instructions must be wrapped in <instruction> tags. Keep it "
                                "simple and minimize the number of instructions.\n## Output format:\n- Output the "
                                "property element information of the image, ignoring the background information, "
                                "the property element information of the text, ignoring the background information, "
                                "and the editing instructions.\n- The instructions should be concise and clear, "
                                "wrapped in <instruction> tags, and if there are multiple instructions, wrapped in "
                                "multiple <instruction> tags.\n- If no editing is required, output <instruction>no "
                                "editing required.</instruction>."
                    }
                ]
            },
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{encode_image(target_image)}"
                        }
                    },
                    {
                        "type": "text",
                        "text": input_text
                    }
                ]
            }
        ],
    )

    # Parse the reply: collect every <instruction>...</instruction> payload.
    pattern = r"<instruction>(.*?)</instruction>"
    results = re.findall(pattern, completion.choices[0].message.content)
    print("################################################")
    print("第二阶段编辑指令：")
    print(results)
    print("################################################")
    return results


def generate_image_dall_e3(prompt, save_path, backups_save_path, *, size="1024x1024", quality="standard"):
    """Generate an image with DALL-E 3 and download it to ``backups_save_path``.

    Args:
        prompt: Text prompt for the generation.
        save_path: Not used in this function body — kept for backward
            compatibility with existing callers; TODO confirm it can be removed.
        backups_save_path: Destination path (without extension) for the
            downloaded image; download_image appends the extension.
        size: Image size accepted by the API (keyword-only; previous
            hard-coded value kept as default).
        quality: Render quality (keyword-only; previous hard-coded value
            kept as default).

    Returns:
        The generated image URL (new; the previous version returned None).
    """
    client = OpenAI()
    print("################################################")
    print("prompt: " + prompt)
    print("请求Dall-E-3中......    ")
    print("################################################")
    response = client.images.generate(
        model="dall-e-3",
        prompt=prompt,
        size=size,
        quality=quality,
        n=1,
    )
    url = response.data[0].url
    print("################################################")
    print("Dall-E-3生成图片成功，下载链接：" + url)
    # download_image returns False on failure — surface it instead of
    # unconditionally reporting success as before.
    if download_image(url, backups_save_path):
        print("图片下载至" + backups_save_path)
    else:
        print("Download failed for generated image: " + url)
    print("################################################")
    return url


def get_diff_edit_instruction(target_image, input_text):
    """Ask GPT-4o for per-object difference edits between image and text.

    The model is prompted to emit one JSON object per inconsistency (keys:
    "Title", "Edit", "Before Object", "Object"), which parse_llm_result then
    extracts from the free-form reply.

    Args:
        target_image: Path of the image to analyze.
        input_text: Text description the image should match.

    Returns:
        List of instruction dicts as produced by parse_llm_result (may be
        empty if the model reported no edits or ignored the format).
    """
    client = OpenAI()
    print("################################################")
    print("target_image: " + target_image)
    print("input_text: " + input_text)
    print("请求LLM中......    ")
    print("################################################")
    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {
                "role": "system",
                "content": [
                    {
                        "type": "text",
                        "text": """
                        ## Task Description
                        You are an intelligent image editing assistant. I will provide you with an image and a text 
                        description. Your task is to identify any inconsistencies between the image content and the 
                        text description, then generate specific editing instructions to make the image align with 
                        the text.
                        Each editing instruction should:
                        1. Focus on ONE inconsistency at a time
                        2. Clearly identify both the original object and the target object
                        3. Follow the specified format
                        ## Editing Format
                        For each edit, provide your response in this format:
                        {
                          "Title": "Brief description of current image state",
                          "Edit": "Description of the desired change",
                          "Before Object": "The object that needs to be replaced",
                          "Object": "The specific object to be added/edited"
                        }
                        ## Important Rules
                        - Each editing instruction must address only ONE object modification
                        - Multiple edits should be presented as separate instructions in sequence
                        - There is no priority for which inconsistency to address first
                        - Be specific about both the original object being replaced and the new object
                        - Include "Before Object" even in cases of addition (use "none" if nothing exists in that position)
                        - If the image already perfectly matches the text description, state that no edits are needed
                        ## Examples
                        ### Example 1:
                        **Text Description:** "A man working at a computer with a cup on the desk."
                        **Image Content:** A man at a desk with a fan but no computer.
                        **First Editing Instruction:**
                        {
                          "Title": "A man working at a desk",
                          "Edit": "A man working at a computer",
                          "Before Object": "none",
                          "Object": "computer"
                        }
                        **Second Editing Instruction:**
                        {
                          "Title": "A man working at a computer with a fan on the desk",
                          "Edit": "A man working at a computer with a cup on the desk",
                          "Before Object": "fan",
                          "Object": "cup"
                        }
                        ### Example 2:
                        **Text Description:** "A woman reading a book in a garden with a dog nearby."
                        **Image Content:** A woman reading a magazine in a garden with a cat.
                        **First Editing Instruction:**
                        {
                          "Title": "A woman reading a magazine in a garden",
                          "Edit": "A woman reading a book in a garden",
                          "Before Object": "magazine",
                          "Object": "book"
                        }
                        **Second Editing Instruction:**
                        {
                          "Title": "A woman reading a book in a garden with a cat",
                          "Edit": "A woman reading a book in a garden with a dog",
                          "Before Object": "cat",
                          "Object": "dog"
                        }
                        """
                    }
                ]
            },
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{encode_image(target_image)}"
                        }
                    },
                    {
                        "type": "text",
                        "text": input_text
                    }
                ]
            }
        ],
    )
    # Extract the structured edit instructions from the free-form reply.
    result = parse_llm_result(completion.choices[0].message.content)
    print("################################################")
    print("差异化编辑指令如下：")
    print(result)
    print("################################################")
    return result


def parse_llm_result(response_text):
    """Extract edit-instruction dicts from a free-form LLM reply.

    Scans ``response_text`` for JSON-looking objects containing the keys
    "Title", "Edit", "Before Object" and "Object" (in that order). Each
    candidate is first parsed as JSON; if that fails, the four fields are
    pulled out individually with regexes.

    Args:
        response_text: Raw text of the model's reply.

    Returns:
        List of dicts, each with the four expected keys; candidates missing
        any key are dropped.
    """
    instructions = []
    required_keys = ("Title", "Edit", "Before Object", "Object")

    # Candidate blocks: a brace-delimited span mentioning all four keys in
    # order (DOTALL so the span may cross lines).
    json_blocks = re.findall(
        r'\{.*?"Title".*?"Edit".*?"Before Object".*?"Object".*?\}',
        response_text,
        re.DOTALL,
    )

    for block in json_blocks:
        # Best-effort repair: swap single quotes for double quotes so
        # almost-JSON parses. NOTE: this corrupts apostrophes inside values
        # (e.g. "man's"); such blocks fail JSON parsing and fall through to
        # the regex path below, which handles them correctly.
        try:
            instruction = json.loads(block.replace("'", '"'))
        except ValueError:  # json.JSONDecodeError is a ValueError subclass
            instruction = None

        if instruction is not None and all(k in instruction for k in required_keys):
            instructions.append(instruction)
            continue

        # Fallback: extract each field individually from the original block.
        field_matches = {
            key: re.search(rf'"{key}"\s*:\s*"([^"]*)"', block)
            for key in required_keys
        }
        if all(field_matches.values()):
            instructions.append(
                {key: m.group(1) for key, m in field_matches.items()}
            )

    return instructions



