import glob
import re
import sys
import time
import traceback
from threading import Thread

# Inference with a vision-language model (Qwen2.5-VL)

from PIL import Image, ImageDraw, ImageFont
from PIL import ImageColor
# sys.path.append(r'D:\SOFT\Python38\Lib\site-packages')

# pip install transformers==4.46.2
# pip install qwen-vl-utils
from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor, TextIteratorStreamer
from qwen_vl_utils import process_vision_info
from transformers import Qwen2_5_VLForConditionalGeneration

import json

'''
qwen2.5-vl inference
'''


def get_vlm_infer():
    """Load a local Qwen2.5-VL checkpoint and return an `infer` closure.

    The model and processor are loaded once here and captured by the returned
    closure, so callers can run many inferences without reloading.

    Returns:
        infer(messages, is_stream=False, tools_schema=None): runs one
        multimodal chat completion (optionally streaming, optionally with a
        tool schema injected into the chat template).
    """
    device = 'cuda'
    # device = 'cpu'
    # model_path = r'D:\code\other\LLMs\models\Qwen2-VL-2B-Instruct'
    # model_path = r'D:\code\other\LLMs\models\Qwen2.5-VL-3B-Instruct'
    model_path = r'D:\CODE\ZXC\LLMs\models\Qwen2___5-VL-3B-Instruct'
    # default: load the model on the requested device(s)
    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        model_path, torch_dtype="auto", device_map=device
    )

    # flash_attention_2 is recommended for better speed/memory, especially in
    # multi-image and video scenarios:
    # model = Qwen2VLForConditionalGeneration.from_pretrained(
    #     "Qwen/Qwen2-VL-2B-Instruct",
    #     torch_dtype=torch.bfloat16,
    #     attn_implementation="flash_attention_2",
    #     device_map="auto",
    # )

    # default processor
    processor = AutoProcessor.from_pretrained(model_path)

    # The default visual-token budget per image is 4-16384; set min_pixels /
    # max_pixels (e.g. a 256-1280 token range) to balance speed and memory:
    # min_pixels = 256*28*28
    # max_pixels = 1280*28*28
    # processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels)

    def infer(messages=None, is_stream=False, tools_schema=None):
        """Run one multimodal chat completion.

        Args:
            messages: Qwen-style chat messages (role/content dicts; content
                items may be image/video/text entries). Defaults to empty.
            is_stream: when True, return a TextIteratorStreamer yielding
                decoded text chunks; otherwise return the full decode.
            tools_schema: optional tool definitions forwarded to the chat
                template (function-calling prompt).

        Returns:
            list[str] with one decoded completion, or a TextIteratorStreamer.
        """
        # BUGFIX: the original used a mutable default argument (`messages=[]`).
        if messages is None:
            messages = []

        # Preprocess: render the chat template (contains one image_pad /
        # video_pad placeholder per media item), then load + resize the media.
        text = processor.apply_chat_template(
            messages, tools=tools_schema, tokenize=False, add_generation_prompt=True
        )
        print(text)
        image_inputs, video_inputs = process_vision_info(messages)  # load imgs, resize
        inputs = processor(
            text=[text],  # still placeholder text, not yet tokenized/embedded
            images=image_inputs,  # patched into flattened vision tokens
            videos=video_inputs,
            padding=True,
            return_tensors="pt",
        )  # expands each image_pad/video_pad 1->n patch tokens, then tokenizes
        inputs = inputs.to(device)  # dict: 'input_ids', 'pixel_values', ...

        if not is_stream:
            # Blocking generation: vision tower -> embeddings merged into the
            # text embeddings -> language model decode.
            generated_ids = model.generate(**inputs, max_new_tokens=5120,
                                           synced_gpus=False)
            # Strip the prompt tokens from each generated sequence.
            generated_ids_trimmed = [
                out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
            ]
            output_text = processor.batch_decode(
                generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
            )
            return output_text  # [str]
        else:
            # Streaming: run generate() on a worker thread; tokens are pushed
            # into the streamer's internal queue and consumed by iterating it.
            streamer = TextIteratorStreamer(tokenizer=processor, skip_special_tokens=True, skip_prompt=True,
                                            timeout=50)  # seconds
            kwargs = {**inputs, 'streamer': streamer, 'max_new_tokens': 5120,
                      'synced_gpus': False}
            gen_thread = Thread(target=model.generate, kwargs=kwargs)
            gen_thread.start()  # TODO: thread lifecycle — no join/daemon handling here
            # consume with: for chunk in streamer: ...
            return streamer

    return infer


def test_get_vlm_infer():
    """Smoke-test the local VLM: ask for tray boxes + labels on a sample image."""
    infer = get_vlm_infer()
    prompt = "这是人员过安检场景，乘客将各自的行李放置在灰色框里，每个灰色框上贴有编号，以json格式输出每个框的坐标和编号"
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": "000100.jpg",
                    "resized_height": 640,
                    "resized_width": 640,
                },
                {"type": "text", "text": prompt},
            ],
        }
    ]
    started = time.time()
    result = infer(messages, is_stream=False)
    print(result)
    print(f'{time.time() - started}s')


def test_get_vlm_inferx():  # includes drawing logic
    """End-to-end local test: prompt the VLM for person/tray boxes as JSON
    (per an inline schema), parse the possibly-fenced JSON answer, and draw
    the boxes on the image."""
    import ast
    from PIL import Image, ImageDraw, ImageFont
    from PIL import ImageColor
    import xml.etree.ElementTree as ET
    additional_colors = [colorname for (colorname, colorcode) in ImageColor.colormap.items()]

    def parse_json(json_output):
        # Parsing out the markdown fencing
        lines = json_output.splitlines()
        for i, line in enumerate(lines):
            if line == "```json":
                json_output = "\n".join(lines[i + 1:])  # Remove everything before "```json"
                json_output = json_output.split("```")[0]  # Remove everything after the closing "```"
                break  # Exit the loop once "```json" is found
        return json_output

    def plot_bounding_boxes(im, bounding_boxes, input_width, input_height):
        """
        Plots bounding boxes on an image with markers for each a name, using PIL, normalized coordinates, and different colors.

        Args:
            im: a PIL.Image (drawn on in place and shown at the end).
            bounding_boxes: model output text containing a JSON list of dicts
                with "bbox_2d" [x1, y1, x2, y2] (in input_width x input_height
                coordinates) and optionally "label".
            input_width/input_height: the resized frame the model saw; boxes
                are rescaled from it to the actual image size.
        """

        # Load the image
        img = im
        width, height = img.size
        print(img.size)
        # Create a drawing object
        draw = ImageDraw.Draw(img)

        # Define a list of colors
        colors = [
                     'red',
                     'green',
                     'blue',
                     'yellow',
                     'orange',
                     'pink',
                     'purple',
                     'brown',
                     'gray',
                     'beige',
                     'turquoise',
                     'cyan',
                     'magenta',
                     'lime',
                     'navy',
                     'maroon',
                     'teal',
                     'olive',
                     'coral',
                     'lavender',
                     'violet',
                     'gold',
                     'silver',
                 ] + additional_colors

        # Parsing out the markdown fencing
        bounding_boxes = parse_json(bounding_boxes)

        # font = ImageFont.truetype(
        #     r"C:\Users\Dell\Downloads\Noto_Sans_SC,Roboto\Noto_Sans_SC\static\NotoSansSC-Regular.ttf", size=14)
        font = None
        try:
            json_output = ast.literal_eval(bounding_boxes)
        except Exception as e:
            # Fallback for truncated model output: cut at the last complete
            # '"}' entry and close the list before re-parsing.
            end_idx = bounding_boxes.rfind('"}') + len('"}')
            truncated_text = bounding_boxes[:end_idx] + "]"
            json_output = ast.literal_eval(truncated_text)

        # Iterate over the bounding boxes
        for i, bounding_box in enumerate(json_output):
            # Select a color from the list
            color = colors[i % len(colors)]

            # Convert normalized coordinates to absolute coordinates
            abs_y1 = int(bounding_box["bbox_2d"][1] / input_height * height)
            abs_x1 = int(bounding_box["bbox_2d"][0] / input_width * width)
            abs_y2 = int(bounding_box["bbox_2d"][3] / input_height * height)
            abs_x2 = int(bounding_box["bbox_2d"][2] / input_width * width)

            if abs_x1 > abs_x2:
                abs_x1, abs_x2 = abs_x2, abs_x1

            if abs_y1 > abs_y2:
                abs_y1, abs_y2 = abs_y2, abs_y1

            # Draw the bounding box
            draw.rectangle(
                ((abs_x1, abs_y1), (abs_x2, abs_y2)), outline=color, width=4
            )

            # Draw the text
            if "label" in bounding_box:
                draw.text((abs_x1 + 8, abs_y1 + 6), bounding_box["label"], fill=color, font=font)

        # Display the image
        img.show()

    vlm_infer = get_vlm_infer()

    # JSON schema embedded into the prompt so the model answers in a fixed
    # structure (targets: person/tray with id, type and bounding box).
    parameters_schema = {
        "title": "result",
        "description": "用于表示图中人和托盘的位置，以及人和托盘的匹配关系",
        "type": "object",

        "properties": {
            "targets": {
                "type": "array",
                'description': "图中的所有的目标，包括人和托盘",
                "items": {
                    "type": "object",
                    "properties": {
                        "target_id": {
                            "type": "integer",
                            "description": "人或者托盘的唯一标志码"  #
                        },
                        "target_type": {
                            "type": "string",
                            "description": "目标的类型",
                            "enum": [
                                "person",
                                "tray"
                            ],
                        },
                        "location": {
                            "type": "object",
                            "properties": {
                                "x1": {"type": "number",
                                       "description": "目标包围框左上角x坐标"},
                                "y1": {"type": "number",
                                       "description": "目标包围框左上角y坐标"},
                                "x2": {"type": "number",
                                       "description": "目标包围框右下角x坐标"},
                                "y2": {"type": "number",
                                       "description": "目标包围框右下角y坐标"},
                            },
                            "required": ["x1", "y1", "x2", "y2"]
                        },
                        # "matched_target_id": {
                        #     "type": "integer",
                        #     "description": "匹配到的目标标志码,表示person和tray的匹配关系"
                        # },
                    }
                }
            },

        },
        "required": [],
    }
    parameters_schema_str = json.dumps(parameters_schema, ensure_ascii=False, indent=4)
    # format = 'x1,y1,x2,y2,label'

    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    # "image":"https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
                    "image": r'D:\CODE\ZXC\LLMs\000100.jpg',
                    "resized_height": 640,
                    "resized_width": 640,
                },
                {"type": "text",
                 "text": f"这是人员过安检场景，乘客将各自的行李放置在灰色托盘里，每个灰色托盘上贴有编号。\n请识别到图中每个人和灰色托盘的位置。以json格式输出, schema如下\n{parameters_schema_str}"},
            ],
        }
    ]
    t = time.time()
    output_text = vlm_infer(messages, is_stream=False)
    print(output_text)
    print(f'{time.time() - t}s')
    # NOTE(review): the image drawn on below is loaded from a different path
    # than the one sent to the model above — confirm both are the same frame.
    image = Image.open(
        r'D:\DATA\20250519RENBAO\trainV8Pose_closePeople\add_imgs\front_cam_dataset0_100_t23721-2025-05-13\000100.jpg')
    plot_bounding_boxes(image, output_text[0], 640, 640)

def get_inference_with_api():
    """Return an inference callable backed by the DashScope OpenAI-compatible API.

    Drop-in alternative to the local `get_vlm_infer()` closure: the returned
    callable accepts the same keyword arguments (`is_stream`, `tools_schema`)
    so callers can switch backends without changes; both are currently ignored
    by this backend (tools are expected to be embedded in the prompt text).
    """
    import os
    from openai import OpenAI
    # SECURITY: the API key was hard-coded in source. Prefer the
    # DASHSCOPE_API_KEY environment variable; the literal is kept only as a
    # backward-compatible fallback — rotate this key and remove it.
    client = OpenAI(
        api_key=os.environ.get('DASHSCOPE_API_KEY', 'sk-38bdbf76aba641dfb1a671c7259d6dd5'),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        # intl endpoint: "https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
    )

    def inference_with_api(messages, is_stream=False, tools_schema=None):
        """Run one chat completion; returns [content] to match the local backend.

        BUGFIX: the original signature took only `messages`, so callers that
        passed `tools_schema=` (see the function-call pipeline) crashed with a
        TypeError. The extra keywords are accepted and ignored here.
        """
        completion = client.chat.completions.create(
            model="qwen2.5-vl-72b-instruct",
            messages=messages,
        )
        res = [completion.choices[0].message.content]  # may contain <tool_call> markup
        return res
    return inference_with_api
def get_vlm_infer_function_call_pipe(**test_config):  # function-call inference + drawing pipeline
    """Build a pipeline that asks a VLM to locate persons/trays and draws them.

    The model is given a `<tools>` system prompt describing a `draw_img`
    function; it is expected to answer with `<tool_call>{json}</tool_call>`
    blocks, which the pipeline parses and executes locally, saving the
    annotated image as `<img>_res.jpg` next to the input.

    Keyword args (test_config):
        resized_height / resized_width: the frame size the model sees;
            model coordinates are rescaled from this frame back to the
            original image size when drawing (defaults 640x640).

    Returns:
        vlm_infer_function_call_pipe(img_path) -> None
    """
    # resized_height = test_config.get('resized_height', 540)
    # resized_width = test_config.get('resized_width', 960)
    resized_height = test_config.get('resized_height', 640)
    resized_width = test_config.get('resized_width', 640)

    # Backend: remote DashScope API.
    # BUGFIX: the original also called get_vlm_infer() here and immediately
    # overwrote the result, loading the full local model for nothing.
    # Uncomment the next line (and comment the API one) to use the local model:
    # vlm_infer = get_vlm_infer()
    vlm_infer = get_inference_with_api()

    def qwen_seq_2_qwen_tool_calls_message(qwen_res_seq):
        """Parse a raw Qwen output sequence into an assistant tool_calls message.

        Returns {"role": "assistant", "tool_calls": [...]}; on any parse error
        the list is empty and the error is logged (best-effort by design).
        """
        try:
            qwen_tool_calls_message = {"role": "assistant", "tool_calls": []}
            # Qwen emits each call as <tool_call>{json}</tool_call>.
            pattern = r'<tool_call>\s*({.*?})\s*</tool_call>'
            matches = re.findall(pattern, qwen_res_seq, re.DOTALL)
            tool_calls = [json.loads(match) for match in matches]  # n calls
            print(f'tool_calls {tool_calls}')
            qwen_tool_calls_message['tool_calls'] = tool_calls
            return qwen_tool_calls_message
        except Exception:  # narrowed from a bare `except:`
            print('结构化res_seq错误')
            print(traceback.format_exc())
            return {"role": "assistant", "tool_calls": []}

    def call_qwen_tool_calls_message(qwen_tool_calls_message, img_path):
        """Execute the tool calls in the message; `draw_img` draws the boxes.

        Coordinates come back in the resized frame and are rescaled to the
        original image size. Saves the annotated image as `<img>_res.jpg`.
        """
        saved_path = img_path.replace('.jpg', '_res.jpg')
        show_image_pil = Image.open(img_path).convert("RGB")
        origin_width, origin_height = show_image_pil.size
        draw = ImageDraw.Draw(show_image_pil)

        tool_calls = qwen_tool_calls_message['tool_calls']
        for tool_call in tool_calls:
            tool_call_name_str = tool_call['name']
            tool_call_args_dict = tool_call['arguments']
            if tool_call_name_str == 'draw_img':
                # draw_img execution logic
                for item_dict in tool_call_args_dict['targets']:  # each target
                    cls_name = item_dict['target_type']
                    xyxy = item_dict['location']
                    x1, y1, x2, y2 = xyxy
                    # rescale from model (resized) coordinates to original pixels
                    x1 = x1 * origin_width / resized_width
                    x2 = x2 * origin_width / resized_width
                    y1 = y1 * origin_height / resized_height
                    y2 = y2 * origin_height / resized_height
                    draw.rectangle([x1, y1, x2, y2], outline=(0, 255, 0), width=3)
                    # NOTE(review): ImageFont.load_default(size) needs Pillow >= 10.1
                    draw.text((x1, y1), cls_name, fill=(255, 0, 0), font=ImageFont.load_default(50))
                show_image_pil.save(saved_path)
                print(f'saved to {saved_path}')
            else:
                pass  # unknown tool name — ignored

        # TODO: return a tool response message {"role": "tool", "content": tool_response_str}
        return None

    # Tool schema; serialized below and embedded verbatim in the system prompt.
    tools_schema = [
        {
            "type": "function",
            "function": {
                "name": "draw_img",
                "description": "画图工具，根据图中人和托盘的坐标信息，在图上画出矩形框",
                "parameters": {
                    "targets": {
                        "type": "array",
                        'description': "图中的所有的目标，包括人和托盘",
                        "items": {
                            "type": "object",
                            "properties": {
                                "target_type": {
                                    "type": "string",
                                    "description": "目标的类型",
                                    "enum": [
                                        "person",
                                        "tray"
                                    ],
                                },
                                "location": {
                                    "type": "array",
                                    "description":  "表示边界框的位置，格式为 [x1, y1, x2, y2]，用于图像中的目标检测任务",
                                    "items": {"type": "number"},
                                    "minItems": 4,
                                    "maxItems": 4
                                }
                            }
                        }
                    },

                },
            }
        },
    ]
    tools_schema_str = json.dumps(tools_schema, ensure_ascii=False, indent=4)
    tools_str = f"\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>{tools_schema_str} \n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{{ \"name\": <function-name>, \"arguments\": <args-json-object>}}\n</tool_call>\n"

    def vlm_infer_function_call_pipe(img_path):
        """Run one image through: prompt -> VLM -> tool-call parse -> draw."""
        messages = [
            {"role": "system", "content": f"你擅长识别图片里的物体及物体之间的关系。你有一张图片，里面是人员过安检场景，乘客将各自的行李放置在灰色托盘里，每个灰色托盘上贴有编号。{tools_str}"},
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "image": img_path,
                        "resized_height": resized_height,
                        "resized_width": resized_width,
                    },
                    {"type": "text",
                     "text": f"识别图中每一个人和托盘的位置，调用画图工具，画出每个框的矩形框"},
                ],
            },
        ]
        t1 = time.time()
        # BUGFIX: the API-backed callable did not accept `tools_schema`, so the
        # original call raised TypeError with that backend. The tool schema is
        # already embedded in the system prompt, so retry without the keyword.
        try:
            output_text = vlm_infer(messages=messages, tools_schema=tools_schema)[0]
        except TypeError:
            output_text = vlm_infer(messages=messages)[0]
        print(f"time: {time.time() - t1}")
        print(output_text)
        qwen_tool_calls_message = qwen_seq_2_qwen_tool_calls_message(output_text)
        print(qwen_tool_calls_message)
        response = call_qwen_tool_calls_message(qwen_tool_calls_message, img_path)
        return response
    return vlm_infer_function_call_pipe

def test_dir_get_vlm_infer_function_call_pipe(**test_config):
    """Run the function-call pipeline over every image matched by `img_glob`."""
    image_paths = glob.glob(test_config.get("img_glob"))
    pipe = get_vlm_infer_function_call_pipe(**test_config)

    for image_path in image_paths:
        print(image_path)
        pipe(image_path)

if __name__ == '__main__':
    # Alternative manual tests:
    # test_get_vlm_infer()
    # test_get_vlm_inferx()

    test_dir_get_vlm_infer_function_call_pipe(
        img_glob=r'D:\CODE\ZXC\LLMs\local_data\test_imgs\*0.jpg',
    )
