"""
@Description :   Qwen2.5-VL 模型 (本地)
@Author      :   tqychy 
@Time        :   2025/08/23 17:09:37
"""
import torch
from qwen_vl_utils import process_vision_info
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration


class Qwen2_5_VL:
    """Local inference wrapper around the Qwen2.5-VL vision-language model.

    Loads the model with bfloat16 weights, FlashAttention-2 and automatic
    device placement, then exposes a callable interface that runs a single
    chat-format conversation and returns the decoded response.
    """

    def __init__(self, cfg, logger, *, model_path):
        """
        Args:
            cfg: project configuration object (opaque here; stored as-is).
            logger: logger instance (opaque here; stored as-is).
            model_path: local directory holding the pretrained weights.
                Keyword-only; loading is restricted to local files
                (``local_files_only=True``).
        """
        self.cfg = cfg
        self.logger = logger
        self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
            model_path,
            dtype=torch.bfloat16,
            attn_implementation="flash_attention_2",
            device_map="auto",
            local_files_only=True,
        )
        self.processor = AutoProcessor.from_pretrained(model_path)

    def __call__(self, inputs):
        """Run one conversation through the model and decode the reply.

        Args:
            inputs: a single conversation — a list of message dicts in the
                Qwen chat format (see ``make_prompt``).

        Returns:
            str: the decoded model response for this conversation.
        """
        text = self.processor.apply_chat_template(
            inputs, tokenize=False, add_generation_prompt=True
        )
        image_inputs, video_inputs = process_vision_info(inputs)
        model_inputs = self.processor(
            text=[text],
            images=image_inputs,
            videos=video_inputs,
            padding=True,
            return_tensors="pt",
        )
        # Move tensors to wherever the (possibly sharded) model lives rather
        # than hard-coding "cuda" — with device_map="auto" the first shard's
        # device is what the inputs must match.
        model_inputs = model_inputs.to(self.model.device)

        generated_ids = self.model.generate(**model_inputs, max_new_tokens=128)
        # Strip the prompt tokens so only the newly generated part is decoded.
        generated_ids_trimmed = [
            out_ids[len(in_ids):]
            for in_ids, out_ids in zip(model_inputs.input_ids, generated_ids)
        ]
        output_text = self.processor.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        return output_text[0]

    @staticmethod
    def make_prompt(image_path, sentence, category, mode):
        """Build a single-turn chat message for a grounding task.

        Args:
            image_path: path (or URL) of the image to attach.
            sentence: referring-expression description (used when
                ``mode == "rec"``; ignored otherwise).
            category: "."-separated category labels (used when
                ``mode == "detect"``; ignored otherwise).
            mode: task type, either ``"rec"`` (referring expression
                comprehension) or ``"detect"`` (open-vocabulary detection).

        Returns:
            list: a one-element Qwen chat-format message list.

        Raises:
            ValueError: if ``mode`` is neither ``"rec"`` nor ``"detect"``.
        """
        if mode == "rec":
            prompt = f"""
            <image>
            Here is a description of the objects in the figure. 
            Please enclose the corresponding positions using coordinate boxes. 
            Examples of coordinate value formats: [x1, y1, x2, y2]
            Please only output boxes in a fixed format like:
            ```json\n[\n\t{{"bbox_2d": [x1, y1, x2, y2], "label": "..."}}\n]\n```
            and do not output any unnecessary content.

            Description:\n{sentence}
            """
        elif mode == "detect":
            prompt = f"""
            <image>
            Here is a series of category labels separated by "." and an image. Please identify ALL objects in the image corresponding to the mentioned category labels, and meet the following requirements:
            1. There may be multiple objects for each category label; please identify ALL of them.
            2. Return a JSON-formatted list of dictionaries. Each dictionary must contain the "bbox_2d", "label" and "score" of the identified object. An example of the output format is: [{{"bbox_2d": [456, 249, 507, 324], "label": "Dog", "score": 0.67}}, {{"bbox_2d": [495, 261, 528, 322], "label": "Dog", "score": 0.84}}, {{"bbox_2d": [392, 323, 447, 401], "label": "Monkey", "score": 0.46}}].
            3. Do not output any extra information.
            4. The returned category labels must EXACTLY match the input category labels.
            Categories: {category}
            """
        else:
            raise ValueError(f"Unknown task type! {mode}")
        inputs = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "image": image_path,
                    },
                    {"type": "text", "text": prompt},
                ],
            }
        ]
        return inputs

    @staticmethod
    def covert_formatted_bbox(bbox, image_shape):
        """Convert an ``[x1, y1, x2, y2]`` box to ``[x_min, y_min, width, height]``.

        Corner order is normalized, so reversed corners are handled.

        Args:
            bbox: iterable of four numbers ``(x1, y1, x2, y2)``.
            image_shape: unused; kept for interface compatibility with callers.

        Returns:
            list: ``[x_min, y_min, width, height]``.
        """
        x1, y1, x2, y2 = bbox
        x_min = min(x1, x2)
        y_min = min(y1, y2)
        width = max(x1, x2) - x_min
        height = max(y1, y2) - y_min
        return [x_min, y_min, width, height]
    

if __name__ == "__main__":
    # Two independent single-image conversations used as a quick smoke test.
    conversations = [
        [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "image": "/root/vlm_grounding_test/dataset/scripts/refcoco/data/images/mscoco/images/train2014/COCO_train2014_000000000025.jpg",
                    },
                    {"type": "text", "text": "Describe this image."},
                ],
            }
        ],
        [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "image": "/root/vlm_grounding_test/dataset/scripts/refcoco/data/images/mscoco/images/train2014/COCO_train2014_000000000009.jpg",
                    },
                    {"type": "text", "text": "Describe this image."},
                ],
            }
        ],
    ]
    # BUG FIX: the constructor requires a keyword-only model_path; the old
    # call `Qwen2_5_VL(None, None)` raised TypeError before any inference.
    # Point this at the local checkpoint directory (local_files_only=True).
    model = Qwen2_5_VL(None, None, model_path="/root/models/Qwen2.5-VL-7B-Instruct")
    # BUG FIX: __call__ processes exactly one conversation per call
    # (it wraps the prompt as `text=[text]` and returns output_text[0]),
    # so run each conversation separately instead of passing the batch.
    for conversation in conversations:
        print(model(conversation))