from threading import Thread

# Inference for a vision-language model (VLM)

# pip install transformers==4.46.2
# pip install qwen-vl-utils
from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor, TextIteratorStreamer
from qwen_vl_utils import process_vision_info


'''
Qwen2-VL inference
'''
def get_vlm_infer(model_path=r'D:\code\other\LLMs\models\Qwen2-VL-2B-Instruct',
                  device='cuda'):
    """Load a Qwen2-VL model and processor once; return an inference closure.

    Args:
        model_path: local checkpoint directory (or HF hub id) of Qwen2-VL.
        device: ``device_map`` value for ``from_pretrained`` ('cuda', 'cpu', 'auto', ...).

    Returns:
        ``infer(messages, is_stream=False)``:
            - ``is_stream=False`` -> list[str] of decoded completions.
            - ``is_stream=True``  -> a ``TextIteratorStreamer`` yielding text chunks.
    """
    model = Qwen2VLForConditionalGeneration.from_pretrained(
        model_path, torch_dtype="auto", device_map=device
    )
    # NOTE: for multi-image/video workloads, the upstream recipe recommends
    # torch_dtype=torch.bfloat16 plus attn_implementation="flash_attention_2"
    # in from_pretrained for better speed and memory.

    # Default processor. min_pixels/max_pixels may be passed to bound the
    # visual-token count per image (e.g. 256*28*28 .. 1280*28*28) to balance
    # speed and memory.
    processor = AutoProcessor.from_pretrained(model_path)

    def infer(messages=None, is_stream=False):
        """Run one chat-style inference.

        Args:
            messages: Qwen chat messages, e.g.
                [{"role": "user",
                  "content": [{"type": "image", "image": <path-or-url>,
                               "resized_height": 56, "resized_width": 56},
                              {"type": "text", "text": "..."}]}]
            is_stream: if True, start generation on a background thread and
                return the streamer immediately; otherwise block and return
                the decoded output texts.

        Returns:
            list[str] when ``is_stream`` is False, else a TextIteratorStreamer.
        """
        # ``messages=None`` avoids the shared-mutable-default pitfall of
        # the original ``messages=[]`` signature (behavior unchanged).
        if messages is None:
            messages = []

        # Preprocessing: render the chat template (one image_pad/video_pad
        # placeholder per visual item) and load/resize the visual inputs.
        text = processor.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        image_inputs, video_inputs = process_vision_info(messages)  # load + resize
        # Tokenize the text and patchify images/videos; each single pad
        # placeholder is expanded to the per-patch token count.
        inputs = processor(
            text=[text],
            images=image_inputs,
            videos=video_inputs,
            padding=True,
            return_tensors="pt",
        )
        inputs = inputs.to(device)  # 'input_ids', 'pixel_values', ...

        if not is_stream:
            # The vision tower embeds image patches, which are merged into the
            # text embeddings before the language-model forward pass.
            generated_ids = model.generate(**inputs, max_new_tokens=128, synced_gpus=False)
            # Strip the prompt tokens so only the completion is decoded.
            generated_ids_trimmed = [
                out_ids[len(in_ids):]
                for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
            ]
            return processor.batch_decode(
                generated_ids_trimmed,
                skip_special_tokens=True,
                clean_up_tokenization_spaces=False,
            )

        # Streaming path: generate on a worker thread; tokens are pushed into
        # the streamer's internal queue and consumed by iterating the streamer
        # (e.g. ``for chunk in streamer: print(chunk)``).
        # TextIteratorStreamer expects a tokenizer, not the full processor.
        streamer = TextIteratorStreamer(
            tokenizer=processor.tokenizer,
            skip_special_tokens=True,
            skip_prompt=True,
            timeout=50,  # seconds to wait for the next token before raising
        )
        kwargs = {**inputs, 'streamer': streamer, 'max_new_tokens': 512, 'synced_gpus': False}
        # daemon=True: an abandoned streamer must not keep the process alive.
        Thread(target=model.generate, kwargs=kwargs, daemon=True).start()
        return streamer

    return infer

def test_get_vlm_infer():
    """Smoke test: send one image + one text prompt through the VLM (non-streaming)."""
    infer = get_vlm_infer()
    image_item = {
        "type": "image",
        # "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
        "image": r"D:\code\other\LLMs\demo_1723169599.7405472.png",
        "resized_height": 56,
        "resized_width": 56,
    }
    text_item = {"type": "text", "text": "这是什么"}
    messages = [{"role": "user", "content": [image_item, text_item]}]
    output_text = infer(messages, is_stream=False)
    print(output_text)

# Manual smoke-test entry point.
if __name__ == "__main__":
    test_get_vlm_infer()