from PIL import Image
# import torch
from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
# import requests
# from modelscope import snapshot_download
from gradio_helper import make_demo

# Image-resolution bounds handed to the processor: Qwen2-VL rescales each
# input image so its pixel count falls within [min_pixels, max_pixels],
# expressed here in multiples of the model's 28x28 vision-patch area.
min_pixels = 256 * 28 * 28
max_pixels = 1280 * 28 * 28

model_dir = "checkpoint/Qwen2-VL-7B-Instruct-GPTQ-Int4"
# Load the GPTQ-Int4 checkpoint; "auto" lets transformers pick the dtype
# and spread the weights over the available device(s).
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_dir, torch_dtype="auto", device_map="auto"
)
# Sample image used by the commented-out offline-inference snippet further
# down this file; kept so that snippet can be re-enabled without edits.
example_image_path = "demo.jpg"

processor = AutoProcessor.from_pretrained(
    model_dir, min_pixels=min_pixels, max_pixels=max_pixels
)
demo = make_demo(model, processor)
try:
    demo.launch(debug=True)
except Exception:
    # launch() may fail when a local server URL is unreachable (proxy,
    # hosted notebook, etc.); retry with a public Gradio share link.
    demo.launch(debug=True, share=True)

# question = "describe these images stacked by rows and columns. Please first summarize the commonality among these images, e.g., the content. Then present \
# a comprehensive understanding of these images individually. You can typically detail the content, ambient illumination condition, environments, and the shot view etc. \
# By qualifying the consistency between the [content, illumination, environment, shot view] and each image, please rate the realness of these images by a \
# score between [0-10] (0 indicates fake with poor visual perception quality and 10 denotes real-world scenario with good visual perception quality)"

# messages = [
#     {
#         "role": "user",
#         "content": [
#             {
#                 "type": "image",
#                 "image": f"file://{example_image_path}",
#             },
#             {"type": "text", "text": question},
#         ],
#     }
# ]

# text_prompt = processor.apply_chat_template(messages, add_generation_prompt=True)

# url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"
# image = Image.open(requests.get(url, stream=True).raw)


# inputs = processor(
#     text=[text_prompt], images=[image], padding=True, return_tensors="pt"
# )
# inputs = inputs.to("cuda")  # move input tensors to the GPU (if available)

# output_ids = model.generate(**inputs, max_new_tokens=128)

# generated_ids = [
#     output_ids[len(input_ids) :]
#     for input_ids, output_ids in zip(inputs.input_ids, output_ids)
# ]

# output_text = processor.batch_decode(
#     generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
# )

# print(output_text)
