from modelscope import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info

# Local snapshot of Qwen/Qwen2.5-VL-7B-Instruct (downloaded via modelscope).
# Hoisted into one constant so the model and processor are guaranteed to load
# from the same checkpoint directory.
_CHECKPOINT_DIR = "/mnt/lth/workshop/weights/Qwen/Qwen2___5-VL-7B-Instruct"

# Load the model with automatic dtype selection and automatic device placement.
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    _CHECKPOINT_DIR, torch_dtype="auto", device_map="auto"
)

# Default processor (tokenizer + image preprocessor) for the same checkpoint.
processor = AutoProcessor.from_pretrained(_CHECKPOINT_DIR)


# Classification prompt for the VLM (kept in Chinese on purpose — it is the
# text sent to the model and must not be altered).
# NOTE: the original used an f-string with no placeholders, which forced
# doubling every brace in the example dict; a plain string needs no escaping.
prompt = """
你是一个人体属性分类模型。请观察分析给定图片，并依据以下分类条目，从每个分类的选项中选出与图片相符的内容，按照字典格式输出结果，字典的键为分类名称，值为对应分类中符合图片情况的选项。分类条目如下：
- 性别：['女性', '男性']
- 年龄：['年龄儿童', '年龄成年']
- 发型：['长发', '短发', '光头']
- 配饰：['帽子', '墨镜', '口罩', '围巾']
- 上衣袖长：['短袖', '长袖']
- 上衣颜色：['上衣白色', '上衣黑色', '上衣蓝色', '上衣红色', '上衣绿色', '上衣黄色', '上衣紫色', '上衣粉色', '上衣灰色']
- 下装类型：['长裤', '短裤', '下装长裙', '下装短裙']
- 下装颜色：['下装白色', '下装黑色', '下装蓝色', '下装红色', '下装绿色', '下装黄色', '下装紫色', '下装粉色', '下装灰色']
- 携带物品：['手提包', '单肩包', '双肩包', '行李箱']
- 身高：['身长高', '身长中等', '身长矮']
- 体态：['体态胖', '体态中等', '体态瘦']

如果图片中没有对应的类别特征，那么对应的值就填写 '无明显特征'。

请严格按照示例的表达方式输出，不要有废话：
{'性别': '女性', '年龄': '年龄成年', '发型': '长发', '上衣袖长':'短袖','配饰': '墨镜', '上衣颜色': '上衣白色', '下装类型': '长裤', '下装颜色': '下装黑色', '携带物品': '无明显特征', '身高': '身长中等', '体态': '体态中等'}
"""
import os
import pickle
from tqdm import tqdm

# Results keyed by image filename; value is the model's raw decoded answer.
mem = {}
base_dir = '/mnt/lth/workshop/data_0409'
for file in tqdm(os.listdir(base_dir)):
    file_path = os.path.join(base_dir, file)
    # os.listdir also returns subdirectories; skip anything that is not a
    # regular file so vision preprocessing does not crash on a stray folder.
    if not os.path.isfile(file_path):
        continue
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": file_path},
                # prompt is already a str — no f-string wrapper needed.
                {"type": "text", "text": prompt},
            ],
        }
    ]

    # Preparation for inference: chat-format the text and extract vision inputs.
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    # Follow the model's actual placement instead of hard-coding "cuda",
    # since the model was loaded with device_map="auto".
    inputs = inputs.to(model.device)

    # Generate, then strip the echoed prompt tokens from each sequence.
    generated_ids = model.generate(**inputs, max_new_tokens=128)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    # Fix: the original assigned mem[file] twice in a row; once is enough.
    mem[file] = output_text[0]

# Persist all results once the whole directory has been processed.
with open('vlm_qwen_7B_base.pkl', 'wb') as f:
    pickle.dump(mem, f)
