from modelscope import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
import re

# Load the Qwen2.5-VL model from a local checkpoint; torch_dtype="auto" and
# device_map="auto" let transformers pick precision and device placement.
model_pth = '/home/mbk/lab/aicg/Qwen2.5-VL-7B/Qwen2.5-VL-7B-Instruct'
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    model_pth, torch_dtype="auto", device_map="auto"
)

# Default processor for the same checkpoint. input_content is a sample Chinese
# gas-station review used by the commented-out smoke test at the end of the file.
input_content = "加油员服务态度特别好！加油站的油价合理！我经常在这里加油"
processor = AutoProcessor.from_pretrained(model_pth)

def gen_prompt(input_content):
    """Build the Chinese sentiment-analysis prompt around *input_content*.

    The prompt instructs the model to answer with a positive/negative
    verdict followed by 1-10 adjectives quoted verbatim from the review,
    in a fixed bullet-list format that predict() parses downstream.
    """
    template = """
请从以下用户评论中分析情感态度并提取支持该态度的词：

<评论内容>
{review}
</评论内容>

输出要求：
1. 态度判断：仅输出"positive"或"negative"
2. 词提取：列出1-10个最能支持态度判断的具体词，只保留形容词，如"很好"、"很不好"、"细心"、"坏"...
3. 词必须直接引用原文中的词，不要改写

输出格式：
态度：[positive/negative]
证据：
- [直接引用的词1]
- [直接引用的词2]（如有）
- [直接引用的词3]（如有）
...
"""
    # {review} is the only brace placeholder in the template, so .format is safe.
    return template.format(review=input_content)

def predict(x):
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": gen_prompt(x)},
            ],
        }
    ]

    # Preparation for inference
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to("cuda")

    # Inference: Generation of the output
    generated_ids = model.generate(**inputs, max_new_tokens=128)
    generated_ids_trimmed = [
        out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )[0]
    # print(output_text)
    lines = output_text.split('\n')
    label = '1' if 'positive' in lines[0] else '0' 
    tks = []
    for line in lines[2:]:
        wd = line[2:].strip()
        start = x.find(wd)
        if start == -1:
            continue
        for i in range(start, start + len(wd)):
            tks.append(i)
    rationals = ""
    for i in range(len(tks)):
        rationals += str(tks[i])
        if i < len(tks) - 1:
            rationals += ','
    return label, rationals


# print(predict(input_content))
