from modelscope import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import pipeline
import torch
from tqdm import tqdm

from transformers import TrainingArguments, Trainer, DataCollatorForSeq2Seq
import os
from peft import PeftModel
from datasets import Dataset
import json
from qwen_vl_utils import process_vision_info
from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
import torch
from peft import LoraConfig, get_peft_model

# Path to the SemEval-2016 ABSA (phones) test split, tab-separated.
test_path = "/home/mbk/rubbish/sen_cls/data/SE-ABSA16_PHNS/test.tsv"
model_pth = '/home/mbk/lab/aicg/Qwen2.5-VL-7B/Qwen2.5-VL-7B-Instruct'  # Download from the ModelScope hub (search the model name) and put all model files into the folder at this path.
# Load the base model in bfloat16; device_map="auto" lets accelerate place
# the weights across available devices.
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    model_pth, torch_dtype=torch.bfloat16, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_pth)
processor = AutoProcessor.from_pretrained(model_pth)

# LoRA settings used at inference time.
# NOTE(review): PeftModel.from_pretrained normally reads the adapter config
# from the checkpoint directory itself; this explicit config presumably
# mirrors the training-time settings — verify it matches the saved adapter.
config = LoraConfig(
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    inference_mode=False,
    r=64,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
)

# Attach the fine-tuned LoRA adapter weights (checkpoint-835) on top of the
# base model; `model` is rebound to the wrapped PEFT model.
peft_model_path = "/home/mbk/rubbish/sen_cls/pp1/QWEN_SE-ABSA16_PHNS/output/Qwen2.5-VL-LoRA/checkpoint-835"
model = PeftModel.from_pretrained(model, peft_model_path, config=config)

def predict(aspect, text):
    """Classify the sentiment of *text* toward *aspect*.

    Builds a Chinese instruction prompt, runs it through the LoRA-tuned
    Qwen2.5-VL model (module-level ``model`` / ``processor``), and maps the
    generated reply to a binary label.

    Parameters
    ----------
    aspect : str
        The evaluation target (e.g. a product feature).
    text : str
        The review text to classify.

    Returns
    -------
    int
        1 if the model output contains "正面" (positive), otherwise 0
        (including "负面" and any malformed reply).
    """
    prompt = f"""
请完成一个评价对象级情感分类任务。
对象是：{aspect},
评价是：{text},
请输出 正面 或 负面
注意输出格式是一个词（正面 或 负面），不要输出其它
"""
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
            ],
        }
    ]
    # Use a distinct name for the templated chat string so the caller's
    # `text` argument is not shadowed (the original code reused `text` here).
    chat_text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[chat_text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    # Follow the model's actual placement (device_map="auto") rather than
    # hard-coding "cuda", so the script also runs on CPU-only hosts.
    inputs = inputs.to(model.device)

    # Inference: generate, then decode only the newly generated tokens by
    # stripping each prompt prefix from its output sequence.
    generated_ids = model.generate(**inputs, max_new_tokens=128)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )[0]
    # Binary mapping: positive marker present -> 1, anything else -> 0.
    return 1 if '正面' in output_text else 0


# Accumulate output rows in a list and join once at the end — repeated
# `res += ...` string concatenation is quadratic in the number of examples.
rows = ["index\tprediction"]

with open(test_path, 'r', encoding='utf-8') as f:
    # Drop the TSV header line, then classify each remaining row.
    data = f.read().split('\n')[1:]
    for line in tqdm(data):
        if len(line) == 0:
            continue  # tolerate trailing blank lines in the file
        qid, text_a, text_b = line.split('\t')
        label = predict(text_a, text_b)
        rows.append(f"{qid}\t{label}")

# Write predictions in the competition's expected TSV format; the trailing
# newline matches the original byte-for-byte output.
with open('SE-ABSA16_PHNS.tsv', 'w', encoding='utf-8') as f:
    f.write("\n".join(rows) + "\n")
