import torch
from transformers import AutoModelForCausalLM
from janus.models import MultiModalityCausalLM, VLChatProcessor
from janus.utils.io import load_pil_images
import torch_npu
from torch_npu.contrib import transfer_to_npu
import time

# Path to the pretrained Janus-Pro-1B checkpoint (relative to the working dir).
model_path = "./pretrained/Janus-Pro-1B"

# Load the multimodal chat processor and grab its tokenizer for later decoding.
vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(
    model_path)
tokenizer = vl_chat_processor.tokenizer

# Load the model; trust_remote_code is needed for the custom Janus model class.
vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
    model_path, trust_remote_code=True
)
# Half precision + eval mode. NOTE(review): .cuda() is presumably redirected to
# the Ascend NPU by the transfer_to_npu import above — confirm on this setup.
vl_gpt = vl_gpt.to(torch.float16).cuda().eval()

# Test image and the yes/no question posed to the model.
image = './test.png'
question = "图像中是否为正常眼底？请严格使用“是”或“否”回答（不加句号）。"

# Single-turn chat: one user message carrying the image placeholder and the
# question, followed by an empty assistant turn for the model to complete.
user_turn = {
    "role": "<|User|>",
    "content": f"<image_placeholder>\n{question}",
    "images": [image],
}
conversation = [user_turn, {"role": "<|Assistant|>", "content": ""}]

# Resolve the image paths referenced by the conversation into PIL images.
pil_images = load_pil_images(conversation)

# ---- Build model inputs --------------------------------------------------
start = time.time()

prepare_inputs = vl_chat_processor(
    conversations=conversation, images=pil_images, force_batchify=True, padding_cst_length=False
).to(vl_gpt.device)
# The processor emits fp32 pixel values; cast to match the fp16 model weights.
prepare_inputs["pixel_values"] = prepare_inputs["pixel_values"].to(dtype=torch.float16)
print(f"[INFO] 输入准备完成，用时 {time.time() - start:.4f} 秒")

# ---- Encode the image and run inference ----------------------------------
# no_grad: this is pure inference, so skip autograd bookkeeping (and the
# memory it costs) around prepare_inputs_embeds; generate() already guards
# itself internally.
# NOTE(review): torch.cuda.amp.autocast is deprecated in favour of
# torch.amp.autocast("cuda"); kept as-is because torch_npu's transfer_to_npu
# patches the torch.cuda.* entry points — confirm before migrating.
with torch.no_grad(), torch.cuda.amp.autocast():
    start = time.time()
    inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
    print(f"[INFO] 图像编码和输入嵌入完成，用时 {time.time() - start:.4f} 秒")

    # Decode the tokenized prompt back to text (debug aid). Special tokens are
    # kept so the chat template markers are visible.
    question_tokens = tokenizer.decode(
        prepare_inputs["input_ids"][0].cpu().tolist(),
        skip_special_tokens=False
    )
    # Fix: the original f-string appended a stray literal '"' to this output.
    print(f"[INPUT] 解码后的输入内容：\n{question_tokens}")

    # Greedy decoding (do_sample=False) so the yes/no answer is deterministic.
    start = time.time()
    outputs = vl_gpt.language_model.generate(
        inputs_embeds=inputs_embeds,
        attention_mask=prepare_inputs["attention_mask"],
        pad_token_id=tokenizer.eos_token_id,
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        max_new_tokens=512,
        do_sample=False,
        use_cache=True,
    )
print(f"[INFO] 推理完成，用时 {time.time() - start:.4f} 秒")

# Decode the generated ids; special tokens kept so EOS/markers stay visible.
answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=False)
print(f"[OUTPUT] 模型输出文本：\n{answer}")
