Usage Via Transformers: Example

#11
by ep5000

Hi,

I see that the "Use this model" dropdown shows how to load the model via Transformers, but is there a complete example demonstrating inference on an image via Transformers?

After some additional research, here is an example of using this model via Transformers (i.e. not via vLLM):

# Load the model and processor directly via Transformers
from transformers import AutoProcessor, AutoModelForImageTextToText
from qwen_vl_utils import process_vision_info

model = AutoModelForImageTextToText.from_pretrained(
    "reducto/RolmOCR",
    torch_dtype="auto", device_map="auto",
    trust_remote_code=True)
# Processors are not placed on a device, so no device_map is needed here
processor = AutoProcessor.from_pretrained("reducto/RolmOCR", trust_remote_code=True, use_fast=False)

question = "Extract the text from this image"
# Chat-style message containing the image path and the OCR prompt
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "/home/myuser/image1.jpg"},
            {"type": "text", "text": question},
        ],
    },
]
# Render the chat template and extract the vision inputs from the messages
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to(model.device)  # send inputs to the same device as the model
generated_ids = model.generate(**inputs, max_new_tokens=128)  # raise max_new_tokens for longer documents
# Strip the prompt tokens so only the newly generated text remains
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text[0])  # output_text is a list with one entry per input
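
If you prefer to open the image yourself first (for example to crop or resize it before OCR), process_vision_info also accepts an already-opened PIL image in the "image" field instead of a file path. The snippet below is a minimal sketch of that variant, assuming the installed qwen_vl_utils version supports PIL inputs; it reuses the model, processor, and question defined above.

# Sketch: pass an already-opened PIL image instead of a file path
# (assumes the installed qwen_vl_utils version accepts PIL.Image objects)
from PIL import Image

pil_image = Image.open("/home/myuser/image1.jpg")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": pil_image},
            {"type": "text", "text": question},
        ],
    },
]
# From here on, the apply_chat_template / process_vision_info / generate steps are identical.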
