import base64
from io import BytesIO
from typing import Any, Dict, List

from PIL import Image
from transformers import AutoProcessor, AutoModelForVision2Seq


class EndpointHandler():
    def __init__(self, path=""):
        # Load the vision-to-text model and its processor from the repository
        # path, and move the model onto the GPU.
        self.model = AutoModelForVision2Seq.from_pretrained(path).to("cuda")
        self.processor = AutoProcessor.from_pretrained(path)

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        # The request payload is expected to carry a text prompt and a
        # base64-encoded image, e.g.
        #   {"prompt": "Describe this image.", "image_base64": "<base64 bytes>"}
        prompt = data.pop("prompt")
        image_base64 = data.pop("image_base64")

        # Decode the image and prepare the model inputs on the GPU.
        image_data = base64.b64decode(image_base64)
        image = Image.open(BytesIO(image_data))

        inputs = self.processor(text=prompt, images=image, return_tensors="pt").to("cuda")

        # Generate up to 128 new tokens; the image is passed through
        # pixel_values, so no precomputed image embeddings are supplied.
        generated_ids = self.model.generate(
            pixel_values=inputs["pixel_values"],
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            image_embeds=None,
            image_embeds_position_mask=inputs["image_embeds_position_mask"],
            use_cache=True,
            max_new_tokens=128,
        )
        generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

        # Post-process the raw generation: strip the image placeholder tokens
        # and extract the grounded entities (phrase, text span, bounding boxes).
        processed_text, entities = self.processor.post_process_generation(generated_text)

        return [{"processed_text": processed_text, "entities": entities}]
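

# ---------------------------------------------------------------------------
# Minimal local smoke test for the handler above. This is an illustrative
# sketch, not part of the deployed handler: the checkpoint path, image file
# name, and prompt below are placeholders, and a CUDA-capable GPU is assumed
# because the handler moves the model and inputs to "cuda".
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Encode a local test image the same way a client would before sending
    # the request payload.
    with open("test.jpg", "rb") as f:  # placeholder image file
        image_base64 = base64.b64encode(f.read()).decode("utf-8")

    handler = EndpointHandler(path="./model")  # placeholder checkpoint path
    payload = {
        "prompt": "Describe this image.",  # adjust to the model's expected prompt format
        "image_base64": image_base64,
    }
    print(handler(payload))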