from typing import Any, Dict
import base64
from io import BytesIO

import torch
from PIL import Image
from transformers import AutoProcessor, BlipForConditionalGeneration

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class EndpointHandler:
    def __init__(self, path: str = ""):
        # Load the BLIP captioning processor and model once at startup so
        # each request only pays for inference, not for model loading.
        self.processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
        self.model = BlipForConditionalGeneration.from_pretrained(
            "Salesforce/blip-image-captioning-large"
        ).to(device)

    def __call__(self, data: Dict[str, Any]) -> str:
        # The request payload nests the actual arguments under "inputs".
        inputs = data.pop("inputs", data)
        # Decode the base64-encoded image bytes into an RGB PIL image.
        image = Image.open(BytesIO(base64.b64decode(inputs["image"]))).convert("RGB")
        # An optional text prompt conditions the caption; BLIP also supports
        # unconditional captioning when no text is provided.
        model_inputs = self.processor(image, inputs.get("text"), return_tensors="pt").to(device)
        outputs = self.model.generate(**model_inputs)
        # Return the decoded caption string.
        return self.processor.decode(outputs[0], skip_special_tokens=True)
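

# --- Local smoke test: a minimal sketch, not part of the endpoint contract. ---
# The image path "cat.jpg" and the prompt below are placeholder assumptions;
# in production, Hugging Face Inference Endpoints deserializes the HTTP request
# and invokes EndpointHandler.__call__ with the resulting payload itself.
if __name__ == "__main__":
    # Base64-encode a local test image, mimicking what a client would send.
    with open("cat.jpg", "rb") as f:  # hypothetical local file
        encoded = base64.b64encode(f.read()).decode("utf-8")

    handler = EndpointHandler()
    caption = handler({"inputs": {"image": encoded, "text": "a photography of"}})
    print(caption)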