# NOTE(review): removed page-scrape artifacts that preceded this line (file size,
# git blame hashes, and a line-number gutter) — they were not part of the source
# and made the file invalid Python.
import base64
import io
import os
from typing import Any, Dict, List

import PIL
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTModel
# Run on GPU when one is available; the model (and, at inference time, the
# inputs) must live on this device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class PreTrainedPipeline():
    """Custom inference handler that turns a base64-encoded image into a
    ViT feature vector (the CLS-token embedding)."""

    def __init__(self, path=""):
        """Load the ViT model and its image processor from *path*.

        Args:
            path (str): Directory expected to contain the model weights,
                ``config.json`` and ``preprocessor_config.json``.
        """
        self.model = ViTModel.from_pretrained(
            pretrained_model_name_or_path=path,
            config=os.path.join(path, 'config.json')
        )
        # Inference only: freeze dropout/batch-norm behavior and move to the
        # module-level device.
        self.model.eval()
        self.model = self.model.to(device)
        self.processor = ViTImageProcessor.from_pretrained(
            pretrained_model_name_or_path=os.path.join(
                path, 'preprocessor_config.json')
        )

    def __call__(self, data: Any) -> Dict[str, List[float]]:
        """
        Args:
            data (:dict | str:):
                Includes the input data and the parameters for the inference.
                Inputs should be an image encoded in base 64.
                NOTE(review): only a raw base64 *string* is actually handled
                here — a dict payload would fail in b64decode; confirm the
                caller's payload shape.
        Return:
            A :obj:`dict`:. The object returned should be a dict like
            {"feature_vector": [0.6331314444541931,...,-0.7866355180740356,]} containing :
            - "feature_vector": A list of floats corresponding to the image embedding.
        """
        # decode base64 image to PIL
        image = Image.open(io.BytesIO(base64.b64decode(data)))
        inputs = self.processor(images=image, return_tensors="pt")
        # Bug fix: move every input tensor onto the model's device. Previously
        # inputs stayed on CPU, which raises a device-mismatch error whenever
        # CUDA is available and the model was moved to GPU.
        inputs = {name: tensor.to(device) for name, tensor in inputs.items()}
        # Bug fix (memory/perf): no gradients are needed at inference time;
        # without no_grad() every call builds an unused autograd graph.
        with torch.no_grad():
            outputs = self.model(**inputs)
        # CLS token: position 0 of the last hidden state of the first (only)
        # image in the batch.
        feature_vector = outputs.last_hidden_state[0, 0].tolist()
        # postprocess the prediction
        return {"feature_vector": feature_vector}
|