import os
from typing import List
from PIL import Image
import jax
from transformers import ViTFeatureExtractor, AutoTokenizer, FlaxVisionEncoderDecoderModel


class PreTrainedPipeline():
    def __init__(self, path=""):
        # Load the Flax vision encoder-decoder checkpoint together with its
        # image feature extractor and tokenizer from the repository directory.
        model_dir = os.path.join(path, "ckpt_epoch_3_step_6900")
        self.model = FlaxVisionEncoderDecoderModel.from_pretrained(model_dir)
        self.feature_extractor = ViTFeatureExtractor.from_pretrained(model_dir)
        self.tokenizer = AutoTokenizer.from_pretrained(model_dir)

        # Beam-search settings used for caption generation.
        max_length = 16
        num_beams = 4
        self.gen_kwargs = {"max_length": max_length, "num_beams": num_beams}

        # JIT-compile the generation step so repeated calls reuse the compiled function.
        @jax.jit
        def _generate(pixel_values):
            output_ids = self.model.generate(pixel_values, **self.gen_kwargs).sequences
            return output_ids

        self.generate = _generate

        # Warm-up call: run one image through the pipeline so jax.jit compiles
        # here rather than on the first user request.
        image_path = os.path.join(path, "val_000000039769.jpg")
        image = Image.open(image_path)
        self(image)
        image.close()
    def __call__(self, inputs: "Image.Image") -> List[str]:
        """
        Args:
            inputs (:obj:`PIL.Image.Image`):
                The image to caption.
        Return:
            A :obj:`List[str]`: the generated caption(s), stripped of
            surrounding whitespace.
        """
        # Convert the image to pixel values, generate token ids with the
        # jitted beam search, then decode them back into caption strings.
        pixel_values = self.feature_extractor(images=inputs, return_tensors="np").pixel_values
        output_ids = self.generate(pixel_values)
        preds = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        preds = [pred.strip() for pred in preds]
        return preds