File size: 840 Bytes
b7332d3 f9cfa47 b7332d3 b2a574a b7332d3 494283f b7332d3 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 |
import io
import base64
import shutil
import torch
from transformers import CLIPProcessor, CLIPModel, CLIPTokenizer
class PreTrainedPipeline():
    def __init__(self, path=""):
        """
        Load the CLIP model and its tokenizer.

        Args:
            path (:obj:`str`):
                Directory or model identifier passed to ``from_pretrained``
                for both the model and the tokenizer.
        """
        self.model = CLIPModel.from_pretrained(path)
        self.tokenizer = CLIPTokenizer.from_pretrained(path)

    def __call__(self, inputs: str):
        """
        Compute the CLIP text embedding for a single string.

        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`list` of floats: the text features computed by the model.
        """
        # Keep the tokenized batch under its own name so the raw text
        # argument is not shadowed by a dict of tensors.
        encoded = self.tokenizer(inputs, padding=True, return_tensors="pt")
        # Inference only — skip building the autograd graph.
        with torch.no_grad():
            text_features = self.model.get_text_features(**encoded)
        # One input string -> batch of size 1; return its single embedding row.
        return text_features[0].tolist()