import torch
from transformers import CLIPModel, CLIPTokenizer


class PreTrainedPipeline:
    def __init__(self, path=""):
        """
        Initialize the model. The `path` argument is accepted for
        compatibility with the Inference API but is unused here: the
        weights are pulled directly from the Hub.
        """
        self.model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")

    def __call__(self, inputs: str):
        """
        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`list` of floats: the text features computed by the model.
        """
        # Tokenize the input text into PyTorch tensors
        inputs = self.tokenizer(inputs, padding=True, return_tensors="pt")

        # Compute the text embedding without tracking gradients
        with torch.no_grad():
            text_features = self.model.get_text_features(**inputs)
        return text_features[0].tolist()
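

# Example usage (a minimal sketch, assuming `transformers` and `torch` are
# installed and the checkpoint can be downloaded from the Hub; the variable
# names below are illustrative only):
#
#     pipe = PreTrainedPipeline()
#     embedding = pipe("a photo of a cat")
#     print(len(embedding))  # 512: the projection dim of clip-vit-base-patch32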