# e5-large / pipeline.py

import torch
from torch import Tensor
from transformers import AutoTokenizer, AutoModel
from typing import List

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class PreTrainedPipeline():
    def __init__(self, path=""):
        # Load the tokenizer and model from the checkpoint at `path`,
        # then put the model in eval mode on the selected device.
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = AutoModel.from_pretrained(path)
        self.model.eval()
        self.model = self.model.to(device)

    def __call__(self, inputs: str) -> List[List[float]]:
        """
        Args:
            inputs (:obj:`str`):
                The input text to embed.
        Return:
            A :obj:`list` containing a single embedding, itself a list of
            floats, e.g. [[0.6331314444541931, 0.8802216053009033, ...,
            -0.7866355180740356]], corresponding to the text embedding
            of `inputs`.
        """
        batch_dict = self.tokenizer([inputs], max_length=512,
                                    padding=True, truncation=True,
                                    return_tensors='pt')
        # Move the tokenized inputs to the same device as the model;
        # otherwise the forward pass fails when running on GPU.
        batch_dict = {k: v.to(device) for k, v in batch_dict.items()}
        with torch.no_grad():
            outputs = self.model(**batch_dict)
            embeddings = self.average_pool(outputs.last_hidden_state,
                                           batch_dict['attention_mask'])
        return embeddings.cpu().numpy().tolist()

    def average_pool(self, last_hidden_states: Tensor, attention_mask: Tensor) -> Tensor:
        # Zero out hidden states at padding positions, then average over
        # the sequence dimension using the true (unpadded) token counts.
        last_hidden = last_hidden_states.masked_fill(
            ~attention_mask[..., None].bool(), 0.0)
        return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
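

# --- Usage sketch (illustrative, not part of the served pipeline) ---
# A minimal local smoke test, assuming this file lives inside a downloaded
# copy of the model repository so that path="." resolves to the checkpoint;
# adjust the path for your setup. E5-style models typically expect inputs
# prefixed with "query: " or "passage: "; the strings below are examples only.
if __name__ == "__main__":
    pipe = PreTrainedPipeline(path=".")
    query_emb = torch.tensor(pipe("query: what is the capital of France"))
    passage_emb = torch.tensor(pipe("passage: Paris is the capital of France."))
    # Cosine similarity between the two pooled sentence embeddings.
    score = torch.nn.functional.cosine_similarity(query_emb, passage_emb)
    print(f"cosine similarity: {score.item():.4f}")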