# dr-inference / handler.py

from typing import Dict, List, Any
from transformers import CLIPTokenizer, CLIPModel
import numpy as np
import os


class EndpointHandler:
    def __init__(self, path=""):
        """
        Load the precomputed sign embeddings and the CLIP model used to
        embed text queries.
        """
        # Sign IDs, aligned index-for-index with the rows of the embedding matrix.
        self.sign_ids = np.load(os.path.join(path, "sign_ids.npy"))
        # Precomputed, L2-normalized CLIP image embeddings for the signs.
        self.sign_embeddings = np.load(
            os.path.join(path, "vanilla_large-patch14_image_embeddings_normalized.npy")
        )
        hf_model_path = "openai/clip-vit-large-patch14"
        self.model = CLIPModel.from_pretrained(hf_model_path)
        self.tokenizer = CLIPTokenizer.from_pretrained(hf_model_path)
    def __call__(self, data: Dict[str, Any]) -> List[List[Any]]:
        """
        data args:
            inputs (:obj:`str`): the text query to match against the sign embeddings
        Return:
            A :obj:`list` of two lists, [sign_ids, scores], which will be
            serialized and returned
        """
        # Embed the text query with CLIP and L2-normalize it.
        token_inputs = self.tokenizer(
            [data["inputs"]], padding=True, return_tensors="pt"
        )
        query_embed = self.model.get_text_features(**token_inputs)
        np_query_embed = query_embed.detach().cpu().numpy()[0]
        np_query_embed /= np.linalg.norm(np_query_embed)

        # Compute the cosine similarity; note the embeddings are normalized.
        # The weight is arbitrary, but it makes the scores easier to think about.
        w = 2.5
        threshold = 0.475
        cos_similarities = w * (self.sign_embeddings @ np_query_embed)

        # Keep only the signs whose weighted similarity clears the threshold,
        # ranked from most to least similar.
        count_above_threshold = np.sum(cos_similarities > threshold)
        sign_id_arg_rankings = np.argsort(cos_similarities)[::-1]
        threshold_id_arg_rankings = sign_id_arg_rankings[:count_above_threshold]
        result_sign_ids = self.sign_ids[threshold_id_arg_rankings]
        result_sign_scores = cos_similarities[threshold_id_arg_rankings]
        return [result_sign_ids.tolist(), result_sign_scores.tolist()]
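

# Hypothetical local smoke test, not part of the endpoint contract: it assumes
# "sign_ids.npy" and "vanilla_large-patch14_image_embeddings_normalized.npy"
# sit in the current working directory, and the query below is just an example.
if __name__ == "__main__":
    handler = EndpointHandler(path=".")
    sign_ids, scores = handler({"inputs": "a red octagonal stop sign"})
    for sign_id, score in zip(sign_ids, scores):
        print(f"{sign_id}\t{score:.3f}")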