chitrika committed on
Commit
443bbb6
1 Parent(s): bcaae29

Create handler.py

Files changed (1)
  1. handler.py +23 -0
handler.py ADDED
@@ -0,0 +1,23 @@
+ from typing import Dict, Any
+ import base64
+ from io import BytesIO
+
+ from PIL import Image
+ from transformers import CLIPProcessor, CLIPModel
+
+ class EndpointHandler:
+     def __init__(self, path: str = ""):
+         # Preload the model and processor once so every request reuses them.
+         self.model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+         self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
+
+     def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
+         # Expected payload: {"inputs": {"text": ..., "image": <base64-encoded image>}}
+         inputs = data.get("inputs")
+         text = inputs.get("text")
+         image = Image.open(BytesIO(base64.b64decode(inputs.get("image"))))
+         model_inputs = self.processor(text=text, images=image, return_tensors="pt", padding=True)
+         outputs = self.model(**model_inputs)
+         # Return the flattened CLIP image embedding as a plain list of floats.
+         embeddings = outputs.image_embeds.detach().numpy().flatten().tolist()
+         return {"embeddings": embeddings}
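A quick way to sanity-check the handler locally before deploying (a minimal sketch, not part of the commit; the file name cat.png and the prompt are placeholder inputs):

import base64
from handler import EndpointHandler

handler = EndpointHandler()

# Encode a local image the same way a client would when calling the endpoint.
with open("cat.png", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

payload = {"inputs": {"text": ["a photo of a cat"], "image": image_b64}}
result = handler(payload)
print(len(result["embeddings"]))  # 512 floats for clip-vit-base-patch32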