VishalD1234 committed on
Commit
28421c3
1 Parent(s): 126a6cb

Create handler.py

Files changed (1)
  1. handler.py +48 -0
handler.py ADDED
@@ -0,0 +1,48 @@
+ from typing import Dict, List, Any
+ from PIL import Image
+ import torch
+ import os
+ from io import BytesIO
+ from transformers import BlipForConditionalGeneration, BlipProcessor
+
+ # Run on GPU when available, otherwise fall back to CPU.
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # Load the BLIP processor and captioning model once at startup.
+         self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+         self.model = BlipForConditionalGeneration.from_pretrained(
+             "Salesforce/blip-image-captioning-base"
+         ).to(device)
+         self.model.eval()
+
+     def __call__(self, data: Any) -> Dict[str, Any]:
+         """
+         Args:
+             data (:obj:`dict`):
+                 Includes the input data and the parameters for the inference.
+         Return:
+             A :obj:`dict` with a single list, e.g. {"captions": ["A hugging face at the office"]}, containing:
+                 - "captions": the generated caption strings, one per input image.
+         """
+         inputs = data.pop("inputs", data)
+         parameters = data.pop("parameters", {})
+
+         # Decode the raw image bytes into PIL images.
+         raw_images = [Image.open(BytesIO(_img)) for _img in inputs]
+
+         # Preprocess the batch and move the pixel values to the model's device.
+         processed_image = self.processor(images=raw_images, return_tensors="pt")
+         processed_image["pixel_values"] = processed_image["pixel_values"].to(device)
+         # Forward any generation parameters (e.g. max_new_tokens) to generate().
+         processed_image = {**processed_image, **parameters}
+
+         with torch.no_grad():
+             out = self.model.generate(**processed_image)
+
+         # Postprocess the prediction into plain caption strings.
+         captions = self.processor.batch_decode(out, skip_special_tokens=True)
+         return {"captions": captions}
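
A minimal local sketch of how this handler could be exercised, assuming handler.py is importable from the working directory and a sample file image.jpg exists (both names, and the max_new_tokens parameter, are illustrative):

from handler import EndpointHandler

# Instantiate the handler; the path argument is unused because the model id is hard-coded.
handler = EndpointHandler(path=".")

# The handler expects "inputs" to be a list of raw image bytes; optional
# "parameters" are merged in and passed through to model.generate().
with open("image.jpg", "rb") as f:
    image_bytes = f.read()

result = handler({"inputs": [image_bytes], "parameters": {"max_new_tokens": 20}})
print(result["captions"])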