florentgbelidji (HF staff) committed
Commit 74bcad4
1 Parent(s): 9d654a9

Added handler.py

Files changed (1)
  1. handler.py +47 -0
handler.py ADDED
@@ -0,0 +1,47 @@
+ from typing import Dict, Any
+ from PIL import Image
+ import torch
+ from io import BytesIO
+ from transformers import BlipForConditionalGeneration, BlipProcessor
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # load the BLIP processor and captioning model, and move the model to the target device
+         self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+         self.model = BlipForConditionalGeneration.from_pretrained(
+             "Salesforce/blip-image-captioning-base"
+         ).to(device)
+         self.model.eval()
+
+     def __call__(self, data: Any) -> Dict[str, Any]:
+         """
+         Args:
+             data (:obj:`dict`):
+                 includes the input data (a list of raw image bytes under "inputs") and the parameters for the inference.
+         Return:
+             A :obj:`dict` with a single list, e.g. {"captions": ["A hugging face at the office"]}, containing:
+             - "captions": the generated caption string for each input image.
+         """
+         inputs = data.pop("inputs", data)
+         parameters = data.pop("parameters", {})
+
+         # decode the raw image bytes into PIL images
+         raw_images = [Image.open(BytesIO(_img)) for _img in inputs]
+
+         # preprocess the images and move the pixel values to the target device
+         processed_image = self.processor(images=raw_images, return_tensors="pt")
+         processed_image["pixel_values"] = processed_image["pixel_values"].to(device)
+         processed_image = {**processed_image, **parameters}
+
+         # generate caption token ids, then decode them into caption strings
+         with torch.no_grad():
+             out = self.model.generate(**processed_image)
+         captions = self.processor.batch_decode(out, skip_special_tokens=True)
+         # postprocess the prediction
+         return {"captions": captions}
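A handler like this can be smoke-tested locally before deployment. The sketch below is not part of the commit: it assumes the file above is saved as handler.py next to the test script and that a local image file exists (cats.jpg is a placeholder path). The payload shape mirrors what __call__ expects, and anything under "parameters" (here max_new_tokens) is forwarded to model.generate().

# local smoke test for EndpointHandler (sketch; "cats.jpg" is a placeholder path)
from handler import EndpointHandler

handler = EndpointHandler(path=".")

with open("cats.jpg", "rb") as f:
    image_bytes = f.read()

# "inputs" must be a list of raw image bytes; "parameters" is merged into the generate() kwargs
payload = {"inputs": [image_bytes], "parameters": {"max_new_tokens": 30}}
print(handler(payload))  # e.g. {"captions": ["two cats sleeping on a couch"]}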