Updated handler.py
handler.py  CHANGED  (+11 -7)
@@ -4,9 +4,7 @@ from transformers import Blip2Processor, Blip2ForConditionalGeneration
 
 from PIL import Image
 from io import BytesIO
-import torch
-import os
-
+import torch, os, base64
 
 
 class EndpointHandler:
@@ -28,11 +26,14 @@ class EndpointHandler:
             - "caption": A string corresponding to the generated caption.
         """
 
-        inputs = data
+        inputs = data["inputs"]
         # parameters = data.pop("parameters", {})
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-
+
+        if type(inputs) is str:
+            inputs = inputs.encode('utf-8')
+
+        raw_images = Image.open(BytesIO(inputs))
 
         processed_image = self.processor(images=raw_images, return_tensors="pt").to(device)
 
@@ -41,4 +42,7 @@ class EndpointHandler:
         captions = self.processor.decode(out[0], skip_special_tokens=True)
 
         # postprocess the prediction
-        return {"captions": captions}
+        return {"captions": captions}
+
+
+EndpointHandler()
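For reference, handlers following the Hugging Face Inference Endpoints custom-handler convention receive a dict payload and return a dict. The snippet below is a minimal local smoke test of the updated handler, not part of this commit: the test image path is hypothetical, and the no-argument constructor and a __call__(data) entry point are assumed from the diff rather than taken from documented API.

# minimal local smoke test sketch (assumes handler.py is importable from the working directory)
from handler import EndpointHandler

handler = EndpointHandler()  # no-argument construction, as at the end of the commit

# pass raw image bytes under the "inputs" key the updated handler reads;
# bytes flow straight into Image.open(BytesIO(inputs))
with open("cat.jpg", "rb") as f:  # hypothetical test image
    payload = {"inputs": f.read()}

print(handler(payload)["captions"])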