Vidensogende committed
Commit
3cb2e49
1 Parent(s): f24cffe

removed comments

Files changed (1)
  1. handler.py +0 -57
handler.py CHANGED
@@ -1,59 +1,3 @@
-# import requests
-# from PIL import Image
-# from transformers import BlipProcessor, BlipForConditionalGeneration
-# import torch
-# from typing import Dict, List, Any
-
-# class EndpointHandler():
-#     def __init__(self, path=""):
-#         self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
-#         self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
-
-#         self.device = "cuda" if torch.cuda.is_available() else "cpu"
-#         self.model.to(self.device)
-
-#     def process_single_image(self, img_url, text=None):
-#         # Loading and processing the image
-#         raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
-#         if text:
-#             # Conditional image captioning
-#             inputs = self.processor(raw_image, text, return_tensors="pt").to(self.device)
-#         else:
-#             # Unconditional image captioning
-#             inputs = self.processor(raw_image, return_tensors="pt").to(self.device)
-
-#         out = self.model.generate(**inputs)
-#         return self.processor.decode(out[0], skip_special_tokens=True)
-
-#     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
-#         try:
-#             print(f"Received data: {data}")
-
-#             if not data or "image_urls" not in data:
-#                 return [{"error": "No image URLs provided in the request."}]
-
-#             img_urls = data.get("image_urls")
-#             texts = data.get("texts", [None] * len(img_urls))  # Texts are optional for conditional captioning
-
-#             # Check if inputs are for single or multiple images
-#             if isinstance(img_urls, str):
-#                 img_urls = [img_urls]
-#                 texts = [texts]
-
-#             captions = []
-#             for img_url, text in zip(img_urls, texts):
-#                 caption = self.process_single_image(img_url, text)
-#                 captions.append({"image_url": img_url, "caption": caption})
-
-#             return captions
-#         except Exception as e:
-#             print(f"Error processing data: {e}")
-#             return [{"error": str(e)}]
-
-# # You may need to add a function to load this handler if the inference toolkit expects it
-# def get_pipeline(model_dir, task):
-#     return EndpointHandler(model_dir)
-
 import requests
 from PIL import Image
 from transformers import BlipProcessor, BlipForConditionalGeneration
@@ -69,7 +13,6 @@ class EndpointHandler():
         self.model.to(self.device)
 
     def process_single_image(self, img_url, text=None):
-        # Loading and processing the image
         raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
         if text:
             inputs = self.processor(raw_image, text, return_tensors="pt").to(self.device)
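
The deleted block was a commented-out duplicate of the live handler below it. For reference, here is a minimal local smoke test of that handler. This is a sketch, not part of the repository: it assumes handler.py is importable from the working directory, that the live __call__ mirrors the deleted commented copy (payload keys "image_urls" and optional "texts"), that the Salesforce/blip-image-captioning-large weights can be downloaded, and that the image URLs below are placeholders.

# Hypothetical local smoke test (not part of this repo).
# Assumes handler.py is on the import path and network access is available.
from handler import EndpointHandler

handler = EndpointHandler()

# Batch request: one conditioned caption, one unconditioned.
# Keys mirror what the (previously commented) __call__ reads from the payload.
payload = {
    "image_urls": [
        "https://example.com/a.jpg",  # placeholder URL
        "https://example.com/b.jpg",  # placeholder URL
    ],
    "texts": ["a photograph of", None],  # None falls back to unconditional captioning
}

for item in handler(payload):
    print(item.get("image_url"), "->", item.get("caption", item.get("error")))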