EMaghakyan committed on
Commit
17063fb
1 Parent(s): fc3fbd2

Removed hardcoded URL

Browse files
Files changed (1) hide show
  1. handler.py +2 -3
handler.py CHANGED
@@ -12,7 +12,7 @@ class EndpointHandler:
12
  self.tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
13
 
14
  def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
15
- parameters = data.pop("parameters", {"mode": "image"})
16
  inputs = data.pop("inputs", data)
17
  with torch.no_grad():
18
  if parameters["mode"] == "text":
@@ -20,8 +20,7 @@ class EndpointHandler:
20
  features = self.model.get_text_features(**inputs)
21
 
22
  if parameters["mode"] == "image":
23
- url = "http://images.cocodataset.org/val2017/000000039769.jpg"
24
- image = Image.open(requests.get(url, stream=True).raw)
25
 
26
  inputs = self.processor(images=image, return_tensors="pt")
27
  features = self.model.get_image_features(**inputs)
 
12
  self.tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
13
 
14
  def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
15
+ parameters = data.pop("parameters", {"mode": "text"})
16
  inputs = data.pop("inputs", data)
17
  with torch.no_grad():
18
  if parameters["mode"] == "text":
 
20
  features = self.model.get_text_features(**inputs)
21
 
22
  if parameters["mode"] == "image":
23
+ image = Image.open(requests.get(inputs, stream=True).raw)
 
24
 
25
  inputs = self.processor(images=image, return_tensors="pt")
26
  features = self.model.get_image_features(**inputs)