sohojoe committed on
Commit
7bef5db
1 Parent(s): 1731153

clip_app_client should return tensors

Browse files
.env ADDED
@@ -0,0 +1 @@
 
 
1
+ HTTP_ADDRESS="http://192.168.7.79:8000"
experimental/clip_app_client.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import os
2
  import numpy as np
3
  import requests
@@ -40,11 +41,16 @@ class ClipAppClient:
40
  :param image_url: str, URL of the image to preprocess
41
  :return: torch.Tensor, preprocessed image
42
  """
43
- response = requests.get(image_url)
44
- input_image = Image.open(BytesIO(response.content)).convert('RGB')
45
- input_image = np.array(input_image)
46
- input_im = Image.fromarray(input_image)
47
- prepro = self.preprocess(input_im).unsqueeze(0).cpu()
 
 
 
 
 
48
  return prepro
49
 
50
  def text_to_embedding(self, text):
@@ -60,6 +66,8 @@ class ClipAppClient:
60
  url = os.environ.get("HTTP_ADDRESS", "http://127.0.0.1:8000/")
61
  response = requests.post(url, files=payload)
62
  embeddings = response.text
 
 
63
  return embeddings
64
 
65
  def image_url_to_embedding(self, image_url):
@@ -75,6 +83,8 @@ class ClipAppClient:
75
  url = os.environ.get("HTTP_ADDRESS", "http://127.0.0.1:8000/")
76
  response = requests.post(url, files=payload)
77
  embeddings = response.text
 
 
78
  return embeddings
79
 
80
  def preprocessed_image_to_embedding(self, image):
@@ -96,5 +106,7 @@ class ClipAppClient:
96
  url = os.environ.get("HTTP_ADDRESS", "http://127.0.0.1:8000/")
97
  response = requests.post(url, files=payload)
98
  embeddings = response.text
 
 
99
  return embeddings
100
 
 
1
+ import json
2
  import os
3
  import numpy as np
4
  import requests
 
41
  :param image_url: str, URL of the image to preprocess
42
  :return: torch.Tensor, preprocessed image
43
  """
44
+ if os.path.isfile(image_url):
45
+ input_image = Image.open(image_url).convert('RGB')
46
+ input_image = np.array(input_image)
47
+ input_image = Image.fromarray(input_image)
48
+ else:
49
+ response = requests.get(image_url)
50
+ input_image = Image.open(BytesIO(response.content)).convert('RGB')
51
+ input_image = np.array(input_image)
52
+ input_image = Image.fromarray(input_image)
53
+ prepro = self.preprocess(input_image).unsqueeze(0).cpu()
54
  return prepro
55
 
56
  def text_to_embedding(self, text):
 
66
  url = os.environ.get("HTTP_ADDRESS", "http://127.0.0.1:8000/")
67
  response = requests.post(url, files=payload)
68
  embeddings = response.text
69
+ embeddings = json.loads(embeddings)
70
+ embeddings = torch.tensor(embeddings)
71
  return embeddings
72
 
73
  def image_url_to_embedding(self, image_url):
 
83
  url = os.environ.get("HTTP_ADDRESS", "http://127.0.0.1:8000/")
84
  response = requests.post(url, files=payload)
85
  embeddings = response.text
86
+ embeddings = json.loads(embeddings)
87
+ embeddings = torch.tensor(embeddings)
88
  return embeddings
89
 
90
  def preprocessed_image_to_embedding(self, image):
 
106
  url = os.environ.get("HTTP_ADDRESS", "http://127.0.0.1:8000/")
107
  response = requests.post(url, files=payload)
108
  embeddings = response.text
109
+ embeddings = json.loads(embeddings)
110
+ embeddings = torch.tensor(embeddings)
111
  return embeddings
112
 
experimental/clip_app_performance_test.py CHANGED
@@ -35,8 +35,7 @@ def process(numbers, send_func, max_workers=10):
35
  futures = [executor.submit(send_func, number) for number in numbers]
36
  for future in as_completed(futures):
37
  n_result, result = future.result()
38
- result = json.loads(result)
39
- print (f"{n_result} : {len(result[0])}")
40
 
41
  if __name__ == "__main__":
42
  n_calls = 300
 
35
  futures = [executor.submit(send_func, number) for number in numbers]
36
  for future in as_completed(futures):
37
  n_result, result = future.result()
38
+ print (f"{n_result} : {result.shape}")
 
39
 
40
  if __name__ == "__main__":
41
  n_calls = 300