sohojoe committed
Commit 44fc764
1 Parent(s): ed56b85

move api helpers to api_helper.py

Files changed (3):
  1. __pycache__/api_helper.cpython-310.pyc +0 -0
  2. api_helper.py +56 -0
  3. app.py +2 -54
__pycache__/api_helper.cpython-310.pyc ADDED
Binary file (1.31 kB).
 
api_helper.py ADDED
@@ -0,0 +1,56 @@
+ from PIL import Image
+ import numpy as np
+ import base64
+ import json
+ from torchvision.transforms import Compose, Resize, CenterCrop
+
+ # support sending images as base64
+
+ def encode_numpy_array(image_np):
+     # Flatten the numpy array and convert it to bytes
+     image_bytes = image_np.tobytes()
+
+     # Encode the byte data as base64
+     encoded_image = base64.b64encode(image_bytes).decode()
+     payload = {
+         "encoded_image": encoded_image,
+         "width": image_np.shape[1],
+         "height": image_np.shape[0],
+         "channels": image_np.shape[2],
+     }
+     payload_json = json.dumps(payload)
+     return payload_json
+
+ def decode_numpy_array(payload):
+     payload_json = json.loads(payload)
+     # payload_json = payload.json()
+     encoded_image = payload_json["encoded_image"]
+     width = payload_json["width"]
+     height = payload_json["height"]
+     channels = payload_json["channels"]
+     # Decode the base64 data
+     decoded_image = base64.b64decode(encoded_image)
+
+     # Convert the byte data back to a NumPy array
+     image_np = np.frombuffer(decoded_image, dtype=np.uint8).reshape(height, width, channels)
+
+     return image_np
+
+
+ def preprocess_image(image_np, max_size=224):
+     # Convert the numpy array to a PIL image
+     image = Image.fromarray(image_np)
+
+     # Define the transformation pipeline
+     transforms = Compose([
+         Resize(max_size, interpolation=Image.BICUBIC),
+         CenterCrop(max_size),
+     ])
+
+     # Apply the transformations to the image
+     image = transforms(image)
+
+     # Convert the PIL image back to a numpy array
+     image_np = np.array(image)
+
+     return image_np
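
For reference, a minimal round-trip sketch of the payload these helpers define: encode_numpy_array packs the raw image bytes plus width/height/channels into a JSON string, and decode_numpy_array rebuilds the array from it. The random test image below is illustrative only, not taken from the repo:

import numpy as np
from api_helper import encode_numpy_array, decode_numpy_array, preprocess_image

# Illustrative input: any HxWx3 uint8 array works
image_np = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

# Resize the short edge and center-crop to 224x224 (preprocess_image's default)
image_np = preprocess_image(image_np, max_size=224)

payload_json = encode_numpy_array(image_np)   # JSON string with encoded_image, width, height, channels
restored = decode_numpy_array(payload_json)   # back to a (224, 224, 3) uint8 array

assert np.array_equal(image_np, restored)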
app.py CHANGED
@@ -9,6 +9,8 @@ import math
  # from transformers import CLIPTextModel, CLIPTokenizer
  import os
 
+ from api_helper import encode_numpy_array, decode_numpy_array, preprocess_image
+
  # clip_model_id = "openai/clip-vit-large-patch14-336"
  # clip_retrieval_indice_name, clip_model_id ="laion5B-L-14", "/laion/CLIP-ViT-L-14-laion2B-s32B-b82K"
  clip_retrieval_service_url = "https://knn.laion.ai/knn-service"
@@ -35,61 +37,7 @@ def debug_print(*args, **kwargs):
  if debug_print_on:
      print(*args, **kwargs)
 
- # support sending images as base64
-
- def encode_numpy_array(image_np):
-     import base64
-     import json
-     # Flatten the numpy array and convert it to bytes
-     image_bytes = image_np.tobytes()
-
-     # Encode the byte data as base64
-     encoded_image = base64.b64encode(image_bytes).decode()
-     payload = {
-         "encoded_image": encoded_image,
-         "width": image_np.shape[1],
-         "height": image_np.shape[0],
-         "channels": image_np.shape[2],
-     }
-     payload_json = json.dumps(payload)
-     return payload_json
-
- def decode_numpy_array(payload):
-     import base64
-     import json
-     payload_json = json.loads(payload)
-     # payload_json = payload.json()
-     encoded_image = payload_json["encoded_image"]
-     width = payload_json["width"]
-     height = payload_json["height"]
-     channels = payload_json["channels"]
-     # Decode the base64 data
-     decoded_image = base64.b64decode(encoded_image)
-
-     # Convert the byte data back to a NumPy array
-     image_np = np.frombuffer(decoded_image, dtype=np.uint8).reshape(height, width, channels)
-
-     return image_np
-
-
- def preprocess_image(image_np, max_size=224):
-     from torchvision.transforms import Compose, Resize, CenterCrop
-     # Convert the numpy array to a PIL image
-     image = Image.fromarray(image_np)
-
-     # Define the transformation pipeline
-     transforms = Compose([
-         Resize(max_size, interpolation=Image.BICUBIC),
-         CenterCrop(max_size),
-     ])
-
-     # Apply the transformations to the image
-     image = transforms(image)
-
-     # Convert the PIL image back to a numpy array
-     image_np = np.array(image)
 
-     return image_np
 
  def image_to_embedding(input_im):
      # debug_print("image_to_embedding")
 
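
With app.py now importing these helpers, a client could ship an image to the service as the JSON payload described above. The sketch below is an assumption for illustration: the endpoint URL and the use of requests are hypothetical, since app.py's actual routing is not part of this diff:

import requests
import numpy as np
from PIL import Image
from api_helper import preprocess_image, encode_numpy_array

API_URL = "http://localhost:7860/api/embed"  # hypothetical endpoint, not defined in this diff

# Load a local image and shrink it to the 224x224 crop the helpers expect
image_np = np.array(Image.open("example.jpg").convert("RGB"))
payload_json = encode_numpy_array(preprocess_image(image_np))

# encode_numpy_array already returns a JSON string, so send it as the raw body
response = requests.post(API_URL, data=payload_json,
                         headers={"Content-Type": "application/json"})
print(response.status_code)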