Upload folder using huggingface_hub
Files changed:

- .ipynb_checkpoints/handler-checkpoint.py  +10 -5
- .ipynb_checkpoints/helpers-checkpoint.py  +7 -1
- .ipynb_checkpoints/mask_image-checkpoint.jpg  +0 -0
- .ipynb_checkpoints/output_image-checkpoint.jpg  +0 -0
- .ipynb_checkpoints/test_input-checkpoint.json  +0 -0
- handler.py  +10 -5
- helpers.py  +7 -1
- mask_image.jpg  +0 -0
- output_image.jpg  +0 -0
- test_input.json  +0 -0
.ipynb_checkpoints/handler-checkpoint.py CHANGED

Identical to the diff for handler.py below.
.ipynb_checkpoints/helpers-checkpoint.py CHANGED

Identical to the diff for helpers.py below.
.ipynb_checkpoints/mask_image-checkpoint.jpg ADDED (binary JPEG, not rendered)

.ipynb_checkpoints/output_image-checkpoint.jpg ADDED (binary JPEG, not rendered)

.ipynb_checkpoints/test_input-checkpoint.json ADDED (diff too large to render; see the raw file)
handler.py CHANGED

```diff
@@ -1,5 +1,5 @@
 import runpod
-from helpers import
+from helpers import get_result, b64_to_pil, pil_to_b64
 import base64
 from PIL import Image
 
@@ -10,16 +10,21 @@ def handler(job):
     garm_img_b64 = job['input']['garm_img_b64']
     garm_img = b64_to_pil(garm_img_b64)
 
+    body_part = job['input'].get('body_part')
+
     denoise_steps = job['input'].get('denoise_steps') if job['input'].get('denoise_steps') else 30
 
     seed = job['input'].get('seed') if job['input'].get('seed') else 42
-
+
     is_checked_crop = job['input'].get('is_checked_crop') if job['input'].get('is_checked_crop') else False
 
     garment_des = job['input'].get('garment_des') if job['input'].get('garment_des') else ""
 
-
-
+    output_image, mask_image = get_result(human_img, garm_img, body_part, denoise_steps, seed, is_checked_crop, garment_des)
+
+    output_image.save("output_image.jpg")
+    mask_image.save("mask_image.jpg")
+
+    return {"output_image": pil_to_b64(output_image), "mask_image": pil_to_b64(mask_image)}
 
-PIPE = prepare_pipeline()
 runpod.serverless.start({"handler": handler})
```
helpers.py CHANGED

```diff
@@ -26,7 +26,7 @@ from detectron2.data.detection_utils import convert_PIL_to_numpy,_apply_exif_ori
 from torchvision.transforms.functional import to_pil_image
 
 
-def b64_to_pil():
+def b64_to_pil(base64_string):
     # Decode the base64 string
     image_data = base64.b64decode(base64_string)
 
@@ -34,6 +34,12 @@ def b64_to_pil():
     image = Image.open(BytesIO(image_data))
     return image
 
+def pil_to_b64(pil_img):
+    buffered = BytesIO()
+    pil_img.save(buffered, format="PNG")
+    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+    return img_str
+
 def prepare_pipeline():
     pass
     base_path = 'yisol/IDM-VTON'
```
mask_image.jpg ADDED (binary JPEG, not rendered)

output_image.jpg ADDED (binary JPEG, not rendered)

test_input.json ADDED (diff too large to render; see the raw file)
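test_input.json is presumably the payload the runpod SDK uses for local test runs (running `python handler.py` outside a worker executes the handler once against it). Once deployed, a synchronous request against RunPod's serverless API might look like the following sketch; `ENDPOINT_ID`, `API_KEY`, and the payload keys are placeholders or assumptions carried over from the handler diff.

```python
# Hypothetical client call to the deployed worker via RunPod's /runsync route.
import requests

resp = requests.post(
    "https://api.runpod.ai/v2/ENDPOINT_ID/runsync",  # placeholder endpoint ID
    headers={"Authorization": "Bearer API_KEY"},     # placeholder API key
    json={"input": {
        "human_img_b64": "<base64 image>",  # assumed key name
        "garm_img_b64": "<base64 image>",
        "body_part": "upper_body",
        "garment_des": "a red cotton t-shirt",
    }},
    timeout=600,
)
data = resp.json()
# The handler's return value is nested under "output" in the API response
print(data["output"]["mask_image"][:32])
```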