parokshsaxena committed
Commit 8c02e1d • 1 parent: b608c7b

Handling both Shein images and VITON standard images

Files changed (1):
  app.py  +17 -7
app.py CHANGED

@@ -1,4 +1,5 @@
 import logging
+import math
 import gradio as gr
 from PIL import Image
 from src.tryon_pipeline import StableDiffusionXLInpaintPipeline as TryonPipeline

@@ -135,16 +136,25 @@ CATEGORY = "upper_body" # "lower_body"
 
 @spaces.GPU
 def start_tryon(dict,garm_img,garment_des, background_img, is_checked,is_checked_crop,denoise_steps,seed):
-    device = "cuda"
-    # device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
+    #device = "cuda"
+    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
 
     openpose_model.preprocessor.body_estimation.model.to(device)
     pipe.to(device)
     pipe.unet_encoder.to(device)
+
+    #human_img_orig = dict["background"].convert("RGB") # ImageEditor
+    human_img_orig = dict.convert("RGB") # Image
+
+    # Derive HEIGHT & WIDTH such that width is not more than 1000. This caters to both Shein images (4160x6240, 2:3 AR) and VITON standard model images (768x1024, 3:4 AR)
+    WIDTH, HEIGHT = human_img_orig.size
+    division_factor = math.ceil(WIDTH/1000)
+    WIDTH = int(WIDTH/division_factor)
+    HEIGHT = int(HEIGHT/division_factor)
+    POSE_WIDTH = int(WIDTH/2)
+    POSE_HEIGHT = int(HEIGHT/2)
 
     garm_img= garm_img.convert("RGB").resize((WIDTH,HEIGHT))
-    # human_img_orig = dict["background"].convert("RGB")
-    human_img_orig = dict.convert("RGB")
     if is_checked_crop:
         width, height = human_img_orig.size
         target_width = int(min(width, height * (3 / 4)))

@@ -272,8 +282,8 @@ human_list = os.listdir(os.path.join(example_path,"human"))
 human_list_path = [os.path.join(example_path,"human",human) for human in human_list]
 
 human_ex_list = []
-human_ex_list = human_list_path
-""" if using ImageEditor instead of Image while taking input, use this
+human_ex_list = human_list_path # Image
+""" if using ImageEditor instead of Image while taking input, use this - ImageEditor
 for ex_human in human_list_path:
     ex_dict= {}
     ex_dict['background'] = ex_human

@@ -293,7 +303,7 @@ with image_blocks as demo:
         with gr.Column():
             # changing from ImageEditor to Image to allow easy passing of data through API
             # instead of passing {"dictionary": <>} ( which is failing ), we can directly pass the image
-            # imgs = gr.ImageEditor(sources='upload', type="pil", label='Human. Mask with pen or use auto-masking', interactive=True)
+            #imgs = gr.ImageEditor(sources='upload', type="pil", label='Human. Mask with pen or use auto-masking', interactive=True)
             imgs = gr.Image(sources='upload', type='pil',label='Human. Mask with pen or use auto-masking')
             with gr.Row():
                 is_checked = gr.Checkbox(label="Yes", info="Use auto-generated mask (Takes 5 seconds)",value=True)
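
The resizing logic added in start_tryon can be read on its own as the short sketch below. It restates the arithmetic from the diff; the helper name derive_dimensions and the max_width parameter are illustrative only, while the two sample sizes are the ones named in the commit's own comment (a 4160x6240 Shein photo and a 768x1024 VITON image).

import math

def derive_dimensions(width, height, max_width=1000):
    # Pick the smallest integer factor that brings the width to max_width or below,
    # then scale both sides by it; pose maps run at half the working resolution.
    division_factor = math.ceil(width / max_width)
    out_w = int(width / division_factor)
    out_h = int(height / division_factor)
    return out_w, out_h, out_w // 2, out_h // 2

print(derive_dimensions(4160, 6240))  # Shein photo, 2:3 AR -> (832, 1248, 416, 624)
print(derive_dimensions(768, 1024))   # VITON standard image, 3:4 AR -> (768, 1024, 384, 512)

Oversized product shots are divided down (factor 5 for a 4160 px wide image), while standard 768x1024 inputs pass through unchanged, which is what lets a single code path serve both sources.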
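On the ImageEditor-to-Image switch noted in the last two hunks: per the commit's comments, the dictionary payload the editor component expects was failing when the Space was driven through the API, whereas a plain image can be posted directly. A hedged sketch of such a client call follows, assuming a recent gradio_client that provides handle_file; the Space id, api_name, file names, and argument values are placeholders, and only the argument order mirrors start_tryon's signature in the diff.

from gradio_client import Client, handle_file

# All identifiers below are placeholders for illustration only.
client = Client("user/tryon-space")  # hypothetical Space id

result = client.predict(
    handle_file("person.jpg"),           # human image, passed directly now that the input is gr.Image
    handle_file("garment.jpg"),          # garment image
    "short sleeve round neck t-shirt",   # garment description
    None,                                # background_img
    True,                                # is_checked: use auto-generated mask
    False,                               # is_checked_crop
    30,                                  # denoise_steps
    42,                                  # seed
    api_name="/tryon",                   # placeholder endpoint name
)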