ovi054 committed on
Commit
1bfde71
·
verified ·
1 Parent(s): e5e37ae

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -72
app.py CHANGED
@@ -18,7 +18,7 @@ from diffusers.utils import load_image
18
  MAX_SEED = np.iinfo(np.int32).max
19
 
20
  pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
21
- pipe.load_lora_weights("ovi054/Draw2Photo-Kontext-LoRA")
22
  pipe.fuse_lora()
23
  # optimize_pipeline_(pipe, image=Image.new("RGB", (512, 512)), prompt='prompt')
24
 
@@ -30,53 +30,53 @@ BASE_EXAMPLES = [os.path.join(EXAMPLES_DIR, "base", f) for f in sorted(os.listdi
30
  FACE_EXAMPLES = [os.path.join(EXAMPLES_DIR, "face", f) for f in sorted(os.listdir(os.path.join(EXAMPLES_DIR, "face")))]
31
 
32
 
33
- def add_overlay(base_img, overlay_img, margin=20):
34
- """
35
- Pastes an overlay image onto the top-right corner of a base image.
36
 
37
- The overlay is resized to be 1/5th of the width of the base image,
38
- maintaining its aspect ratio.
39
 
40
- Args:
41
- base_img (PIL.Image.Image): The main image.
42
- overlay_img (PIL.Image.Image): The image to place on top.
43
- margin (int, optional): The pixel margin from the top and right edges. Defaults to 20.
44
 
45
- Returns:
46
- PIL.Image.Image: The combined image.
47
- """
48
- if base_img is None or overlay_img is None:
49
- return base_img
50
 
51
- base = base_img.convert("RGBA")
52
- overlay = overlay_img.convert("RGBA")
53
 
54
- # --- MODIFICATION ---
55
- # Calculate the target width to be 1/5th of the base image's width
56
- target_width = base.width // 5
57
 
58
- # Keep aspect ratio, resize overlay to the newly calculated target width
59
- w, h = overlay.size
60
 
61
- # Add a check to prevent division by zero if the overlay image has no width
62
- if w == 0:
63
- return base
64
 
65
- new_height = int(h * (target_width / w))
66
- overlay = overlay.resize((target_width, new_height), Image.LANCZOS)
67
 
68
- # Position: top-right corner with a margin
69
- x = base.width - overlay.width - margin
70
- y = margin
71
 
72
- # Paste the resized overlay onto the base image using its alpha channel for transparency
73
- base.paste(overlay, (x, y), overlay)
74
- return base
75
 
76
 
77
 
78
  @spaces.GPU
79
- def infer(input_image, input_image_upload, overlay_image, prompt="make it real", seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
80
  """
81
  Perform image editing using the FLUX.1 Kontext pipeline.
82
 
@@ -106,16 +106,7 @@ def infer(input_image, input_image_upload, overlay_image, prompt="make it real",
106
  # 1. Prioritize the uploaded image. If it exists, it becomes our main 'input_image'.
107
  if input_image_upload is not None:
108
  processed_input_image = input_image_upload
109
- # 2. If no image was uploaded, check the drawing canvas.
110
- elif isinstance(input_image, dict):
111
- # Extract the actual image from the dictionary provided by gr.Paint
112
- if "composite" in input_image and input_image["composite"] is not None:
113
- processed_input_image = input_image["composite"]
114
- elif "background" in input_image and input_image["background"] is not None:
115
- processed_input_image = input_image["background"]
116
- else:
117
- # The canvas is empty, so there's no input image.
118
- processed_input_image = None
119
  else:
120
  # Fallback in case the input is neither from upload nor a valid canvas dict.
121
  processed_input_image = None
@@ -124,9 +115,6 @@ def infer(input_image, input_image_upload, overlay_image, prompt="make it real",
124
 
125
  # From this point on, 'processed_input_image' is either a PIL Image or None.
126
  if processed_input_image is not None:
127
- if overlay_image is not None:
128
- # Now this function is guaranteed to receive a PIL Image.
129
- processed_input_image = add_overlay(processed_input_image, overlay_image)
130
 
131
  processed_input_image = processed_input_image.convert("RGB")
132
  image = pipe(
@@ -147,7 +135,7 @@ def infer(input_image, input_image_upload, overlay_image, prompt="make it real",
147
  generator=torch.Generator().manual_seed(seed),
148
  ).images[0]
149
 
150
- return image, processed_input_image, seed, gr.Button(visible=False)
151
 
152
  @spaces.GPU
153
  def infer_example(input_image, prompt):
@@ -171,36 +159,26 @@ Turn drawing+face into a realistic photo with FLUX.1 Kontext [dev] + [Draw2Photo
171
  """)
172
  with gr.Row():
173
  with gr.Column():
174
- gr.Markdown("Step 1. Select/Upload/Draw a person ⬇️")
175
  # input_image = gr.Image(label="Upload drawing", type="pil")
176
  with gr.Row():
177
- with gr.Tabs() as tabs:
178
- with gr.TabItem("Upload"):
179
- input_image_upload = gr.Image(label="Upload drawing", type="pil")
180
-
181
- with gr.TabItem("Draw"):
182
- input_image = gr.Paint(
183
- type="pil",
184
- brush=gr.Brush(default_size=6, colors=["#000000"], color_mode="fixed"),
185
- canvas_size = (1200,1200),
186
- layers = False
187
- )
188
  gr.Examples(
189
  examples=[[img] for img in BASE_EXAMPLES],
190
  inputs=[input_image_upload],
191
  )
192
 
193
- with gr.Column():
194
- gr.Markdown("Step 2. Select/Upload a face photo ⬇️")
195
- with gr.Row():
196
- overlay_image = gr.Image(label="Upload face photo", type="pil")
197
- gr.Examples(
198
- examples=[[img] for img in FACE_EXAMPLES],
199
- inputs=[overlay_image],
200
- )
201
 
202
  with gr.Column():
203
- gr.Markdown("Step 3. Press “Run” to get results ⬇️")
204
  with gr.Row():
205
  run_button = gr.Button("Run")
206
  with gr.Accordion("Advanced Settings", open=False):
@@ -208,7 +186,7 @@ Turn drawing+face into a realistic photo with FLUX.1 Kontext [dev] + [Draw2Photo
208
  prompt = gr.Text(
209
  label="Prompt",
210
  max_lines=1,
211
- value = "make it real",
212
  placeholder="Enter your prompt for editing (e.g., 'Remove glasses', 'Add a hat')",
213
  container=False,
214
  )
@@ -258,8 +236,8 @@ Turn drawing+face into a realistic photo with FLUX.1 Kontext [dev] + [Draw2Photo
258
  gr.on(
259
  triggers=[run_button.click, prompt.submit],
260
  fn = infer,
261
- inputs = [input_image, input_image_upload, overlay_image, prompt, seed, randomize_seed, guidance_scale, steps],
262
- outputs = [result, result_input, seed, reuse_button]
263
  )
264
  # reuse_button.click(
265
  # fn = lambda image: image,
 
18
  MAX_SEED = np.iinfo(np.int32).max
19
 
20
  pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
21
+ pipe.load_lora_weights("ovi054/virtual-tryon-kontext-lora")
22
  pipe.fuse_lora()
23
  # optimize_pipeline_(pipe, image=Image.new("RGB", (512, 512)), prompt='prompt')
24
 
 
30
  FACE_EXAMPLES = [os.path.join(EXAMPLES_DIR, "face", f) for f in sorted(os.listdir(os.path.join(EXAMPLES_DIR, "face")))]
31
 
32
 
33
+ # def add_overlay(base_img, overlay_img, margin=20):
34
+ # """
35
+ # Pastes an overlay image onto the top-right corner of a base image.
36
 
37
+ # The overlay is resized to be 1/5th of the width of the base image,
38
+ # maintaining its aspect ratio.
39
 
40
+ # Args:
41
+ # base_img (PIL.Image.Image): The main image.
42
+ # overlay_img (PIL.Image.Image): The image to place on top.
43
+ # margin (int, optional): The pixel margin from the top and right edges. Defaults to 20.
44
 
45
+ # Returns:
46
+ # PIL.Image.Image: The combined image.
47
+ # """
48
+ # if base_img is None or overlay_img is None:
49
+ # return base_img
50
 
51
+ # base = base_img.convert("RGBA")
52
+ # overlay = overlay_img.convert("RGBA")
53
 
54
+ # # --- MODIFICATION ---
55
+ # # Calculate the target width to be 1/5th of the base image's width
56
+ # target_width = base.width // 5
57
 
58
+ # # Keep aspect ratio, resize overlay to the newly calculated target width
59
+ # w, h = overlay.size
60
 
61
+ # # Add a check to prevent division by zero if the overlay image has no width
62
+ # if w == 0:
63
+ # return base
64
 
65
+ # new_height = int(h * (target_width / w))
66
+ # overlay = overlay.resize((target_width, new_height), Image.LANCZOS)
67
 
68
+ # # Position: top-right corner with a margin
69
+ # x = base.width - overlay.width - margin
70
+ # y = margin
71
 
72
+ # # Paste the resized overlay onto the base image using its alpha channel for transparency
73
+ # base.paste(overlay, (x, y), overlay)
74
+ # return base
75
 
76
 
77
 
78
  @spaces.GPU
79
+ def infer(input_image_upload, prompt="wear it", seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
80
  """
81
  Perform image editing using the FLUX.1 Kontext pipeline.
82
 
 
106
  # 1. Prioritize the uploaded image. If it exists, it becomes our main 'input_image'.
107
  if input_image_upload is not None:
108
  processed_input_image = input_image_upload
109
+
 
 
 
 
 
 
 
 
 
110
  else:
111
  # Fallback in case the input is neither from upload nor a valid canvas dict.
112
  processed_input_image = None
 
115
 
116
  # From this point on, 'processed_input_image' is either a PIL Image or None.
117
  if processed_input_image is not None:
 
 
 
118
 
119
  processed_input_image = processed_input_image.convert("RGB")
120
  image = pipe(
 
135
  generator=torch.Generator().manual_seed(seed),
136
  ).images[0]
137
 
138
+ return image, seed, gr.Button(visible=False)
139
 
140
  @spaces.GPU
141
  def infer_example(input_image, prompt):
 
159
  """)
160
  with gr.Row():
161
  with gr.Column():
162
+ gr.Markdown("Step 1. Select/Upload a model image + clothes overlay to try on ⬇️")
163
  # input_image = gr.Image(label="Upload drawing", type="pil")
164
  with gr.Row():
165
+ input_image_upload = gr.Image(label="Upload drawing", type="pil")
 
 
 
 
 
 
 
 
 
 
166
  gr.Examples(
167
  examples=[[img] for img in BASE_EXAMPLES],
168
  inputs=[input_image_upload],
169
  )
170
 
171
+ # with gr.Column():
172
+ # gr.Markdown("Step 2. Select/Upload a face photo ⬇️")
173
+ # with gr.Row():
174
+ # overlay_image = gr.Image(label="Upload face photo", type="pil")
175
+ # gr.Examples(
176
+ # examples=[[img] for img in FACE_EXAMPLES],
177
+ # inputs=[overlay_image],
178
+ # )
179
 
180
  with gr.Column():
181
+ gr.Markdown("Step 2. Press “Run” to get results ⬇️")
182
  with gr.Row():
183
  run_button = gr.Button("Run")
184
  with gr.Accordion("Advanced Settings", open=False):
 
186
  prompt = gr.Text(
187
  label="Prompt",
188
  max_lines=1,
189
+ value = "wear it",
190
  placeholder="Enter your prompt for editing (e.g., 'Remove glasses', 'Add a hat')",
191
  container=False,
192
  )
 
236
  gr.on(
237
  triggers=[run_button.click, prompt.submit],
238
  fn = infer,
239
+ inputs = [input_image_upload, prompt, seed, randomize_seed, guidance_scale, steps],
240
+ outputs = [result, seed, reuse_button]
241
  )
242
  # reuse_button.click(
243
  # fn = lambda image: image,