ovi054 committed on
Commit
e813159
·
verified ·
1 Parent(s): 449d6db

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -98
app.py CHANGED
@@ -23,6 +23,13 @@ pipe.fuse_lora()
23
  # optimize_pipeline_(pipe, image=Image.new("RGB", (512, 512)), prompt='prompt')
24
 
25
 
 
 
 
 
 
 
 
26
  def add_overlay(base_img, overlay_img, margin=20):
27
  """
28
  Pastes an overlay image onto the top-right corner of a base image.
@@ -67,104 +74,6 @@ def add_overlay(base_img, overlay_img, margin=20):
67
  return base
68
 
69
 
70
- # def add_overlay(base_img, overlay_img, margin=20, target_width=200):
71
- # if base_img is None or overlay_img is None:
72
- # return base_img
73
-
74
- # base = base_img.convert("RGBA")
75
- # overlay = overlay_img.convert("RGBA")
76
-
77
- # # Keep aspect ratio, resize overlay to target width
78
- # w, h = overlay.size
79
- # new_height = int(h * (target_width / w))
80
- # overlay = overlay.resize((target_width, new_height), Image.LANCZOS)
81
-
82
- # # Position: top-right with margin
83
- # x = base.width - overlay.width - margin
84
- # y = margin
85
-
86
- # # Paste overlay on base with transparency
87
- # base.paste(overlay, (x, y), overlay)
88
- # return base
89
-
90
-
91
- # @spaces.GPU
92
- # def infer(input_image, input_image_upload, overlay_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
93
- # """
94
- # Perform image editing using the FLUX.1 Kontext pipeline.
95
-
96
- # This function takes an input image and a text prompt to generate a modified version
97
- # of the image based on the provided instructions. It uses the FLUX.1 Kontext model
98
- # for contextual image editing tasks.
99
-
100
- # Args:
101
- # input_image (PIL.Image.Image): The input image to be edited. Will be converted
102
- # to RGB format if not already in that format.
103
- # prompt (str): Text description of the desired edit to apply to the image.
104
- # Examples: "Remove glasses", "Add a hat", "Change background to beach".
105
- # seed (int, optional): Random seed for reproducible generation. Defaults to 42.
106
- # Must be between 0 and MAX_SEED (2^31 - 1).
107
- # randomize_seed (bool, optional): If True, generates a random seed instead of
108
- # using the provided seed value. Defaults to False.
109
- # guidance_scale (float, optional): Controls how closely the model follows the
110
- # prompt. Higher values mean stronger adherence to the prompt but may reduce
111
- # image quality. Range: 1.0-10.0. Defaults to 2.5.
112
- # steps (int, optional): Controls how many steps to run the diffusion model for.
113
- # Range: 1-30. Defaults to 28.
114
- # progress (gr.Progress, optional): Gradio progress tracker for monitoring
115
- # generation progress. Defaults to gr.Progress(track_tqdm=True).
116
-
117
- # Returns:
118
- # tuple: A 3-tuple containing:
119
- # - PIL.Image.Image: The generated/edited image
120
- # - int: The seed value used for generation (useful when randomize_seed=True)
121
- # - gr.update: Gradio update object to make the reuse button visible
122
-
123
- # Example:
124
- # >>> edited_image, used_seed, button_update = infer(
125
- # ... input_image=my_image,
126
- # ... prompt="Add sunglasses",
127
- # ... seed=123,
128
- # ... randomize_seed=False,
129
- # ... guidance_scale=2.5
130
- # ... )
131
- # """
132
- # if randomize_seed:
133
- # seed = random.randint(0, MAX_SEED)
134
-
135
- # if input_image_upload is not None:
136
- # input_image_upload = input_image
137
- # elif "composite" in input_image and input_image["composite"] is not None:
138
- # input_image = input_image["composite"]
139
- # elif "background" in input_image and input_image["background"] is not None:
140
- # input_image = input_image["background"]
141
- # else:
142
- # raise ValueError("No valid image found in EditorValue dict (both 'composite' and 'background' are None)")
143
-
144
-
145
- # if input_image is not None:
146
- # if overlay_image is not None:
147
- # input_image = add_overlay(input_image, overlay_image)
148
-
149
- # input_image = input_image.convert("RGB")
150
- # image = pipe(
151
- # image=input_image,
152
- # prompt=prompt,
153
- # guidance_scale=guidance_scale,
154
- # width = input_image.size[0],
155
- # height = input_image.size[1],
156
- # num_inference_steps=steps,
157
- # generator=torch.Generator().manual_seed(seed),
158
- # ).images[0]
159
- # else:
160
- # image = pipe(
161
- # prompt=prompt,
162
- # guidance_scale=guidance_scale,
163
- # num_inference_steps=steps,
164
- # generator=torch.Generator().manual_seed(seed),
165
- # ).images[0]
166
- # return image, input_image, seed, gr.Button(visible=True)
167
-
168
 
169
  @spaces.GPU
170
  def infer(input_image, input_image_upload, overlay_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
@@ -272,8 +181,16 @@ Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro
272
  )
273
  with gr.TabItem("Upload"):
274
  input_image_upload = gr.Image(label="Upload the drawing", type="pil")
 
 
 
 
275
  with gr.Row():
276
  overlay_image = gr.Image(label="Upload face photo", type="pil")
 
 
 
 
277
  with gr.Row():
278
  prompt = gr.Text(
279
  label="Prompt",
 
23
  # optimize_pipeline_(pipe, image=Image.new("RGB", (512, 512)), prompt='prompt')
24
 
25
 
26
+ import os
27
+
28
+ EXAMPLES_DIR = "examples"
29
+ BASE_EXAMPLES = [os.path.join(EXAMPLES_DIR, "base", f) for f in sorted(os.listdir(os.path.join(EXAMPLES_DIR, "base")))]
30
+ FACE_EXAMPLES = [os.path.join(EXAMPLES_DIR, "face", f) for f in sorted(os.listdir(os.path.join(EXAMPLES_DIR, "face")))]
31
+
32
+
33
  def add_overlay(base_img, overlay_img, margin=20):
34
  """
35
  Pastes an overlay image onto the top-right corner of a base image.
 
74
  return base
75
 
76
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
 
78
  @spaces.GPU
79
  def infer(input_image, input_image_upload, overlay_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
 
181
  )
182
  with gr.TabItem("Upload"):
183
  input_image_upload = gr.Image(label="Upload the drawing", type="pil")
184
+ gr.Examples(
185
+ examples=[[img] for img in BASE_EXAMPLES],
186
+ inputs=[input_image_upload],
187
+ )
188
  with gr.Row():
189
  overlay_image = gr.Image(label="Upload face photo", type="pil")
190
+ gr.Examples(
191
+ examples=[[img] for img in FACE_EXAMPLES],
192
+ inputs=[overlay_image],
193
+ )
194
  with gr.Row():
195
  prompt = gr.Text(
196
  label="Prompt",