shgao committed
Commit ecdaa2c
1 Parent(s): 75a1483
Files changed (1)
  1. sam2edit.py +5 -2
sam2edit.py CHANGED
@@ -129,7 +129,7 @@ def create_demo():
         return full_img, res
 
 
-    def process(condition_model, source_image, enable_all_generate, mask_image, enable_auto_prompt, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
+    def process(condition_model, source_image, enable_all_generate, mask_image, control_scale, enable_auto_prompt, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
 
         input_image = source_image["image"]
         if mask_image is None:
@@ -212,6 +212,7 @@ def create_demo():
                 controlnet_conditioning_image=control.type(torch.float16),
                 height=H,
                 width=W,
+                controlnet_conditioning_scale=control_scale,
             ).images
 
 
@@ -277,6 +278,8 @@ def create_demo():
                 enable_all_generate = gr.Checkbox(label='Auto generation on all region.', value=False)
                 prompt = gr.Textbox(label="Prompt (Text in the expected things of edited region)")
                 enable_auto_prompt = gr.Checkbox(label='Auto generate text prompt from input image with BLIP2: Warning: Enable this may makes your prompt not working.', value=True)
+                control_scale = gr.Slider(
+                    label="Mask Align strength (Large value means more strict alignment with SAM mask)", minimum=0, maximum=1, value=1, step=0.1)
                 run_button = gr.Button(label="Run")
                 condition_model = gr.Dropdown(choices=list(config_dict.keys()),
                                               value=list(config_dict.keys())[1],
@@ -308,7 +311,7 @@ def create_demo():
                 result_gallery = gr.Gallery(
                     label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
                 result_text = gr.Text(label='BLIP2+Human Prompt Text')
-        ips = [condition_model, source_image, enable_all_generate, mask_image, enable_auto_prompt, prompt, a_prompt, n_prompt, num_samples, image_resolution,
+        ips = [condition_model, source_image, enable_all_generate, mask_image, control_scale, enable_auto_prompt, prompt, a_prompt, n_prompt, num_samples, image_resolution,
                detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta]
         run_button.click(fn=process, inputs=ips, outputs=[result_gallery, result_text])
         return demo
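
For context: the commit adds a control_scale slider to the Gradio UI and threads it through process into the pipeline call as controlnet_conditioning_scale, so the user can relax how strictly the output follows the SAM-derived mask (1.0 keeps strict alignment, lower values let the text prompt dominate). The sketch below is only an illustration of that parameter flow under assumptions: it uses the stock diffusers StableDiffusionControlNetPipeline and placeholder model IDs, not sam2edit.py's own pipeline, models, or mask handling.

import gradio as gr
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

# Placeholder models for illustration; the repo loads its own SAM-conditioned ControlNet.
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet,
    torch_dtype=torch.float16).to("cuda")

def generate(prompt, control_image, control_scale):
    # controlnet_conditioning_scale weights the ControlNet residuals:
    # 1.0 follows the conditioning image (here, the mask) strictly,
    # smaller values loosen the alignment.
    return pipe(
        prompt,
        image=control_image,
        controlnet_conditioning_scale=float(control_scale),
        num_inference_steps=20,
    ).images[0]

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    control_image = gr.Image(label="Conditioning image", type="pil")
    control_scale = gr.Slider(label="Mask align strength",
                              minimum=0, maximum=1, value=1, step=0.1)
    out = gr.Image(label="Result")
    # Same wiring pattern as the commit: the slider value is passed
    # straight through to the pipeline call.
    gr.Button("Run").click(generate,
                           inputs=[prompt, control_image, control_scale],
                           outputs=out)

demo.launch()

In diffusers, controlnet_conditioning_scale multiplies the ControlNet outputs before they are added to the UNet's residuals, which is why the slider's default of 1.0 reproduces the pre-commit behavior exactly.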