|
import gradio as gr |
|
import torch |
|
import numpy as np |
|
|
import random |
|
|
from utils import * |
|
from constants import * |
|
from inversion_utils import * |
|
from modified_pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline |
|
from torch import autocast, inference_mode |
|
from diffusers import StableDiffusionPipeline |
|
from diffusers import DDIMScheduler |
|
from transformers import AutoProcessor, BlipForConditionalGeneration |
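
# Model setup: a Stable Diffusion backbone with a DDIM scheduler performs the
# edit-friendly DDPM inversion, a modified semantic-guidance (SEGA) pipeline
# applies the edits, and BLIP captions the input image to pre-fill the prompt.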
|
|
|
|
|
sd_model_id = "stabilityai/stable-diffusion-2-base" |
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") |
|
sd_pipe = StableDiffusionPipeline.from_pretrained(sd_model_id).to(device) |
|
sd_pipe.scheduler = DDIMScheduler.from_pretrained(sd_model_id, subfolder="scheduler")
|
sem_pipe = SemanticStableDiffusionPipeline.from_pretrained(sd_model_id).to(device) |
|
blip_processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base") |
|
blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(device) |
|
|
|
|
|
|
|
|
|
def caption_image(input_image): |
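    # Generates a BLIP caption for the input image (used to pre-fill the target prompt).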
|
|
|
inputs = blip_processor(images=input_image, return_tensors="pt").to(device) |
|
pixel_values = inputs.pixel_values |
|
|
|
generated_ids = blip_model.generate(pixel_values=pixel_values, max_length=50) |
|
generated_caption = blip_processor.batch_decode(generated_ids, skip_special_tokens=True)[0] |
|
return generated_caption |
|
|
|
|
|
|
|
def invert(x0, prompt_src="", num_diffusion_steps=100, cfg_scale_src=3.5, eta=1):
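    # Inverts a real image via edit-friendly DDPM inversion
    # (https://arxiv.org/abs/2304.06140) and returns:
    #   zs  - per-step noise maps
    #   wts - intermediate inverted latents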
|
|
sd_pipe.scheduler.set_timesteps(num_diffusion_steps) |
|
|
|
|
|
with autocast("cuda"), inference_mode(): |
|
w0 = (sd_pipe.vae.encode(x0).latent_dist.mode() * 0.18215).float() |
|
|
|
|
|
wt, zs, wts = inversion_forward_process(sd_pipe, w0, etas=eta, prompt=prompt_src, cfg_scale=cfg_scale_src, prog_bar=True, num_inference_steps=num_diffusion_steps) |
|
return zs, wts |
|
|
|
|
|
def sample(zs, wts, prompt_tar="", cfg_scale_tar=15, skip=36, eta=1):
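    # Runs the DDPM reverse (denoising) process from the inverted latent wts[skip]
    # with the stored noise maps zs, then decodes the result with the VAE.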
|
|
|
|
|
w0, _ = inversion_reverse_process(sd_pipe, xT=wts[skip], etas=eta, prompts=[prompt_tar], cfg_scales=[cfg_scale_tar], prog_bar=True, zs=zs[skip:]) |
|
|
|
|
|
with autocast("cuda"), inference_mode(): |
|
x0_dec = sd_pipe.vae.decode(1 / 0.18215 * w0).sample |
|
    if x0_dec.dim() < 4:
        x0_dec = x0_dec[None, :, :, :]
|
img = image_grid(x0_dec) |
|
return img |
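
# A minimal sketch of the invert -> sample round trip implemented above
# (paths and prompts are illustrative, borrowed from get_example()):
#
#   x0 = load_512('examples/source_a_cat_sitting_next_to_a_mirror.jpeg', device=device)
#   zs, wts = invert(x0=x0, prompt_src='a cat sitting next to a mirror', num_diffusion_steps=100)
#   img = sample(zs, wts, prompt_tar='watercolor painting of a cat sitting next to a mirror', skip=36, cfg_scale_tar=15)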
|
|
|
|
|
def reconstruct(tar_prompt, |
|
tar_cfg_scale, |
|
skip, |
|
wts, zs, |
|
do_reconstruction, |
|
reconstruction, |
|
reconstruct_button |
|
): |
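    # Toggles the pure-DDPM reconstruction view. The image is recomputed only
    # when `do_reconstruction` is set and is cached via the `reconstruction` state.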
|
|
|
if reconstruct_button == "Hide Reconstruction": |
|
return reconstruction.value, reconstruction, ddpm_edited_image.update(visible=False), do_reconstruction, "Show Reconstruction" |
|
|
|
else: |
|
if do_reconstruction: |
|
reconstruction_img = sample(zs.value, wts.value, prompt_tar=tar_prompt, skip=skip, cfg_scale_tar=tar_cfg_scale) |
|
reconstruction = gr.State(value=reconstruction_img) |
|
do_reconstruction = False |
|
return reconstruction.value, reconstruction, ddpm_edited_image.update(visible=True), do_reconstruction, "Hide Reconstruction" |
|
|
|
|
|
def load_and_invert( |
|
input_image, |
|
do_inversion, |
|
seed, randomize_seed, |
|
wts, zs, |
|
src_prompt ="", |
|
tar_prompt="", |
|
steps=100, |
|
src_cfg_scale = 3.5, |
|
skip=36, |
|
tar_cfg_scale=15, |
|
progress=gr.Progress(track_tqdm=True) |
|
|
|
): |
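    # Loads the input image at 512x512 and (re)runs DDPM inversion when needed.
    # The resulting tensors are wrapped in fresh gr.State objects, so downstream
    # handlers read them through `.value`.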
|
|
|
|
|
x0 = load_512(input_image, device=device) |
|
|
|
if do_inversion or randomize_seed: |
|
|
|
zs_tensor, wts_tensor = invert(x0 =x0 , prompt_src=src_prompt, num_diffusion_steps=steps, cfg_scale_src=src_cfg_scale) |
|
wts = gr.State(value=wts_tensor) |
|
zs = gr.State(value=zs_tensor) |
|
do_inversion = False |
|
|
|
return wts, zs, do_inversion, inversion_progress.update(visible=False) |
|
|
|
|
|
|
|
def edit(input_image, |
|
wts, zs, |
|
tar_prompt, |
|
steps, |
|
skip, |
|
tar_cfg_scale, |
|
edit_concept_1,edit_concept_2,edit_concept_3, |
|
         guidance_scale_1, guidance_scale_2, guidance_scale_3,
|
warmup_1, warmup_2, warmup_3, |
|
neg_guidance_1, neg_guidance_2, neg_guidance_3, |
|
threshold_1, threshold_2, threshold_3, |
|
do_reconstruction, |
|
reconstruction): |
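    # Applies SEGA concept editing on top of the inverted latents; when no
    # concepts are given, falls back to a pure DDPM edit from the target prompt.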
|
|
|
|
|
if edit_concept_1 != "" or edit_concept_2 != "" or edit_concept_3 != "": |
|
editing_args = dict( |
|
editing_prompt = [edit_concept_1,edit_concept_2,edit_concept_3], |
|
reverse_editing_direction = [ neg_guidance_1, neg_guidance_2, neg_guidance_3,], |
|
edit_warmup_steps=[warmup_1, warmup_2, warmup_3,], |
|
            edit_guidance_scale=[guidance_scale_1, guidance_scale_2, guidance_scale_3],
|
edit_threshold=[threshold_1, threshold_2, threshold_3], |
|
edit_momentum_scale=0.3, |
|
edit_mom_beta=0.6, |
|
eta=1,) |
|
|
|
        latents = wts.value[skip].expand(1, -1, -1, -1)
        sega_out = sem_pipe(prompt=tar_prompt, latents=latents, guidance_scale=tar_cfg_scale,
                            num_images_per_prompt=1,
                            num_inference_steps=steps,
                            use_ddpm=True, wts=wts.value, zs=zs.value[skip:], **editing_args)
|
|
|
return sega_out.images[0], reconstruct_button.update(visible=True), do_reconstruction, reconstruction |
|
|
|
else: |
|
|
|
if do_reconstruction: |
|
pure_ddpm_img = sample(zs.value, wts.value, prompt_tar=tar_prompt, skip=skip, cfg_scale_tar=tar_cfg_scale) |
|
reconstruction = gr.State(value=pure_ddpm_img) |
|
do_reconstruction = False |
|
return pure_ddpm_img, reconstruct_button.update(visible=False), do_reconstruction, reconstruction |
|
|
|
return reconstruction, reconstruct_button.update(visible=False), do_reconstruction, reconstruction |
|
|
|
|
|
def randomize_seed_fn(seed, randomize_seed): |
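    # Draws a fresh random seed when requested and seeds torch with the result.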
|
if randomize_seed: |
|
seed = random.randint(0, np.iinfo(np.int32).max) |
|
torch.manual_seed(seed) |
|
return seed |
|
|
|
def update_label(check_negative): |
|
    if check_negative:
|
return gr.update(value="Remove") |
|
else: |
|
return gr.update(value="Include") |
|
|
|
|
|
|
|
def get_example(): |
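    # Canned demo cases: source image, prompts, steps/skip/guidance settings,
    # SEGA concepts, and a precomputed result image.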
|
case = [ |
|
[ |
|
'examples/source_a_cat_sitting_next_to_a_mirror.jpeg', |
|
'a cat sitting next to a mirror', |
|
'watercolor painting of a cat sitting next to a mirror', |
|
100, |
|
36, |
|
15, |
|
'Schnauzer dog', 'cat', |
|
5.5, |
|
1, |
|
'examples/ddpm_sega_watercolor_painting_a_cat_sitting_next_to_a_mirror_plus_dog_minus_cat.png' |
|
], |
|
[ |
|
'examples/source_a_man_wearing_a_brown_hoodie_in_a_crowded_street.jpeg', |
|
'a man wearing a brown hoodie in a crowded street', |
|
'a robot wearing a brown hoodie in a crowded street', |
|
100, |
|
36, |
|
15, |
|
'painting','', |
|
10, |
|
1, |
|
'examples/ddpm_sega_painting_of_a_robot_wearing_a_brown_hoodie_in_a_crowded_street.png' |
|
], |
|
[ |
|
'examples/source_wall_with_framed_photos.jpeg', |
|
'', |
|
'', |
|
100, |
|
36, |
|
15, |
|
'pink drawings of muffins','', |
|
10, |
|
1, |
|
'examples/ddpm_sega_plus_pink_drawings_of_muffins.png' |
|
], |
|
[ |
|
'examples/source_an_empty_room_with_concrete_walls.jpg', |
|
'an empty room with concrete walls', |
|
'glass walls', |
|
100, |
|
36, |
|
17, |
|
'giant elephant','', |
|
10, |
|
1, |
|
'examples/ddpm_sega_glass_walls_gian_elephant.png' |
|
]] |
|
return case |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
intro = """ |
|
<h1 style="font-weight: 800; text-align: center; margin-bottom: 7px;">
|
LEDITS - Pipeline for editing images |
|
</h1> |
|
<h3 style="font-weight: 600; text-align: center;"> |
|
Real Image Latent Editing with Edit Friendly DDPM and Semantic Guidance |
|
</h3> |
|
<h4 style="text-align: center; margin-bottom: 7px;"> |
|
<a href="https://editing-images-project.hf.space/" style="text-decoration: underline;" target="_blank">Project Page</a> | <a href="#" style="text-decoration: underline;" target="_blank">ArXiv</a> |
|
</h4> |
|
|
|
<p style="font-size: 0.9rem; margin: 0rem; line-height: 1.2em; margin-top:1em"> |
|
<a href="https://huggingface.co/spaces/editing-images/edit_friendly_ddpm_x_sega?duplicate=true"> |
|
<img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3CWLGkA" alt="Duplicate Space"></a> |
|
</p>"""
|
|
|
help_text = """ |
|
- **Getting Started - edit images with DDPM X SEGA:** |
|
|
|
There are 3 general settings you can play with -
|
|
|
1. **Pure DDPM Edit -** Describe the desired edited output image in detail |
|
2. **Pure SEGA Edit -** Keep the target prompt empty ***or*** fill it with a description of the original image, and add editing concepts for Semantic Guidance editing
|
3. **Combined -** Describe the desired edited output image in detail and add additional SEGA editing concepts on top |
|
- **Getting Started - Tips** |
|
|
|
While the best approach depends on your editing objective and source image, we can lay out a few guiding tips to use as a starting point -
|
|
|
1. **DDPM** is usually better suited for scene/style changes and major subject changes, while **SEGA** allows for more fine-grained control; its changes are more delicate, making it better suited for adding details (for example facial expressions and attributes, subtle style modifications, object adding/removing)
|
2. The more you describe the scene in the target prompt (both the parts and details you wish to keep the same and those you wish to change), the better the result |
|
3. **Combining DDPM Edit with SEGA -** |
|
Try dividing your editing objective into the more significant scene/style/subject changes and the more moderate detail additions/removals. Then describe the major changes in a detailed target prompt and add the finer-grained details as SEGA concepts.
|
4. **Reconstruction:** Using an empty source prompt + target prompt will lead to a perfect reconstruction |
|
- **Fidelity vs creativity**: |
|
|
|
Bigger values → more fidelity, smaller values → more creativity |
|
|
|
1. `Skip Steps` |
|
2. `Warmup` (SEGA) |
|
3. `Threshold` (SEGA) |
|
|
|
Bigger values → more creativity, smaller values → more fidelity |
|
|
|
1. `Guidance Scale` |
|
2. `Concept Guidance Scale` (SEGA) |
|
""" |
|
|
|
with gr.Blocks(css="style.css") as demo: |
|
|
|
def add_concept(sega_concepts_counter): |
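        # Reveals the next concept row (and its advanced options), up to 3 concepts.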
|
if sega_concepts_counter == 1: |
|
return row2.update(visible=True), row2_advanced.update(visible=True), row3.update(visible=False), row3_advanced.update(visible=False), add_concept_button.update(visible=True), 2 |
|
else: |
|
return row2.update(visible=True), row2_advanced.update(visible=True), row3.update(visible=True), row3_advanced.update(visible=True), add_concept_button.update(visible=False), 3 |
|
|
|
    def update_display_concept_1(add_1, edit_concept_1, neg_guidance_1):
        if add_1 == 'Add' and edit_concept_1 != "":
            return box1.update(visible=True), edit_concept_1, concept_1.update(visible=True), edit_concept_1, guidance_scale_1.update(visible=True), neg_guidance_1, "Clear"
        else:
            return box1.update(visible=False), "", concept_1.update(visible=False), "", guidance_scale_1.update(visible=False), False, "Add"

    def update_display_concept_2(add_2, edit_concept_2, neg_guidance_2):
        if add_2 == 'Add' and edit_concept_2 != "":
            return box2.update(visible=True), edit_concept_2, concept_2.update(visible=True), edit_concept_2, guidance_scale_2.update(visible=True), neg_guidance_2, "Clear"
        else:
            return box2.update(visible=False), "", concept_2.update(visible=False), "", guidance_scale_2.update(visible=False), False, "Add"

    def update_display_concept_3(add_3, edit_concept_3, neg_guidance_3):
        if add_3 == 'Add' and edit_concept_3 != "":
            return box3.update(visible=True), edit_concept_3, concept_3.update(visible=True), edit_concept_3, guidance_scale_3.update(visible=True), neg_guidance_3, "Clear"
        else:
            return box3.update(visible=False), "", concept_3.update(visible=False), "", guidance_scale_3.update(visible=False), False, "Add"
|
|
|
def display_editing_options(run_button, clear_button, sega_tab): |
|
return run_button.update(visible=True), clear_button.update(visible=True), sega_tab.update(visible=True) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def reset_do_inversion(): |
|
do_inversion = True |
|
return do_inversion |
|
|
|
def reset_do_reconstruction(): |
|
do_reconstruction = True |
|
return do_reconstruction |
|
|
|
def update_inversion_progress_visibility(input_image, do_inversion): |
|
        if do_inversion and input_image is not None:
|
return inversion_progress.update(visible=True) |
|
else: |
|
return inversion_progress.update(visible=False) |
|
|
|
|
|
|
|
|
gr.HTML(intro) |
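    # Session state: inverted latents and noise maps, the cached reconstruction,
    # dirty flags, and the number of visible SEGA concepts.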
|
wts = gr.State() |
|
zs = gr.State() |
|
reconstruction = gr.State() |
|
do_inversion = gr.State(value=True) |
|
do_reconstruction = gr.State(value=True) |
|
sega_concepts_counter = gr.State(1) |
|
|
|
|
|
|
|
with gr.Row(): |
|
input_image = gr.Image(label="Input Image", interactive=True) |
|
        ddpm_edited_image = gr.Image(label="DDPM Reconstructed Image", interactive=False, visible=False)
|
        sega_edited_image = gr.Image(label="DDPM + SEGA Edited Image", interactive=False)
|
input_image.style(height=365, width=365) |
|
ddpm_edited_image.style(height=365, width=365) |
|
sega_edited_image.style(height=365, width=365) |
|
|
|
with gr.Row(): |
|
        with gr.Box(visible=False) as box1:
            concept_1 = gr.Button(visible=False)
            guidance_scale_1 = gr.Slider(label='Concept Guidance Scale', minimum=1, maximum=30,
                                         info="How strongly the concept should be included in the image",
                                         value=DEFAULT_SEGA_CONCEPT_GUIDANCE_SCALE,
                                         step=0.5, interactive=True, visible=False)
        with gr.Box(visible=False) as box2:
            concept_2 = gr.Button(visible=False)
            guidance_scale_2 = gr.Slider(label='Concept Guidance Scale', minimum=1, maximum=30,
                                         info="How strongly the concept should be included in the image",
                                         value=DEFAULT_SEGA_CONCEPT_GUIDANCE_SCALE,
                                         step=0.5, interactive=True, visible=False)
        with gr.Box(visible=False) as box3:
            concept_3 = gr.Button(visible=False)
            guidance_scale_3 = gr.Slider(label='Concept Guidance Scale', minimum=1, maximum=30,
                                         info="How strongly the concept should be included in the image",
                                         value=DEFAULT_SEGA_CONCEPT_GUIDANCE_SCALE,
                                         step=0.5, interactive=True, visible=False)
|
|
|
|
|
with gr.Row(): |
|
inversion_progress = gr.Textbox(visible=False, label="Inversion progress") |
|
|
|
|
|
|
|
with gr.Row().style(mobile_collapse=False, equal_height=True): |
|
tar_prompt = gr.Textbox( |
|
label="Image Description", |
|
|
|
max_lines=1, value="", |
|
placeholder="Enter your target prompt", |
|
) |
|
|
|
|
|
with gr.Box(): |
|
intro_segs = gr.Markdown("Add/Remove New Concepts to your Image") |
|
|
|
with gr.Row().style(mobile_collapse=False, equal_height=True): |
|
with gr.Column(scale=3, min_width=100): |
|
edit_concept_1 = gr.Textbox( |
|
label="Edit Concept", |
|
show_label=False, |
|
max_lines=1, value="", |
|
placeholder="E.g.: Sunglasses", |
|
) |
|
with gr.Column(scale=1, min_width=100): |
|
neg_guidance_1 = gr.Checkbox( |
|
label='Remove Concept?') |
|
|
|
|
|
|
|
|
|
with gr.Column(scale=1, min_width=100): |
|
|
|
add_1 = gr.Button('Include') |
|
|
|
|
|
with gr.Row(visible=False).style(equal_height=True) as row2: |
|
with gr.Column(scale=3, min_width=100): |
|
edit_concept_2 = gr.Textbox( |
|
label="Edit Concept", |
|
show_label=False, |
|
max_lines=1, |
|
placeholder="E.g.: Realistic", |
|
) |
|
with gr.Column(scale=1, min_width=100): |
|
neg_guidance_2 = gr.Checkbox( |
|
label='Remove Concept?',visible=True) |
|
|
|
|
|
|
|
with gr.Column(scale=1, min_width=100): |
|
add_2 = gr.Button('Include') |
|
|
|
|
|
with gr.Row(visible=False).style(equal_height=True) as row3: |
|
with gr.Column(scale=3, min_width=100): |
|
edit_concept_3 = gr.Textbox( |
|
label="Edit Concept", |
|
show_label=False, |
|
max_lines=1, |
|
placeholder="E.g.: orange", |
|
) |
|
with gr.Column(scale=1, min_width=100): |
|
neg_guidance_3 = gr.Checkbox( |
|
label='Remove Concept?',visible=True) |
|
|
|
|
|
|
|
with gr.Column(scale=1, min_width=100): |
|
add_3 = gr.Button('Include') |
|
|
|
|
|
|
|
|
|
with gr.Row().style(mobile_collapse=False, equal_height=True): |
|
add_concept_button = gr.Button("+1 concept") |
|
|
|
|
|
with gr.Row(): |
|
run_button = gr.Button("Edit your image!", visible=True) |
|
|
|
|
|
with gr.Accordion("Advanced Options", open=False): |
|
with gr.Tabs() as tabs: |
|
|
|
with gr.TabItem('General options', id=2): |
|
with gr.Row(): |
|
with gr.Column(min_width=100): |
|
clear_button = gr.Button("Clear", visible=True) |
|
src_prompt = gr.Textbox(lines=1, label="Source Prompt", interactive=True, placeholder="") |
|
steps = gr.Number(value=100, precision=0, label="Num Diffusion Steps", interactive=True) |
|
                        src_cfg_scale = gr.Number(value=3.5, label="Source Guidance Scale", interactive=True)
|
|
|
|
|
with gr.Column(min_width=100): |
|
reconstruct_button = gr.Button("Show Reconstruction", visible=False) |
|
skip = gr.Slider(minimum=0, maximum=60, value=36, label="Skip Steps", interactive=True) |
|
                        tar_cfg_scale = gr.Slider(minimum=7, maximum=30, value=15, label="Guidance Scale", interactive=True)
|
seed = gr.Number(value=0, precision=0, label="Seed", interactive=True) |
|
randomize_seed = gr.Checkbox(label='Randomize seed', value=False) |
|
|
|
with gr.TabItem('SEGA options', id=3) as sega_advanced_tab: |
|
|
|
with gr.Row().style(mobile_collapse=False, equal_height=True): |
|
warmup_1 = gr.Slider(label='Warmup', minimum=0, maximum=50, |
|
value=DEFAULT_WARMUP_STEPS, |
|
step=1, interactive=True) |
|
threshold_1 = gr.Slider(label='Threshold', minimum=0.5, maximum=0.99, |
|
                                         value=DEFAULT_THRESHOLD, step=0.01, interactive=True)
|
|
|
|
|
with gr.Row(visible=False) as row2_advanced: |
|
warmup_2 = gr.Slider(label='Warmup', minimum=0, maximum=50, |
|
value=DEFAULT_WARMUP_STEPS, |
|
step=1, interactive=True) |
|
threshold_2 = gr.Slider(label='Threshold', minimum=0.5, maximum=0.99, |
|
value=DEFAULT_THRESHOLD, |
|
                                         step=0.01, interactive=True)
|
|
|
with gr.Row(visible=False) as row3_advanced: |
|
warmup_3 = gr.Slider(label='Warmup', minimum=0, maximum=50, |
|
value=DEFAULT_WARMUP_STEPS, step=1, |
|
interactive=True) |
|
threshold_3 = gr.Slider(label='Threshold', minimum=0.5, maximum=0.99, |
|
                                         value=DEFAULT_THRESHOLD, step=0.01,
|
interactive=True) |
|
|
|
|
|
|
|
|
|
|
|
|
|
    neg_guidance_1.change(fn=update_label, inputs=[neg_guidance_1], outputs=[add_1])
    add_1.click(fn=update_display_concept_1, inputs=[add_1, edit_concept_1, neg_guidance_1], outputs=[box1, concept_1, concept_1, edit_concept_1, guidance_scale_1, neg_guidance_1, add_1])
    add_2.click(fn=update_display_concept_2, inputs=[add_2, edit_concept_2, neg_guidance_2], outputs=[box2, concept_2, concept_2, edit_concept_2, guidance_scale_2, neg_guidance_2, add_2])
    add_3.click(fn=update_display_concept_3, inputs=[add_3, edit_concept_3, neg_guidance_3], outputs=[box3, concept_3, concept_3, edit_concept_3, guidance_scale_3, neg_guidance_3, add_3])
|
|
|
|
|
add_concept_button.click(fn = add_concept, inputs=sega_concepts_counter, |
|
outputs= [row2, row2_advanced, row3, row3_advanced, add_concept_button, sega_concepts_counter], queue = False) |
|
|
|
run_button.click(fn = update_inversion_progress_visibility, inputs =[input_image,do_inversion], outputs=[inversion_progress],queue=False).then( |
|
fn=load_and_invert, |
|
inputs=[input_image, |
|
do_inversion, |
|
seed, randomize_seed, |
|
wts, zs, |
|
src_prompt, |
|
tar_prompt, |
|
steps, |
|
src_cfg_scale, |
|
skip, |
|
tar_cfg_scale |
|
], |
|
outputs=[wts, zs, do_inversion, inversion_progress], |
|
).then(fn = update_inversion_progress_visibility, inputs =[input_image,do_inversion], outputs=[inversion_progress],queue=False).success( |
|
fn=edit, |
|
inputs=[input_image, |
|
wts, zs, |
|
tar_prompt, |
|
steps, |
|
skip, |
|
tar_cfg_scale, |
|
edit_concept_1,edit_concept_2,edit_concept_3, |
|
                guidance_scale_1, guidance_scale_2, guidance_scale_3,
|
warmup_1, warmup_2, warmup_3, |
|
neg_guidance_1, neg_guidance_2, neg_guidance_3, |
|
threshold_1, threshold_2, threshold_3, do_reconstruction, reconstruction |
|
|
|
], |
|
outputs=[sega_edited_image, reconstruct_button, do_reconstruction, reconstruction]) |
|
|
|
|
|
|
|
|
|
|
|
input_image.change( |
|
fn = reset_do_inversion, |
|
outputs = [do_inversion], |
|
queue = False).then(fn = update_inversion_progress_visibility, inputs =[input_image,do_inversion], |
|
outputs=[inversion_progress],queue=False).then( |
|
fn=load_and_invert, |
|
inputs=[input_image, |
|
do_inversion, |
|
seed, randomize_seed, |
|
wts, zs, |
|
src_prompt, |
|
tar_prompt, |
|
steps, |
|
src_cfg_scale, |
|
skip, |
|
tar_cfg_scale, |
|
], |
|
|
|
outputs=[wts, zs, do_inversion, inversion_progress], |
|
).then(fn = update_inversion_progress_visibility, inputs =[input_image,do_inversion], |
|
outputs=[inversion_progress],queue=False).then(fn = caption_image, |
|
inputs = [input_image], |
|
outputs = [tar_prompt]).then( |
|
lambda: reconstruct_button.update(visible=False), |
|
outputs=[reconstruct_button]).then( |
|
fn = reset_do_reconstruction, |
|
outputs = [do_reconstruction], |
|
queue = False) |
|
|
|
|
|
|
|
src_prompt.change( |
|
fn = reset_do_inversion, |
|
outputs = [do_inversion], queue = False).then( |
|
fn = reset_do_reconstruction, |
|
outputs = [do_reconstruction], queue = False) |
|
|
|
steps.change( |
|
fn = reset_do_inversion, |
|
outputs = [do_inversion], queue = False).then( |
|
fn = reset_do_reconstruction, |
|
outputs = [do_reconstruction], queue = False) |
|
|
|
|
|
src_cfg_scale.change( |
|
fn = reset_do_inversion, |
|
outputs = [do_inversion], queue = False).then( |
|
fn = reset_do_reconstruction, |
|
outputs = [do_reconstruction], queue = False) |
|
|
|
|
|
|
|
tar_prompt.change( |
|
fn = reset_do_reconstruction, |
|
outputs = [do_reconstruction], queue = False) |
|
|
|
tar_cfg_scale.change( |
|
fn = reset_do_reconstruction, |
|
outputs = [do_reconstruction], queue = False) |
|
|
|
skip.change( |
|
fn = reset_do_reconstruction, |
|
outputs = [do_reconstruction], queue = False) |
|
|
|
|
|
|
|
    clear_components = [input_image, ddpm_edited_image, ddpm_edited_image, sega_edited_image, do_inversion,
                        src_prompt, steps, src_cfg_scale, seed,
                        tar_prompt, skip, tar_cfg_scale, reconstruct_button, reconstruct_button,
                        edit_concept_1, guidance_scale_1, guidance_scale_1, warmup_1, threshold_1, neg_guidance_1, concept_1, concept_1,
                        edit_concept_2, guidance_scale_2, guidance_scale_2, warmup_2, threshold_2, neg_guidance_2, concept_2, concept_2, row2, row2_advanced,
                        edit_concept_3, guidance_scale_3, guidance_scale_3, warmup_3, threshold_3, neg_guidance_3, concept_3, concept_3, row3, row3_advanced]
|
|
|
    clear_components_output_vals = [None, None, ddpm_edited_image.update(visible=False), None, True,
                                    "", DEFAULT_DIFFUSION_STEPS, DEFAULT_SOURCE_GUIDANCE_SCALE, DEFAULT_SEED,
                                    "", DEFAULT_SKIP_STEPS, DEFAULT_TARGET_GUIDANCE_SCALE, reconstruct_button.update(value="Show Reconstruction"), reconstruct_button.update(visible=False),
                                    "", DEFAULT_SEGA_CONCEPT_GUIDANCE_SCALE, guidance_scale_1.update(visible=False), DEFAULT_WARMUP_STEPS, DEFAULT_THRESHOLD, DEFAULT_NEGATIVE_GUIDANCE, "", concept_1.update(visible=False),
                                    "", DEFAULT_SEGA_CONCEPT_GUIDANCE_SCALE, guidance_scale_2.update(visible=False), DEFAULT_WARMUP_STEPS, DEFAULT_THRESHOLD, DEFAULT_NEGATIVE_GUIDANCE, "", concept_2.update(visible=False), row2.update(visible=False), row2_advanced.update(visible=False),
                                    "", DEFAULT_SEGA_CONCEPT_GUIDANCE_SCALE, guidance_scale_3.update(visible=False), DEFAULT_WARMUP_STEPS, DEFAULT_THRESHOLD, DEFAULT_NEGATIVE_GUIDANCE, "", concept_3.update(visible=False), row3.update(visible=False), row3_advanced.update(visible=False)
                                    ]
|
|
|
|
|
    clear_button.click(lambda: clear_components_output_vals, outputs=clear_components)
|
|
|
reconstruct_button.click(lambda: ddpm_edited_image.update(visible=True), outputs=[ddpm_edited_image]).then(fn = reconstruct, |
|
inputs = [tar_prompt, |
|
tar_cfg_scale, |
|
skip, |
|
wts, zs, |
|
do_reconstruction, |
|
reconstruction, |
|
reconstruct_button], |
|
outputs = [ddpm_edited_image,reconstruction, ddpm_edited_image, do_reconstruction, reconstruct_button]) |
|
|
|
randomize_seed.change( |
|
fn = randomize_seed_fn, |
|
inputs = [seed, randomize_seed], |
|
outputs = [seed], |
|
queue = False) |
|
|
|
|
demo.queue() |
|
demo.launch(share=False) |
|
|
|
|
|
|
|
|