Linoy Tsaban committed on
Commit 5e25b83
1 Parent(s): 162c70e

Update app.py

Files changed (1):
  1. app.py +33 -14
app.py CHANGED
@@ -6,11 +6,7 @@ from diffusers import StableDiffusionPipeline
 from diffusers import DDIMScheduler
 from utils import *
 from inversion_utils import *
-
-model_id = "CompVis/stable-diffusion-v1-4"
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-sd_pipe = StableDiffusionPipeline.from_pretrained(model_id).to(device)
-sd_pipe.scheduler = DDIMScheduler.from_config(model_id, subfolder = "scheduler")
+from modified_pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
 from torch import autocast, inference_mode
 
 def invert(x0, prompt_src="", num_diffusion_steps=100, cfg_scale_src = 3.5, eta = 1):
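Note on the new import: SemanticStableDiffusionPipeline now comes from a local module rather than from diffusers itself. That module is not part of this commit, so the following is only a hypothetical sketch of its shape, assuming it subclasses the stock diffusers SEGA pipeline to accept the DDPM-inversion state consumed later in edit():

# modified_pipeline_semantic_stable_diffusion.py (hypothetical sketch, not the actual module)
from diffusers import SemanticStableDiffusionPipeline as _StockSEGAPipeline

class SemanticStableDiffusionPipeline(_StockSEGAPipeline):
    """Assumed extension: __call__ additionally accepts use_ddpm, wts and zs,
    and when use_ddpm=True substitutes the inversion noise maps zs for the
    scheduler's random noise at each denoising step."""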
@@ -48,10 +44,17 @@ def sample(wt, zs, wts, prompt_tar="", cfg_scale_tar=15, skip=36, eta = 1):
     img = image_grid(x0_dec)
     return img
 
+# load pipelines
+sd_model_id = "runwayml/stable-diffusion-v1-5"
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+sd_pipe = StableDiffusionPipeline.from_pretrained(sd_model_id).to(device)
+sd_pipe.scheduler = DDIMScheduler.from_config(sd_model_id, subfolder="scheduler")
+sem_pipe = SemanticStableDiffusionPipeline.from_pretrained(sd_model_id).to(device)
 
-def edit(input_image, input_image_prompt, target_prompt, guidance_scale=15, skip=36, num_diffusion_steps=100):
+def edit(input_image, input_image_prompt, target_prompt, edit_prompt,
+         guidance_scale=15, skip=36, num_diffusion_steps=100,
+         negative_guidance = False):
     offsets=(0,0,0,0)
     x0 = load_512(input_image, *offsets, device)
 
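One caveat on the scheduler line above: DDIMScheduler.from_config(model_id, subfolder=...) loads from the Hub only on older diffusers releases; newer versions reserve from_config for config dicts and expect from_pretrained for Hub repos. A minimal equivalent, assuming a current diffusers install:

from diffusers import DDIMScheduler

# Scheduler loading on current diffusers releases
sd_pipe.scheduler = DDIMScheduler.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="scheduler"
)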
@@ -65,7 +68,22 @@ def edit(input_image, input_image_prompt, target_prompt, guidance_scale=15, skip
     pure_ddpm_out = sample(wt, zs, wts, prompt_tar=target_prompt,
                            cfg_scale_tar=guidance_scale, skip=skip,
                            eta = eta)
-    return pure_ddpm_out
+
+    editing_args = dict(
+        editing_prompt = [edit_prompt],
+        reverse_editing_direction = [negative_guidance],
+        edit_warmup_steps=[5],
+        edit_guidance_scale=[8],
+        edit_threshold=[.93],
+        edit_momentum_scale=0.5,
+        edit_mom_beta=0.6
+    )
+    sega_out = sem_pipe(prompt=target_prompt, eta=eta, latents=latnets,
+                        num_images_per_prompt=1,
+                        guidance_scale=guidance_scale,
+                        num_inference_steps=num_diffusion_steps,
+                        use_ddpm=True, wts=wts, zs=zs[skip:], **editing_args)
+    return pure_ddpm_out, sega_out.images[0]
 
 # See the gradio docs for the types of inputs and outputs available
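The editing_args keys are the standard SEGA knobs; only the use_ddpm/wts/zs handoff in the sem_pipe call comes from the modified pipeline. For comparison, a minimal plain text-to-image SEGA run with the stock diffusers pipeline and the same knob values (the prompts are placeholders, not from the commit):

import torch
from diffusers import SemanticStableDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = SemanticStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5"
).to(device)

out = pipe(
    prompt="a photo of a cat",           # placeholder prompt
    num_inference_steps=50,
    guidance_scale=7.5,
    editing_prompt=["smiling"],          # one list entry per edit concept
    reverse_editing_direction=[False],   # True steers away from the concept
    edit_warmup_steps=[5],
    edit_guidance_scale=[8],
    edit_threshold=[0.93],
    edit_momentum_scale=0.5,
    edit_mom_beta=0.6,
)
out.images[0].save("sega_example.png")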
@@ -73,13 +91,15 @@ inputs = [
     gr.Image(label="input image", shape=(512, 512)),
     gr.Textbox(label="input prompt"),
     gr.Textbox(label="target prompt"),
-    gr.Slider(label="guidance_scale", minimum=7, maximum=18, value=15),
+    gr.Textbox(label="SEGA edit prompt"),
+    gr.Slider(label="guidance scale", minimum=7, maximum=18, value=15),
     gr.Slider(label="skip", minimum=0, maximum=40, value=36),
-    gr.Slider(label="num_diffusion_steps", minimum=0, maximum=300, value=100),
-
+    gr.Slider(label="num diffusion steps", minimum=0, maximum=300, value=100),
+    gr.Checkbox(label="SEGA negative_guidance"),
+
 ]
-outputs = gr.Image(label="result")
+outputs = [gr.Image(label="DDPM"), gr.Image(label="DDPM+SEGA")]
 
 # And the minimal interface
 demo = gr.Interface(
@@ -87,5 +107,4 @@ demo = gr.Interface(
     inputs=inputs,
     outputs=outputs,
 )
-
-demo.launch()
+demo.launch()  # debug=True allows you to see errors and output in Colab
 
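With these changes edit() returns a (DDPM reconstruction, DDPM+SEGA) pair matching the two output images. A direct call outside Gradio would look roughly like the sketch below; the file name and prompt values are placeholders, not values from the commit:

ddpm_img, sega_img = edit(
    input_image="example.jpg",                # placeholder path
    input_image_prompt="a horse in a field",  # describes the input image
    target_prompt="a horse in the snow",      # DDPM target prompt
    edit_prompt="oil painting",               # SEGA concept
    guidance_scale=15,
    skip=36,
    num_diffusion_steps=100,
    negative_guidance=False,
)
ddpm_img.save("ddpm.png")
sega_img.save("ddpm_sega.png")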