RamAnanth1 committed on
Commit
af3e036
1 Parent(s): 20928cb

Update app.py


Add inpainting

Files changed (1)
  1. app.py +102 -13
app.py CHANGED
@@ -14,8 +14,6 @@ from utils import preprocess,prepare_mask_and_masked_image, recover_image
 
 to_pil = T.ToPILImage()
 
-title = "Interactive demo: Raising the Cost of Malicious AI-Powered Image Editing"
-
 model_id_or_path = "runwayml/stable-diffusion-v1-5"
 # model_id_or_path = "CompVis/stable-diffusion-v1-4"
 # model_id_or_path = "CompVis/stable-diffusion-v1-3"
@@ -60,7 +58,7 @@ def pgd(X, model, eps=0.1, step_size=0.015, iters=40, clamp_min=0, clamp_max=1,
 
     return X_adv
 
-def process_image(raw_image, prompt):
+def process_image_img2img(raw_image, prompt):
     resize = T.transforms.Resize(512)
     center_crop = T.transforms.CenterCrop(512)
     init_image = center_crop(resize(raw_image))
@@ -96,19 +94,110 @@ def process_image(raw_image,prompt):
     image_adv = pipe_img2img(prompt=prompt, image=adv_image, strength=STRENGTH, guidance_scale=GUIDANCE, num_inference_steps=NUM_STEPS).images[0]
 
     return [(init_image, "Source Image"), (adv_image, "Adv Image"), (image_nat, "Gen. Image Nat"), (image_adv, "Gen. Image Adv")]
+
+def process_image_inpaint(raw_image, mask, prompt):
+    init_image = raw_image.convert('RGB').resize((512, 512))
+    mask_image = mask.convert('RGB')
+    mask_image = ImageOps.invert(mask_image).resize((512, 512))
+
+    # Attack using the embedding of a random image from the internet
+    target_url = "https://bostonglobe-prod.cdn.arcpublishing.com/resizer/2-ZvyQ3aRNl_VNo7ja51BM5-Kpk=/960x0/cloudfront-us-east-1.images.arcpublishing.com/bostonglobe/CZOXE32LQQX5UNAB42AOA3SUY4.jpg"
+    response = requests.get(target_url)
+    target_image = Image.open(BytesIO(response.content)).convert("RGB")
+    target_image = target_image.resize((512, 512))
+
+    with torch.autocast('cuda'):
+        mask, X = prepare_mask_and_masked_image(init_image, mask_image)
+        X = X.half().cuda()
+        mask = mask.half().cuda()
+
+        # Here we attack towards the embedding of a random target image. You can also simply attack towards an embedding of zeros!
+        target = pipe_inpaint.vae.encode(preprocess(target_image).half().cuda()).latent_dist.mean
+
+        adv_X = pgd(X,
+                    target=target,
+                    model=pipe_inpaint.vae.encode,
+                    criterion=torch.nn.MSELoss(),
+                    clamp_min=-1,
+                    clamp_max=1,
+                    eps=0.06,
+                    step_size=0.01,
+                    iters=1000,
+                    mask=1-mask
+                    )
+
+        adv_X = (adv_X / 2 + 0.5).clamp(0, 1)
+
+    adv_image = to_pil(adv_X[0]).convert("RGB")
+    adv_image = recover_image(adv_image, init_image, mask_image, background=True)
+
+    # A good seed
+    SEED = 9209
+
+    # Uncomment the line below to generate other images
+    # SEED = np.random.randint(low=0, high=100000)
+
+    torch.manual_seed(SEED)
+    print(SEED)
+
+    strength = 0.7
+    guidance_scale = 7.5
+    num_inference_steps = 100
+
+    image_nat = pipe_inpaint(prompt=prompt,
+                             image=init_image,
+                             mask_image=mask_image,
+                             eta=1,
+                             num_inference_steps=num_inference_steps,
+                             guidance_scale=guidance_scale,
+                             strength=strength
+                             ).images[0]
+    image_nat = recover_image(image_nat, init_image, mask_image)
+
+    torch.manual_seed(SEED)
+    image_adv = pipe_inpaint(prompt=prompt,
+                             image=adv_image,
+                             mask_image=mask_image,
+                             eta=1,
+                             num_inference_steps=num_inference_steps,
+                             guidance_scale=guidance_scale,
+                             strength=strength
+                             ).images[0]
+    image_adv = recover_image(image_adv, init_image, mask_image)
+
+    return [(init_image, "Source Image"), (adv_image, "Adv Image"), (image_nat, "Gen. Image Nat"), (image_adv, "Gen. Image Adv")]
+
 
-description = "This is an unofficial demo for Photoguard, which is an approach to safe-guarding images against manipulation by ML-powerd photo-editing models such as stable diffusion through immunization of images. The demo is based on the <a href='https://github.com/MadryLab/photoguard' style='text-decoration: underline;' target='_blank'> Github </a> implementation provided by the authors."
 examples = [["dog.png", "dog under heavy rain and muddy ground real"]]
 
-interface = gr.Interface(fn=process_image,
-                         inputs=[gr.Image(type="pil"), gr.Textbox(label="Prompt")],
-                         outputs=[gr.Gallery(
+
+with gr.Blocks() as demo:
+    gr.Markdown("""
+    ## Interactive demo: Raising the Cost of Malicious AI-Powered Image Editing
+    """)
+    gr.HTML('''
+    <p style="margin-bottom: 10px; font-size: 94%">This is an unofficial demo for Photoguard, an approach to safeguarding images against manipulation by ML-powered photo-editing models such as Stable Diffusion by immunizing the images. The demo is based on the <a href='https://github.com/MadryLab/photoguard' style='text-decoration: underline;' target='_blank'>GitHub</a> implementation provided by the authors.</p>
+    ''')
+
+    with gr.Column():
+        with gr.Tab("Simple Image to Image"):
+            input_image_img2img = gr.Image(type="pil", label="Source Image")
+            input_prompt_img2img = gr.Textbox(label="Prompt")
+            run_btn_img2img = gr.Button('Run')
+
+        with gr.Tab("Simple Inpainting"):
+            input_image_inpaint = gr.Image(type="pil", label="Source Image")
+            mask_image_inpaint = gr.Image(type="pil", label="Mask")
+            input_prompt_inpaint = gr.Textbox(label="Prompt")
+            run_btn_inpaint = gr.Button('Run')
+
+    with gr.Row():
+        result_gallery = gr.Gallery(
             label="Generated images", show_label=False, elem_id="gallery"
         ).style(grid=[2], height="auto")
-                         ],
-                         title=title,
-                         description = description,
-                         examples = examples
-                         )
+
+    run_btn_img2img.click(process_image_img2img, inputs=[input_image_img2img, input_prompt_img2img], outputs=[result_gallery])
+    run_btn_inpaint.click(process_image_inpaint, inputs=[input_image_inpaint, mask_image_inpaint, input_prompt_inpaint], outputs=[result_gallery])
+
 
-interface.launch(debug=True)
+demo.launch(debug=True)
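
For orientation: the pgd() helper called by the new process_image_inpaint function is defined earlier in app.py and is not part of this diff. Below is a minimal sketch of a masked, targeted PGD attack with the same call signature, written here only to illustrate the technique; it is not the repository's implementation, and the name pgd_sketch is hypothetical. It assumes model is the VAE encoder, as in the call above.

import torch

def pgd_sketch(X, model, eps=0.1, step_size=0.015, iters=40,
               clamp_min=0, clamp_max=1, mask=None, target=None, criterion=None):
    # Start from a random point inside the L-infinity ball of radius eps around X.
    X_adv = X.clone().detach() + torch.empty_like(X).uniform_(-eps, eps)
    for _ in range(iters):
        X_adv.requires_grad_(True)
        # Distance between the current VAE latent and the decoy target latent.
        loss = criterion(model(X_adv).latent_dist.mean, target)
        grad = torch.autograd.grad(loss, [X_adv])[0]
        with torch.no_grad():
            # Signed-gradient descent step: move the latent toward the target.
            X_adv = X_adv - step_size * grad.sign()
            # Project back into the eps-ball around X and the valid pixel range.
            X_adv = torch.minimum(torch.maximum(X_adv, X - eps), X + eps)
            X_adv = X_adv.clamp(clamp_min, clamp_max)
            if mask is not None:
                # Confine the perturbation to the region selected by the mask.
                X_adv = X_adv * mask + X * (1 - mask)
    return X_adv

As the in-code comment notes, the attack drives the encoder's latent for the immunized image toward that of an unrelated target photo (or toward zeros), so the inpainting pipeline works from a corrupted latent inside the masked region.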
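
prepare_mask_and_masked_image and recover_image are imported from utils (see the import line in the first hunk) and are likewise outside this diff. As a rough guide only, a helper of the first kind commonly follows the pattern below (binarized single-channel mask, image scaled to [-1, 1], masked pixels zeroed, matching the clamp_min=-1/clamp_max=1 used in the attack); the actual utils.py implementation may differ, and the _sketch name is hypothetical.

import numpy as np
import torch
from PIL import Image

def prepare_mask_and_masked_image_sketch(image: Image.Image, mask: Image.Image):
    # Image -> float tensor in [-1, 1], shape (1, 3, H, W).
    img = torch.from_numpy(np.array(image.convert("RGB"))).float() / 127.5 - 1.0
    img = img.permute(2, 0, 1).unsqueeze(0)
    # Mask -> binary {0, 1} float tensor, shape (1, 1, H, W).
    m = torch.from_numpy(np.array(mask.convert("L"))).float() / 255.0
    m = (m >= 0.5).float()[None, None]
    # Masked image: zero out the pixels that the inpainting model will repaint.
    masked_image = img * (m < 0.5)
    return m, masked_image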
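
Finally, the new inpainting path can be exercised without the Gradio UI, for example from a Python shell in the Space's environment. In the sketch below, dog.png and the prompt come from the examples list in app.py, while dog_mask.png is a placeholder for any binary mask image (the function inverts and resizes it, as shown in the diff).

from PIL import Image

source = Image.open("dog.png")         # example image shipped with the demo
mask = Image.open("dog_mask.png")      # placeholder: any binary mask for the scene
results = process_image_inpaint(source, mask, "dog under heavy rain and muddy ground real")

# The function returns (PIL image, caption) pairs, same as the img2img path.
for img, caption in results:
    img.save(caption.lower().replace(" ", "_").replace(".", "") + ".png")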