hadisalman committed on
Commit
433d7c5
1 Parent(s): 9eb36be

small fixes

Browse files
Files changed (1) hide show
  1. app.py +10 -11
app.py CHANGED
@@ -68,7 +68,7 @@ def immunize_fn(init_image, mask_image):
68
  criterion=torch.nn.MSELoss(),
69
  clamp_min=-1,
70
  clamp_max=1,
71
- eps=0.1,
72
  step_size=0.01,
73
  iters=200,
74
  mask=1-mask
@@ -115,7 +115,7 @@ def run(image, prompt, seed, immunize=False):
115
 
116
  demo = gr.Interface(fn=run,
117
  inputs=[
118
- gr.ImageMask(label='Input Image (Use drawing tool to mask the regions you want to keep, e.g. faces)'),
119
  gr.Textbox(label='Prompt', placeholder='A photo of a man in a wedding'),
120
  gr.Textbox(label='Seed (Change to get different edits!)', placeholder=str(DEFAULT_SEED), visible=True),
121
  gr.Checkbox(label='Immunize', value=False),
@@ -128,29 +128,28 @@ demo = gr.Interface(fn=run,
128
  examples=[
129
  ['./images/hadi_and_trevor.jpg', 'man attending a wedding', '329357'],
130
  ['./images/trevor_2.jpg', 'two men in prison', '329357'],
131
- ['./images/trevor_3.jpg', 'man in a private jet', '329357'],
132
  ['./images/elon_2.jpg', 'man in a metro station', '214213'],
133
  ],
134
  examples_per_page=20,
135
  allow_flagging='never',
136
- title="Immunize your photos against manipulation by Stable Diffusion",
137
  description='''<u>Official</u> demo of our paper: <br>
138
  **Raising the Cost of Malicious AI-Powered Image Editing** <br>
139
- *Hadi Salman\*, Alaa Khaddaj\*, Guillaume Leclerc\*, Andrew Ilyas, Aleksander Madry* <br>
140
  MIT &nbsp;&nbsp;[Paper](https://arxiv.org/abs/2302.06588)
141
  &nbsp;&nbsp;[Blog post](https://gradientscience.org/photoguard/)
142
  &nbsp;&nbsp;[![](https://badgen.net/badge/icon/GitHub?icon=github&label)](https://github.com/MadryLab/photoguard)
143
  <br />
144
- We present an approach to mitigating the risks of malicious image editing posed by large diffusion models. The key idea is to immunize images so as to make them resistant to manipulation by these models. This immunization relies on injection of imperceptible adversarial perturbations designed to disrupt the operation of the targeted diffusion models, forcing them to generate unrealistic images.
145
  <br />
146
- **This is a research project and is not production-ready.**
147
  <details closed>
148
- <summary>Demo steps:</summary>
149
 
150
  + Upload an image (or select from the below examples!)
151
  + Mask (using the drawing tool) the parts of the image you want to maintain unedited (e.g., faces of people)
152
  + Add a prompt to edit the image accordingly (see examples below)
153
- + Play with the seed and click submit until you get a realistic edit that you are happy with (we have good seeds for you below)
154
 
155
  Now let's immunize your image and try again!
156
  + Click on the "immunize" button, then submit.
@@ -159,5 +158,5 @@ Now let's immunize your image and try again!
159
  ''',
160
  )
161
 
162
- demo.launch()
163
- # demo.launch(server_name='0.0.0.0', share=False, server_port=7860, inline=False)
 
68
  criterion=torch.nn.MSELoss(),
69
  clamp_min=-1,
70
  clamp_max=1,
71
+ eps=0.12,
72
  step_size=0.01,
73
  iters=200,
74
  mask=1-mask
 
115
 
116
  demo = gr.Interface(fn=run,
117
  inputs=[
118
+ gr.ImageMask(label='Drawing tool to mask regions you want to keep, e.g. faces'),
119
  gr.Textbox(label='Prompt', placeholder='A photo of a man in a wedding'),
120
  gr.Textbox(label='Seed (Change to get different edits!)', placeholder=str(DEFAULT_SEED), visible=True),
121
  gr.Checkbox(label='Immunize', value=False),
 
128
  examples=[
129
  ['./images/hadi_and_trevor.jpg', 'man attending a wedding', '329357'],
130
  ['./images/trevor_2.jpg', 'two men in prison', '329357'],
 
131
  ['./images/elon_2.jpg', 'man in a metro station', '214213'],
132
  ],
133
  examples_per_page=20,
134
  allow_flagging='never',
135
+ title="Interactive Demo: Immunize your Photos Against AI-powered Malicious Manipulation",
136
  description='''<u>Official</u> demo of our paper: <br>
137
  **Raising the Cost of Malicious AI-Powered Image Editing** <br>
138
+ *[Hadi Salman](https://twitter.com/hadisalmanX)\*, [Alaa Khaddaj](https://twitter.com/Alaa_Khaddaj)\*, [Guillaume Leclerc](https://twitter.com/gpoleclerc)\*, [Andrew Ilyas](https://twitter.com/andrew_ilyas), [Aleksander Madry](https://twitter.com/aleks_madry)* <br>
139
  MIT &nbsp;&nbsp;[Paper](https://arxiv.org/abs/2302.06588)
140
  &nbsp;&nbsp;[Blog post](https://gradientscience.org/photoguard/)
141
  &nbsp;&nbsp;[![](https://badgen.net/badge/icon/GitHub?icon=github&label)](https://github.com/MadryLab/photoguard)
142
  <br />
143
+ Below you can test our (encoder attack) immunization method for making images resistant to manipulation by Stable Diffusion. This immunization process forces the model to perform unrealistic edits.
144
  <br />
145
+ **This is a research project and is not production-ready. See Section 5 in our paper for discussion on its limitations.**
146
  <details closed>
147
+ <summary>Click for demo steps:</summary>
148
 
149
  + Upload an image (or select from the below examples!)
150
  + Mask (using the drawing tool) the parts of the image you want to maintain unedited (e.g., faces of people)
151
  + Add a prompt to edit the image accordingly (see examples below)
152
+ + Play with the seed and click submit until you get a realistic edit that you are happy with (or use default seeds below)
153
 
154
  Now let's immunize your image and try again!
155
  + Click on the "immunize" button, then submit.
 
158
  ''',
159
  )
160
 
161
+ # demo.launch()
162
+ demo.launch(server_name='0.0.0.0', share=False, server_port=7860, inline=False)