Kieran Fraser committed on
Commit
bfddcf6
1 Parent(s): 8786a62

Updating descriptions and labels


Signed-off-by: Kieran Fraser <Kieran.Fraser@ibm.com>

Files changed (1)
  1. app.py +37 -29
app.py CHANGED
@@ -38,6 +38,9 @@ css = """
     --prose-text-size: var(--text-md);
     --section-header-text-size: var(--text-md);
 }
+.eta-bar.svelte-1occ011.svelte-1occ011 {
+    background: #ccccff !important;
+}
 .center-text { text-align: center !important }
 .larger-gap { gap: 100px !important; }
 .symbols { text-align: center !important; margin: auto !important; }
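The new .eta-bar rule recolors Gradio's queue progress (ETA) bar. The svelte-1occ011 class hash is generated by the Svelte build shipped with a particular Gradio release, so this selector is version-pinned and may silently stop matching after a Gradio upgrade. A minimal sketch of how such an override is wired up, matching the gr.Blocks(css=css, ...) call later in this file (the Markdown content is a placeholder):

import gradio as gr

# Version-pinned override of Gradio's internal progress-bar styling.
css = """
.eta-bar.svelte-1occ011.svelte-1occ011 {
    background: #ccccff !important;
}
"""

with gr.Blocks(css=css) as demo:
    gr.Markdown("placeholder")  # stand-in for the app's real content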
@@ -169,8 +172,13 @@ def clf_evasion_evaluate(*args):
                                      learning_rate=learning_rate,
                                      max_iter=attack_max_iter, patch_type='square',
                                      patch_location=(x_location, y_location),
-                                     patch_shape=(3, patch_height, patch_width))
-        patch, _ = attacker.generate(x_subset)
+                                     patch_shape=(3, patch_height, patch_width),
+                                     targeted=True)
+        y_one_hot = np.zeros(len(label_names))
+        y_one_hot[2] = 1.0
+        y_target = np.tile(y_one_hot, (x_subset.shape[0], 1))
+
+        patch, _ = attacker.generate(x_subset, y_target)
         x_adv = attacker.apply_patch(x_subset, scale=0.3)
 
         outputs = hf_model.predict(x_adv)
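This hunk switches the patch attack from untargeted to targeted: with targeted=True, generate needs a label array, and the one-hot target at index 2 corresponds to 'bird' in the CIFAR-10 class ordering, which is why most of the new default_patch captions below read 'bird'. A self-contained sketch of the target construction (the label_names list and dummy batch are assumptions standing in for the app's data):

import numpy as np

# Assumed CIFAR-10 class ordering, matching the gallery file names below.
label_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

x_subset = np.zeros((10, 3, 32, 32), dtype=np.float32)  # dummy image batch

# One-hot target steering every patched image toward class 2 ('bird').
y_one_hot = np.zeros(len(label_names))
y_one_hot[2] = 1.0
y_target = np.tile(y_one_hot, (x_subset.shape[0], 1))   # shape (10, 10)

assert label_names[int(np.argmax(y_target[0]))] == 'bird'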
@@ -217,28 +225,28 @@ def default_perturbation():
             ('./data/pgd/perturb/p10.png')]
 
 def default_pgd():
-    return [('./data/pgd/attacked/0_airplane.png', 'airplane'),
-            ('./data/pgd/attacked/1_automobile.png', 'automobile'),
-            ('./data/pgd/attacked/2_bird.png', 'bird'),
-            ('./data/pgd/attacked/3_cat.png', 'cat'),
-            ('./data/pgd/attacked/4_deer.png', 'deer'),
-            ('./data/pgd/attacked/5_dog.png', 'dog'),
-            ('./data/pgd/attacked/6_frog.png', 'frog'),
-            ('./data/pgd/attacked/7_horse.png', 'horse'),
-            ('./data/pgd/attacked/8_ship.png', 'ship'),
-            ('./data/pgd/attacked/9_truck.png', 'truck')]
+    return [('./data/pgd/attacked/0_airplane.png', 'ship'),
+            ('./data/pgd/attacked/1_automobile.png', 'ship'),
+            ('./data/pgd/attacked/2_bird.png', 'truck'),
+            ('./data/pgd/attacked/3_cat.png', 'deer'),
+            ('./data/pgd/attacked/4_deer.png', 'dog'),
+            ('./data/pgd/attacked/5_dog.png', 'deer'),
+            ('./data/pgd/attacked/6_frog.png', 'horse'),
+            ('./data/pgd/attacked/7_horse.png', 'frog'),
+            ('./data/pgd/attacked/8_ship.png', 'frog'),
+            ('./data/pgd/attacked/9_truck.png', 'automobile')]
 
 def default_patch():
-    return [('./data/patch/0_airplane.png', 'airplane'),
+    return [('./data/patch/0_airplane.png', 'bird'),
             ('./data/patch/1_automobile.png', 'automobile'),
             ('./data/patch/2_bird.png', 'bird'),
-            ('./data/patch/3_cat.png', 'cat'),
-            ('./data/patch/4_deer.png', 'deer'),
-            ('./data/patch/5_dog.png', 'dog'),
-            ('./data/patch/6_frog.png', 'frog'),
+            ('./data/patch/3_cat.png', 'bird'),
+            ('./data/patch/4_deer.png', 'bird'),
+            ('./data/patch/5_dog.png', 'bird'),
+            ('./data/patch/6_frog.png', 'bird'),
             ('./data/patch/7_horse.png', 'horse'),
             ('./data/patch/8_ship.png', 'ship'),
-            ('./data/patch/9_truck.png', 'truck')]
+            ('./data/patch/9_truck.png', 'automobile')]
 
 # e.g. To use a local alternative theme: carbon_theme = Carbon()
 carbon_theme = Carbon()
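The gallery captions now show the class the model predicts after the attack instead of the ground-truth class, so a successful attack is visible directly in the caption (e.g. the airplane image labelled 'ship'). As the gr.Gallery(default_patch, ...) call later in this diff shows, Gradio accepts a list of (image, caption) tuples, or a callable returning one, as a gallery value; a minimal sketch:

import gradio as gr

def default_pgd():
    # (image path, caption) pairs; captions are post-attack predictions.
    return [('./data/pgd/attacked/0_airplane.png', 'ship'),
            ('./data/pgd/attacked/9_truck.png', 'automobile')]

with gr.Blocks() as demo:
    gr.Gallery(default_pgd, label="Adversarial", preview=False)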
@@ -303,7 +311,7 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
                 however they contain subtle changes which cause the AI model to make incorrect predictions.</p><br/>''')
 
 
-        with gr.Accordion("Projected Gradient Descent", open=False, elem_classes="custom-text"):
+        with gr.Accordion("Projected Gradient Descent", open=True, elem_classes="custom-text"):
             gr.Markdown('''This attack uses the PGD optimization algorithm to identify the optimal perturbations
                 to add to an image (i.e. changing pixel values) to cause the model to misclassify images. See more
                 <a href="https://github.com/Trusted-AI/adversarial-robustness-toolbox"
@@ -313,9 +321,9 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
 
             with gr.Column(scale=1):
                 attack = gr.Textbox(visible=True, value="PGD", label="Attack", interactive=False)
-                max_iter = gr.Slider(minimum=1, maximum=10, label="Max iterations", value=4)
-                eps = gr.Slider(minimum=0.0001, maximum=1, label="Epslion", value=0.3)
-                eps_steps = gr.Slider(minimum=0.0001, maximum=1, label="Epsilon steps", value=0.03)
+                max_iter = gr.Slider(minimum=1, maximum=10, label="Max iterations", value=4, info="Max number of iterations for attack to run searching for adversarial perturbation. Larger value prolongs search.")
+                eps = gr.Slider(minimum=0.0001, maximum=1, label="Epsilon", value=0.3, info="Adjusts the maximum allowed perturbation added to the image.")
+                eps_steps = gr.Slider(minimum=0.0001, maximum=1, label="Epsilon steps", value=0.03, info="Smaller value yields finer perturbation, slower to find adversarial image. Larger value yields more perceptible perturbations, quicker finding adversarial image.")
                 bt_eval_pgd = gr.Button("Evaluate ✨", elem_classes="eval-bt")
 
             # Evaluation Output. Visualisations of success/failures of running evaluation attacks.
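The three sliders map onto the main hyperparameters of ART's ProjectedGradientDescent: eps bounds the total perturbation, eps_step sets the per-iteration step size, and max_iter caps the number of optimization steps. A minimal sketch of that mapping, assuming clf is the app's ART-wrapped classifier and x_subset an image batch:

from art.attacks.evasion import ProjectedGradientDescent

attack = ProjectedGradientDescent(estimator=clf,  # ART classifier wrapper (assumed)
                                  eps=0.3,        # "Epsilon" slider: max total perturbation
                                  eps_step=0.03,  # "Epsilon steps" slider: per-step size
                                  max_iter=4)     # "Max iterations" slider
x_adv = attack.generate(x=x_subset)               # perturbed copies of the batch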
@@ -346,7 +354,7 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
 
         gr.Markdown('''<br/>''')
 
-        with gr.Accordion("Adversarial Patch", open=False, elem_classes="custom-text"):
+        with gr.Accordion("Adversarial Patch", open=True, elem_classes="custom-text"):
             gr.Markdown('''This attack optimizes pixels in a patch which can be overlayed on an image, causing a model to misclassify. See more
                 <a href="https://github.com/Trusted-AI/adversarial-robustness-toolbox"
                 target="blank_">here</a>.''')
@@ -355,11 +363,11 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
 
             with gr.Column(scale=1):
                 attack = gr.Textbox(visible=True, value="Adversarial Patch", label="Attack", interactive=False)
-                max_iter = gr.Slider(minimum=1, maximum=1000, label="Max iterations", value=10)
-                x_location = gr.Slider(minimum=1, maximum=32, label="Location (x)", value=1)
-                y_location = gr.Slider(minimum=1, maximum=32, label="Location (y)", value=1)
-                patch_height = gr.Slider(minimum=1, maximum=32, label="Patch height", value=12)
-                patch_width = gr.Slider(minimum=1, maximum=32, label="Patch width", value=12)
+                max_iter = gr.Slider(minimum=1, maximum=1000, label="Max iterations", value=100, info="Max number of iterations for attack to run searching for optimal adversarial patch. Larger value prolongs search.")
+                x_location = gr.Slider(minimum=0.01, maximum=32, label="Location (x)", value=1, info="Moves patch left and right")
+                y_location = gr.Slider(minimum=0.01, maximum=32, label="Location (y)", value=1, info="Moves patch up and down")
+                patch_height = gr.Slider(minimum=1, maximum=32, label="Patch height", value=16)
+                patch_width = gr.Slider(minimum=1, maximum=32, label="Patch width", value=16)
                 eval_btn_patch = gr.Button("Evaluate ✨", elem_classes="eval-bt")
 
             # Evaluation Output. Visualisations of success/failures of running evaluation attacks.
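On 32x32 CIFAR images, the location and size sliders can jointly push the patch past the image border (e.g. x_location=32 with patch_width=16). A hypothetical guard, not present in the app, that could validate the slider values before the attack runs:

def patch_fits(x_location, y_location, patch_height, patch_width, image_size=32):
    # True if the patch rectangle stays fully inside the image frame.
    return (x_location + patch_width <= image_size
            and y_location + patch_height <= image_size)

assert patch_fits(1, 1, 16, 16)        # the new defaults fit comfortably
assert not patch_fits(32, 1, 16, 16)   # maximum x-location overflows the frame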
@@ -383,7 +391,7 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
             with gr.Column(scale=10):
                 gr.Markdown('''<p style="font-size: 18px"><i>The original image (with optimized perturbations applied) gives us an adversarial image which fools the model.</i></p>''')
                 adversarial_gallery = gr.Gallery(default_patch, label="Adversarial", preview=False, show_download_button=True)
-                robust_accuracy = gr.Number(0.8, label="Robust Accuracy", precision=2)
+                robust_accuracy = gr.Number(0.4, label="Robust Accuracy", precision=2)
 
         eval_btn_patch.click(clf_evasion_evaluate, inputs=[attack, max_iter, eps, eps_steps, x_location, y_location, patch_height,
                                                            patch_width],
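The gr.Number default drops from 0.8 to 0.4, presumably to match what the new targeted-attack defaults produce; it is only a placeholder until clf_evasion_evaluate runs. Robust accuracy is conventionally the fraction of adversarial inputs the model still classifies correctly; a minimal sketch, assuming outputs holds the model's class scores on x_adv and y_true the ground-truth indices:

import numpy as np

def robust_accuracy_score(outputs, y_true):
    # Fraction of adversarial examples whose top-1 prediction is still correct.
    return float(np.mean(np.argmax(outputs, axis=1) == np.asarray(y_true)))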
 