Kieran Fraser committed
Commit 8dcffda
1 Parent(s): d4b0552

updating links


Signed-off-by: Kieran Fraser <Kieran.Fraser@ibm.com>

app.py CHANGED
@@ -53,6 +53,14 @@ div.svelte-15lo0d8>*, div.svelte-15lo0d8>.form > * {
 }
 """
 
+global model
+model = transformers.AutoModelForImageClassification.from_pretrained(
+    'facebook/deit-tiny-distilled-patch16-224',
+    ignore_mismatched_sizes=True,
+    force_download=True,
+    num_labels=10
+)
+
 def sample_imagenette():
     import torchvision
     label_names = [
@@ -113,12 +121,7 @@ def clf_poison_evaluate(*args):
 
     target_class = label_names.index(target_class)
 
-    model = transformers.AutoModelForImageClassification.from_pretrained(
-        'facebook/deit-tiny-distilled-patch16-224',
-        ignore_mismatched_sizes=True,
-        force_download=True,
-        num_labels=10
-    )
+
     optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
     loss_fn = torch.nn.CrossEntropyLoss()
 
@@ -142,7 +145,7 @@ def clf_poison_evaluate(*args):
     train_dataset = torchvision.datasets.ImageFolder(root="./data/imagenette2-320/train", transform=transform)
     labels = np.asarray(train_dataset.targets)
    classes = np.unique(labels)
-    samples_per_class = 10
+    samples_per_class = 20
 
     x_subset = []
     y_subset = []
@@ -226,6 +229,10 @@ def show_params(type):
         return gr.Column(visible=True)
     return gr.Column(visible=False)
 
+head_script = '''
+<script async defer src="https://buttons.github.io/buttons.js"></script>
+'''
+
 # e.g. To use a local alternative theme: carbon_theme = Carbon()
 carbon_theme = Carbon()
 with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
@@ -248,12 +255,15 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
                     common red-team workflow to assess model vulnerability to data poisoning attacks 🧪</p>''')
 
     gr.Markdown('''<p style="font-size: 18px; text-align: justify"><i>Check out the full suite of features provided by ART <a href="https://github.com/Trusted-AI/adversarial-robustness-toolbox"
-                target="blank_">here</a>.</i>
-                add link to notebook</p>''')
-
+                target="blank_">here</a>. To dive further into poisoning attacks with Hugging Face and ART, check out our
+                <a href="https://github.com/Trusted-AI/adversarial-robustness-toolbox/blob/main/notebooks/hugging_face_poisoning.ipynb"
+                target="_blank">notebook</a>. Also feel free to contribute and give our repo a ⭐.</i></p>''')
+
     gr.Markdown('''<hr/>''')
 
 
+
+
     with gr.Row(elem_classes=["larger-gap", "custom-text"]):
         with gr.Column(scale=1):
             gr.Markdown('''<p style="font-size: 20px; text-align: justify">ℹ️ First lets set the scene. You have a dataset of images, such as Imagenette.</p>''')
@@ -287,28 +297,28 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
         with gr.Column(scale=1):
             attack = gr.Textbox(visible=True, value="Backdoor", label="Attack", interactive=False)
             target_class = gr.Radio(label="Target class", info="The class you wish to force the model to predict.",
-                choices=['dog',
+                choices=['church',
                          'cassette player',
                          'chainsaw',
-                         'church',
+                         'dog',
                          'french horn',
                          'garbage truck',
                          'gas pump',
                          'golf ball',
-                         'parachutte',], value='dog')
+                         'parachutte',], value='church')
             eval_btn_patch = gr.Button("Evaluate")
     with gr.Row(elem_classes="custom-text"):
         with gr.Column(scale=10):
-            clean_gallery = gr.Gallery(label="Clean", preview=False, show_download_button=True)
+            clean_gallery = gr.Gallery(label="Clean", preview=False, show_download_button=True, height=600)
             clean_accuracy = gr.Number(label="Clean Accuracy", precision=2)
         with gr.Column(scale=1, min_width='0px', elem_classes='symbols'):
             gr.Markdown('''➕''')
-        with gr.Column(scale=5):
+        with gr.Column(scale=5, elem_classes='symbols'):
             trigger_image = gr.Image(label="Trigger Image", value="./baby-on-board.png", interactive=False)
         with gr.Column(scale=1, min_width='0px'):
             gr.Markdown('''🟰''', elem_classes='symbols')
         with gr.Column(scale=10):
-            poison_gallery = gr.Gallery(label="Poisoned", preview=False, show_download_button=True)
+            poison_gallery = gr.Gallery(label="Poisoned", preview=False, show_download_button=True, height=600)
             poison_success = gr.Number(label="Poison Success", precision=2)
 
     eval_btn_patch.click(clf_poison_evaluate, inputs=[attack, trigger_image, target_class],
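Beyond the link updates, the functional changes above are: the DeiT model construction moves from inside clf_poison_evaluate to module scope (declared global), so the facebook/deit-tiny-distilled-patch16-224 weights are downloaded once at startup rather than on every Evaluate click; a head_script tag for GitHub's buttons.js is defined (presumably intended for Gradio's head option, though this diff does not show it wired in); and samples_per_class doubles from 10 to 20. The loop that consumes samples_per_class to build x_subset and y_subset falls outside the diff, so the following is only a sketch of how such a balanced Imagenette subset is typically drawn; the loop body and the transform are assumptions, not code from this commit:

import numpy as np
import torchvision
from torchvision import transforms

# Assumed preprocessing; app.py defines its own `transform` elsewhere.
transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])

train_dataset = torchvision.datasets.ImageFolder(root="./data/imagenette2-320/train", transform=transform)
labels = np.asarray(train_dataset.targets)
classes = np.unique(labels)
samples_per_class = 20  # value set by this commit (was 10)

x_subset = []
y_subset = []
for c in classes:
    # take the first `samples_per_class` images of each class for a balanced subset
    for i in np.where(labels == c)[0][:samples_per_class]:
        image, label = train_dataset[i]      # CHW float tensor after `transform`
        x_subset.append(image.numpy())
        y_subset.append(label)

x_subset = np.stack(x_subset)    # (num_classes * samples_per_class, 3, 224, 224)
y_subset = np.asarray(y_subset)

With Imagenette's 10 classes (matching num_labels=10 above), this yields 200 fine-tuning images instead of the previous 100.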
poisoned_models/deit_imagenette_poisoned_model_2.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39f8f6f68436a69a2c06b480934eda88e2279e3b03db5226ed6b55eef16cce61
+size 22201811
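The new checkpoint is tracked with Git LFS, so the repository stores only this three-line pointer (spec version, SHA-256 object id, and a size of about 22 MB); the weights themselves are pulled from LFS storage when the Space is cloned with git lfs installed. The commit does not show how app.py consumes the file, so the loader below is only a sketch, assuming the checkpoint holds either a state_dict or a fully pickled module for the same 10-label DeiT configuration used in app.py:

import torch
import transformers

# Assumption: the checkpoint matches the DeiT config instantiated in app.py.
model = transformers.AutoModelForImageClassification.from_pretrained(
    'facebook/deit-tiny-distilled-patch16-224',
    ignore_mismatched_sizes=True,
    num_labels=10,
)

checkpoint = torch.load("poisoned_models/deit_imagenette_poisoned_model_2.pt", map_location="cpu")
if isinstance(checkpoint, dict):
    model.load_state_dict(checkpoint)   # saved via torch.save(model.state_dict(), ...)
else:
    model = checkpoint                  # saved via torch.save(model, ...)
model.eval()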