lockwooda committed on
Commit e118086
1 Parent(s): b1d2d14

Initial manual application publish

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ utils/resources/models/xview_model.pt filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,633 @@
1
+ '''
2
+ HEART Gradio Example App
3
+
4
+ To run:
5
+ - clone the repository
6
+ - execute: gradio examples/gradio_app.py or python examples/gradio_app.py
7
+ - navigate to local URL e.g. http://127.0.0.1:7860
8
+ '''
9
+
10
+ import torch
11
+ import numpy as np
12
+ import pandas as pd
13
+ # from carbon_theme import Carbon
14
+
15
+ import gradio as gr
16
+ import os
17
+
18
+ css = """
19
+ .input-image { margin: auto !important }
20
+ .small-font span{
21
+ font-size: 0.6em;
22
+ }
23
+ .df-padding {
24
+ padding-left: 50px !important;
25
+ padding-right: 50px !important;
26
+ }
27
+ """
28
+
29
+ def basic_cifar10_model():
30
+ '''
31
+ Load an example CIFAR10 model
32
+ '''
33
+ from heart.estimators.classification.pytorch import JaticPyTorchClassifier
34
+
35
+ labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
36
+ path = './'
37
+ class Model(torch.nn.Module):
38
+ """
39
+ Create model for pytorch.
40
+ Here the model does not use maxpooling. Needed for certification tests.
41
+ """
42
+
43
+ def __init__(self):
44
+ super(Model, self).__init__()
45
+
46
+ self.conv = torch.nn.Conv2d(
47
+ in_channels=3, out_channels=16, kernel_size=(4, 4), dilation=(1, 1), padding=(0, 0), stride=(3, 3)
48
+ )
49
+
50
+ self.fullyconnected = torch.nn.Linear(in_features=1600, out_features=10)
51
+
52
+ self.relu = torch.nn.ReLU()
53
+
54
+ w_conv2d = np.load(
55
+ os.path.join(
56
+ os.path.dirname(path),
57
+ "utils/resources/models",
58
+ "W_CONV2D_NO_MPOOL_CIFAR10.npy",
59
+ )
60
+ )
61
+ b_conv2d = np.load(
62
+ os.path.join(
63
+ os.path.dirname(path),
64
+ "utils/resources/models",
65
+ "B_CONV2D_NO_MPOOL_CIFAR10.npy",
66
+ )
67
+ )
68
+ w_dense = np.load(
69
+ os.path.join(
70
+ os.path.dirname(path),
71
+ "utils/resources/models",
72
+ "W_DENSE_NO_MPOOL_CIFAR10.npy",
73
+ )
74
+ )
75
+ b_dense = np.load(
76
+ os.path.join(
77
+ os.path.dirname(path),
78
+ "utils/resources/models",
79
+ "B_DENSE_NO_MPOOL_CIFAR10.npy",
80
+ )
81
+ )
82
+
83
+ self.conv.weight = torch.nn.Parameter(torch.Tensor(w_conv2d))
84
+ self.conv.bias = torch.nn.Parameter(torch.Tensor(b_conv2d))
85
+ self.fullyconnected.weight = torch.nn.Parameter(torch.Tensor(w_dense))
86
+ self.fullyconnected.bias = torch.nn.Parameter(torch.Tensor(b_dense))
87
+
88
+ # pylint: disable=W0221
89
+ # disable pylint because of API requirements for function
90
+ def forward(self, x):
91
+ """
92
+ Forward function to evaluate the model
93
+ :param x: Input to the model
94
+ :return: Prediction of the model
95
+ """
96
+ x = self.conv(x)
97
+ x = self.relu(x)
98
+ x = x.reshape(-1, 1600)
99
+ x = self.fullyconnected(x)
100
+ return x
101
+
102
+ # Define the network
103
+ model = Model()
104
+
105
+ # Define a loss function and optimizer
106
+ loss_fn = torch.nn.CrossEntropyLoss(reduction="sum")
107
+ optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
108
+
109
+ # Get classifier
110
+ jptc = JaticPyTorchClassifier(
111
+ model=model, loss=loss_fn, optimizer=optimizer, input_shape=(3, 32, 32), nb_classes=10, clip_values=(0, 1), labels=labels
112
+ )
113
+ return jptc
114
+
115
+ def clf_evasion_evaluate(*args):
116
+ '''
117
+ Run a classification task evaluation
118
+ '''
119
+
120
+ attack = args[0]
121
+ model_type = args[1]
122
+ model_path = args[2]
123
+ model_channels = args[3]
124
+ model_height = args[4]
125
+ model_width = args[5]
126
+ model_clip = args[6]
127
+
128
+ dataset_type = args[-4]
129
+ dataset_path = args[-3]
130
+ dataset_split = args[-2]
131
+ image = args[-1]
132
+
133
+ if dataset_type == "Example XView":
134
+ from maite import load_dataset
135
+ import torchvision
136
+ jatic_dataset = load_dataset(
137
+ provider="huggingface",
138
+ dataset_name="CDAO/xview-subset-classification",
139
+ task="image-classification",
140
+ split="test",
141
+ )
142
+ IMAGE_H, IMAGE_W = 224, 224
143
+ transform = torchvision.transforms.Compose(
144
+ [
145
+ torchvision.transforms.Resize((IMAGE_H, IMAGE_W)),
146
+ torchvision.transforms.ToTensor(),
147
+ ]
148
+ )
149
+ jatic_dataset.set_transform(lambda x: {"image": transform(x["image"]), "label": x["label"]})
150
+ image = {'image': [i['image'].numpy() for i in jatic_dataset],
151
+ 'label': [i['label'] for i in jatic_dataset]}
152
+ elif dataset_type=="huggingface":
153
+ from maite import load_dataset
154
+ jatic_dataset = load_dataset(
155
+ provider=dataset_type,
156
+ dataset_name=dataset_path,
157
+ task="image-classification",
158
+ split=dataset_split,
159
+ drop_labels=False
160
+ )
161
+
162
+ image = {'image': [i['image'] for i in jatic_dataset],
163
+ 'label': [i['label'] for i in jatic_dataset]}
164
+ elif dataset_type=="torchvision":
165
+ from maite import load_dataset
166
+ jatic_dataset = load_dataset(
167
+ provider=dataset_type,
168
+ dataset_name=dataset_path,
169
+ task="image-classification",
170
+ split=dataset_split,
171
+ root='./data/',
172
+ download=True
173
+ )
174
+ image = {'image': [i['image'] for i in jatic_dataset],
175
+ 'label': [i['label'] for i in jatic_dataset]}
176
+ elif dataset_type=="Example CIFAR10":
177
+ from maite import load_dataset
178
+ jatic_dataset = load_dataset(
179
+ provider="torchvision",
180
+ dataset_name="CIFAR10",
181
+ task="image-classification",
182
+ split=dataset_split,
183
+ root='./data/',
184
+ download=True
185
+ )
186
+ image = {'image': [i['image'] for i in jatic_dataset][:100],
187
+ 'label': [i['label'] for i in jatic_dataset][:100]}
188
+
189
+ if model_type == "Example CIFAR10":
190
+ jptc = basic_cifar10_model()
191
+ elif model_type == "Example XView":
192
+ import torchvision
193
+ from heart.estimators.classification.pytorch import JaticPyTorchClassifier
194
+ classes = {
195
+ 0:'Building',
196
+ 1:'Construction Site',
197
+ 2:'Engineering Vehicle',
198
+ 3:'Fishing Vessel',
199
+ 4:'Oil Tanker',
200
+ 5:'Vehicle Lot'
201
+ }
202
+ model = torchvision.models.resnet18(False)
203
+ num_ftrs = model.fc.in_features
204
+ model.fc = torch.nn.Linear(num_ftrs, len(classes.keys()))
205
+ model.load_state_dict(torch.load('./utils/resources/models/xview_model.pt'))
206
+ _ = model.eval()
207
+ jptc = JaticPyTorchClassifier(
208
+ model=model, loss = torch.nn.CrossEntropyLoss(), input_shape=(3, 224, 224),
209
+ nb_classes=len(classes), clip_values=(0, 1), labels=list(classes.values())
210
+ )
211
+ elif model_type == "torchvision":
212
+ from maite.interop.torchvision import TorchVisionClassifier
213
+ from heart.estimators.classification.pytorch import JaticPyTorchClassifier
214
+
215
+ clf = TorchVisionClassifier.from_pretrained(model_path)
216
+ loss_fn = torch.nn.CrossEntropyLoss(reduction="sum")
217
+ jptc = JaticPyTorchClassifier(
218
+ model=clf._model, loss=loss_fn, input_shape=(model_channels, model_height, model_width),
219
+ nb_classes=len(clf._labels), clip_values=(0, model_clip), labels=clf._labels
220
+ )
221
+ elif model_type == "huggingface":
222
+ from maite.interop.huggingface import HuggingFaceImageClassifier
223
+ from heart.estimators.classification.pytorch import JaticPyTorchClassifier
224
+
225
+ clf = HuggingFaceImageClassifier.from_pretrained(model_path)
226
+ loss_fn = torch.nn.CrossEntropyLoss(reduction="sum")
227
+ jptc = JaticPyTorchClassifier(
228
+ model=clf._model, loss=loss_fn, input_shape=(model_channels, model_height, model_width),
229
+ nb_classes=len(clf._labels), clip_values=(0, model_clip), labels=clf._labels
230
+ )
231
+
232
+ if attack=="PGD":
233
+ from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_pytorch import ProjectedGradientDescentPyTorch
234
+ from heart.attacks.attack import JaticAttack
235
+ from heart.metrics import AccuracyPerturbationMetric
236
+ from torch.nn.functional import softmax
237
+ from maite.protocols import HasDataImage, is_typed_dict, ArrayLike
238
+
239
+ pgd_attack = ProjectedGradientDescentPyTorch(estimator=jptc, max_iter=args[7], eps=args[8],
240
+ eps_step=args[9], targeted=args[10]!="")
241
+ attack = JaticAttack(pgd_attack)
242
+
243
+ preds = jptc(image)
244
+ preds = softmax(torch.from_numpy(preds.logits), dim=1)
245
+ labels = {}
246
+ for i, label in enumerate(jptc.get_labels()):
247
+ labels[label] = preds[0][i]
248
+
249
+ if args[10]!="":
250
+ if is_typed_dict(image, HasDataImage):
251
+ data = {'image': image['image'], 'label': [args[10]]*len(image['image'])}
252
+ else:
253
+ data = {'image': image, 'label': [args[10]]}
254
+ else:
255
+ data = image
256
+
257
+ x_adv = attack.run_attack(data=data)
258
+ adv_preds = jptc(x_adv.adversarial_examples)
259
+ adv_preds = softmax(torch.from_numpy(adv_preds.logits), dim=1)
260
+ adv_labels = {}
261
+ for i, label in enumerate(jptc.get_labels()):
262
+ adv_labels[label] = adv_preds[0][i]
263
+
264
+ metric = AccuracyPerturbationMetric()
265
+ metric.update(jptc, jptc.device, image, x_adv.adversarial_examples)
266
+ clean_accuracy, robust_accuracy, perturbation_added = metric.compute()
267
+ metrics = pd.DataFrame([[clean_accuracy, robust_accuracy, perturbation_added]],
268
+ columns=['clean accuracy', 'robust accuracy', 'perturbation'])
269
+
270
+ adv_imgs = [img.transpose(1,2,0) for img in x_adv.adversarial_examples]
271
+ if is_typed_dict(image, HasDataImage):
272
+ image = image['image']
273
+ if not isinstance(image, list):
274
+ image = [image]
275
+
276
+ # when there are multiple images, use argmax to get the predicted label and add it as a caption
277
+ if dataset_type!="local":
278
+ temp = []
279
+ for i, img in enumerate(image):
280
+ if isinstance(img, ArrayLike):
281
+ temp.append((img.transpose(1,2,0), str(jptc.get_labels()[np.argmax(preds[i])]) ))
282
+ else:
283
+ temp.append((img, str(jptc.get_labels()[np.argmax(preds[i])]) ))
284
+ image = temp
285
+
286
+ temp = []
287
+ for i, img in enumerate(adv_imgs):
288
+ temp.append((img, str(jptc.get_labels()[np.argmax(adv_preds[i])]) ))
289
+ adv_imgs = temp
290
+
291
+ return [image, labels, adv_imgs, adv_labels, clean_accuracy, robust_accuracy, perturbation_added]
292
+
293
+ elif attack=="Adversarial Patch":
294
+ from art.attacks.evasion.adversarial_patch.adversarial_patch_pytorch import AdversarialPatchPyTorch
295
+ from heart.attacks.attack import JaticAttack
296
+ from heart.metrics import AccuracyPerturbationMetric
297
+ from torch.nn.functional import softmax
298
+ from maite.protocols import HasDataImage, is_typed_dict, ArrayLike
299
+
300
+ batch_size = 16
301
+ scale_min = 0.3
302
+ scale_max = 1.0
303
+ rotation_max = 0
304
+ learning_rate = 5000.
305
+ max_iter = 2000
306
+ patch_shape = (3, 14, 14)
307
+ patch_location = (18,18)
308
+
309
+ patch_attack = AdversarialPatchPyTorch(estimator=jptc, rotation_max=rotation_max, patch_location=(args[8], args[9]),
310
+ scale_min=scale_min, scale_max=scale_max, patch_type='square',
311
+ learning_rate=learning_rate, max_iter=args[7], batch_size=batch_size,
312
+ patch_shape=(3, args[10], args[11]), verbose=False, targeted=args[12]!="")
313
+
314
+ attack = JaticAttack(patch_attack)
315
+
316
+ preds = jptc(image)
317
+ preds = softmax(torch.from_numpy(preds.logits), dim=1)
318
+ labels = {}
319
+ for i, label in enumerate(jptc.get_labels()):
320
+ labels[label] = preds[0][i]
321
+
322
+ if args[12]!="":
323
+ if is_typed_dict(image, HasDataImage):
324
+ data = {'image': image['image'], 'label': [args[12]]*len(image['image'])}
325
+ else:
326
+ data = {'image': image, 'label': [args[12]]}
327
+ else:
328
+ data = image
329
+
330
+ attack_output = attack.run_attack(data=data)
331
+ adv_preds = jptc(attack_output.adversarial_examples)
332
+ adv_preds = softmax(torch.from_numpy(adv_preds.logits), dim=1)
333
+ adv_labels = {}
334
+ for i, label in enumerate(jptc.get_labels()):
335
+ adv_labels[label] = adv_preds[0][i]
336
+
337
+ metric = AccuracyPerturbationMetric()
338
+ metric.update(jptc, jptc.device, image, attack_output.adversarial_examples)
339
+ clean_accuracy, robust_accuracy, perturbation_added = metric.compute()
340
+ metrics = pd.DataFrame([[clean_accuracy, robust_accuracy, perturbation_added]],
341
+ columns=['clean accuracy', 'robust accuracy', 'perturbation'])
342
+
343
+ adv_imgs = [img.transpose(1,2,0) for img in attack_output.adversarial_examples]
344
+ if is_typed_dict(image, HasDataImage):
345
+ image = image['image']
346
+ if not isinstance(image, list):
347
+ image = [image]
348
+
349
+ # when there are multiple images, use argmax to get the predicted label and add it as a caption
350
+ if dataset_type!="local":
351
+ temp = []
352
+ for i, img in enumerate(image):
353
+
354
+ if isinstance(img, ArrayLike):
355
+ temp.append((img.transpose(1,2,0), str(jptc.get_labels()[np.argmax(preds[i])]) ))
356
+ else:
357
+ temp.append((img, str(jptc.get_labels()[np.argmax(preds[i])]) ))
358
+
359
+ image = temp
360
+
361
+ temp = []
362
+ for i, img in enumerate(adv_imgs):
363
+ temp.append((img, str(jptc.get_labels()[np.argmax(adv_preds[i])]) ))
364
+ adv_imgs = temp
365
+
366
+ patch, patch_mask = attack_output.adversarial_patch
367
+ patch_image = ((patch) * patch_mask).transpose(1,2,0)
368
+
369
+ return [image, labels, adv_imgs, adv_labels, clean_accuracy, robust_accuracy, patch_image]
370
+
371
+ def show_model_params(model_type):
372
+ '''
373
+ Show model parameters based on selected model type
374
+ '''
375
+ if model_type!="Example CIFAR10" and model_type!="Example XView":
376
+ return gr.Column(visible=True)
377
+ return gr.Column(visible=False)
378
+
379
+ def show_dataset_params(dataset_type):
380
+ '''
381
+ Show dataset parameters based on dataset type
382
+ '''
383
+ if dataset_type=="Example CIFAR10" or dataset_type=="Example XView":
384
+ return [gr.Column(visible=False), gr.Row(visible=False), gr.Row(visible=False)]
385
+ elif dataset_type=="local":
386
+ return [gr.Column(visible=True), gr.Row(visible=True), gr.Row(visible=False)]
387
+ return [gr.Column(visible=True), gr.Row(visible=False), gr.Row(visible=True)]
388
+
389
+ def pgd_show_label_output(dataset_type):
390
+ '''
391
+ Show PGD output component based on dataset type
392
+ '''
393
+ if dataset_type=="local":
394
+ return [gr.Label(visible=True), gr.Label(visible=True), gr.Number(visible=False), gr.Number(visible=False), gr.Number(visible=True)]
395
+ return [gr.Label(visible=False), gr.Label(visible=False), gr.Number(visible=True), gr.Number(visible=True), gr.Number(visible=True)]
396
+
397
+ def pgd_update_epsilon(clip_values):
398
+ '''
399
+ Update max value of PGD epsilon slider based on model clip values
400
+ '''
401
+ if clip_values == 255:
402
+ return gr.Slider(minimum=0.0001, maximum=255, label="Epsilon", value=55)
403
+ return gr.Slider(minimum=0.0001, maximum=1, label="Epsilon", value=0.05)
404
+
405
+ def patch_show_label_output(dataset_type):
406
+ '''
407
+ Show adversarial patch output components based on dataset type
408
+ '''
409
+ if dataset_type=="local":
410
+ return [gr.Label(visible=True), gr.Label(visible=True), gr.Number(visible=False), gr.Number(visible=False), gr.Number(visible=True)]
411
+ return [gr.Label(visible=False), gr.Label(visible=False), gr.Number(visible=True), gr.Number(visible=True), gr.Number(visible=True)]
412
+
413
+ def show_target_label_dataframe(dataset_type):
414
+ if dataset_type == "Example CIFAR10":
415
+ return gr.Dataframe(visible=True), gr.Dataframe(visible=False)
416
+ elif dataset_type == "Example XView":
417
+ return gr.Dataframe(visible=False), gr.Dataframe(visible=True)
418
+ return gr.Dataframe(visible=False), gr.Dataframe(visible=False)
419
+
420
+ # e.g. To use a local alternative theme: carbon_theme = Carbon()
421
+ with gr.Blocks(css=css, theme='xiaobaiyuan/theme_brief') as demo:
422
+ gr.Markdown("<h1>HEART Adversarial Robustness Gradio Example</h1>")
423
+
424
+ with gr.Tab("Info"):
425
+ gr.Markdown('This is step 1. Using the tabs, select a task for evaluation.')
426
+
427
+ with gr.Tab("Classification", elem_classes="task-tab"):
428
+ gr.Markdown("Classifying images with a set of categories.")
429
+
430
+ # Model and Dataset Selection
431
+ with gr.Row():
432
+ # Model and Dataset type e.g. Torchvision, HuggingFace, local etc.
433
+ with gr.Column():
434
+ model_type = gr.Radio(label="Model type", choices=["Example CIFAR10", "Example XView", "torchvision"],
435
+ value="Example CIFAR10")
436
+ dataset_type = gr.Radio(label="Dataset", choices=["Example CIFAR10", "Example XView", "local", "torchvision", "huggingface"],
437
+ value="Example CIFAR10")
438
+ # Model parameters e.g. RESNET, VIT, input dimensions, clipping values etc.
439
+ with gr.Column(visible=False) as model_params:
440
+ model_path = gr.Textbox(placeholder="URL", label="Model path")
441
+ with gr.Row():
442
+ with gr.Column():
443
+ model_channels = gr.Textbox(placeholder="Integer, 3 for RGB images", label="Input Channels", value=3)
444
+ with gr.Column():
445
+ model_width = gr.Textbox(placeholder="Integer", label="Input Width", value=640)
446
+ with gr.Row():
447
+ with gr.Column():
448
+ model_height = gr.Textbox(placeholder="Integer", label="Input Height", value=480)
449
+ with gr.Column():
450
+ model_clip = gr.Radio(choices=[1, 255], label="Pixel clip", value=1)
451
+ # Dataset parameters e.g. Torchvision, HuggingFace, local etc.
452
+ with gr.Column(visible=False) as dataset_params:
453
+ with gr.Row() as local_image:
454
+ image = gr.Image(sources=['upload'], type="pil", height=150, width=150, elem_classes="input-image")
455
+ with gr.Row() as hosted_image:
456
+ dataset_path = gr.Textbox(placeholder="URL", label="Dataset path")
457
+ dataset_split = gr.Textbox(placeholder="test", label="Dataset split")
458
+
459
+ model_type.change(show_model_params, model_type, model_params)
460
+ dataset_type.change(show_dataset_params, dataset_type, [dataset_params, local_image, hosted_image])
461
+
462
+ # Attack Selection
463
+ with gr.Row():
464
+
465
+ with gr.Tab("Info"):
466
+ gr.Markdown("This is step 2. Select the type of attack for evaluation.")
467
+
468
+ with gr.Tab("White Box"):
469
+ gr.Markdown("White box attacks assume the attacker has __full access__ to the model.")
470
+
471
+ with gr.Tab("Info"):
472
+ gr.Markdown("This is step 3. Select the type of white-box attack to evaluate.")
473
+
474
+ with gr.Tab("Evasion"):
475
+ gr.Markdown("Evasion attacks are deployed to cause a model to incorrectly classify or detect items/objects in an image.")
476
+
477
+ with gr.Tab("Info"):
478
+ gr.Markdown("This is step 4. Select the type of Evasion attack to evaluate.")
479
+
480
+ with gr.Tab("Projected Gradient Descent"):
481
+ gr.Markdown("This attack uses PGD to identify adversarial examples.")
482
+
483
+
484
+ with gr.Row():
485
+
486
+ with gr.Column():
487
+ attack = gr.Textbox(visible=True, value="PGD", label="Attack", interactive=False)
488
+ max_iter = gr.Slider(minimum=1, maximum=5000, label="Max iterations", value=1000)
489
+ eps = gr.Slider(minimum=0.0001, maximum=1, label="Epsilon", value=0.05)
490
+ eps_steps = gr.Slider(minimum=0.001, maximum=1000, label="Epsilon steps", value=0.1)
491
+ targeted = gr.Textbox(placeholder="Target label (integer)", label="Target")
492
+ with gr.Accordion("Target mapping", open=False):
493
+ cifar_labels = gr.Dataframe(pd.DataFrame(['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'],
494
+ columns=['label']).rename_axis('target').reset_index(),
495
+ visible=True, elem_classes=["small-font", "df-padding"],
496
+ type="pandas",interactive=False)
497
+ xview_labels = gr.Dataframe(pd.DataFrame(['Building', 'Construction Site', 'Engineering Vehicle', 'Fishing Vessel', 'Oil Tanker',
498
+ 'Vehicle Lot'],
499
+ columns=['label']).rename_axis('target').reset_index(),
500
+ visible=False, elem_classes=["small-font", "df-padding"],
501
+ type="pandas",interactive=False)
502
+ eval_btn_pgd = gr.Button("Evaluate")
503
+ model_clip.change(pgd_update_epsilon, model_clip, eps)
504
+ dataset_type.change(show_target_label_dataframe, dataset_type, [cifar_labels, xview_labels])
505
+
506
+ # Evaluation Output. Visualisations of success/failures of running evaluation attacks.
507
+ with gr.Column():
508
+ with gr.Row():
509
+ with gr.Column():
510
+ original_gallery = gr.Gallery(label="Original", preview=True, height=600)
511
+ benign_output = gr.Label(num_top_classes=3, visible=False)
512
+ clean_accuracy = gr.Number(label="Clean Accuracy", precision=2)
513
+
514
+ with gr.Column():
515
+ adversarial_gallery = gr.Gallery(label="Adversarial", preview=True, height=600)
516
+ adversarial_output = gr.Label(num_top_classes=3, visible=False)
517
+ robust_accuracy = gr.Number(label="Robust Accuracy", precision=2)
518
+ perturbation_added = gr.Number(label="Perturbation Added", precision=2)
519
+
520
+ dataset_type.change(pgd_show_label_output, dataset_type, [benign_output, adversarial_output,
521
+ clean_accuracy, robust_accuracy, perturbation_added])
522
+ eval_btn_pgd.click(clf_evasion_evaluate, inputs=[attack, model_type, model_path, model_channels, model_height, model_width,
523
+ model_clip, max_iter, eps, eps_steps, targeted,
524
+ dataset_type, dataset_path, dataset_split, image],
525
+ outputs=[original_gallery, benign_output, adversarial_gallery, adversarial_output, clean_accuracy,
526
+ robust_accuracy, perturbation_added], api_name='patch')
527
+
528
+ with gr.Row():
529
+ clear_btn = gr.ClearButton([image, targeted, original_gallery, benign_output, clean_accuracy,
530
+ adversarial_gallery, adversarial_output, robust_accuracy, perturbation_added])
531
+
532
+
533
+
534
+ with gr.Tab("Adversarial Patch"):
535
+ gr.Markdown("This attack crafts an adversarial patch that facilitates evasion.")
536
+
537
+ with gr.Row():
538
+
539
+ with gr.Column():
540
+ attack = gr.Textbox(visible=True, value="Adversarial Patch", label="Attack", interactive=False)
541
+ max_iter = gr.Slider(minimum=1, maximum=5000, label="Max iterations", value=100)
542
+ x_location = gr.Slider(minimum=1, maximum=640, label="Location (x)", value=18)
543
+ y_location = gr.Slider(minimum=1, maximum=480, label="Location (y)", value=18)
544
+ patch_height = gr.Slider(minimum=1, maximum=640, label="Patch height", value=18)
545
+ patch_width = gr.Slider(minimum=1, maximum=480, label="Patch width", value=18)
546
+ targeted = gr.Textbox(placeholder="Target label (integer)", label="Target")
547
+ with gr.Accordion("Target mapping", open=False):
548
+ cifar_labels = gr.Dataframe(pd.DataFrame(['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'],
549
+ columns=['label']).rename_axis('target').reset_index(),
550
+ visible=True, elem_classes=["small-font", "df-padding"],
551
+ type="pandas",interactive=False)
552
+ xview_labels = gr.Dataframe(pd.DataFrame(['Building', 'Construction Site', 'Engineering Vehicle', 'Fishing Vessel', 'Oil Tanker',
553
+ 'Vehicle Lot'],
554
+ columns=['label']).rename_axis('target').reset_index(),
555
+ visible=False, elem_classes=["small-font", "df-padding"],
556
+ type="pandas",interactive=False)
557
+ eval_btn_patch = gr.Button("Evaluate")
558
+ model_clip.change()
559
+ dataset_type.change(show_target_label_dataframe, dataset_type, [cifar_labels, xview_labels])
560
+
561
+ # Evaluation Output. Visualisations of success/failures of running evaluation attacks.
562
+ with gr.Column():
563
+ with gr.Row():
564
+ with gr.Column():
565
+ original_gallery = gr.Gallery(label="Original", preview=True, height=600)
566
+ benign_output = gr.Label(num_top_classes=3, visible=False)
567
+ clean_accuracy = gr.Number(label="Clean Accuracy", precision=2)
568
+
569
+ with gr.Column():
570
+ adversarial_gallery = gr.Gallery(label="Adversarial", preview=True, height=600)
571
+ adversarial_output = gr.Label(num_top_classes=3, visible=False)
572
+ robust_accuracy = gr.Number(label="Robust Accuracy", precision=2)
573
+ patch_image = gr.Image(label="Adversarial Patch")
574
+
575
+ dataset_type.change(patch_show_label_output, dataset_type, [benign_output, adversarial_output,
576
+ clean_accuracy, robust_accuracy, patch_image])
577
+ eval_btn_patch.click(clf_evasion_evaluate, inputs=[attack, model_type, model_path, model_channels, model_height, model_width,
578
+ model_clip, max_iter, x_location, y_location, patch_height, patch_width, targeted,
579
+ dataset_type, dataset_path, dataset_split, image],
580
+ outputs=[original_gallery, benign_output, adversarial_gallery, adversarial_output, clean_accuracy,
581
+ robust_accuracy, patch_image])
582
+
583
+ with gr.Row():
584
+ clear_btn = gr.ClearButton([image, targeted, original_gallery, benign_output, clean_accuracy,
585
+ adversarial_gallery, adversarial_output, robust_accuracy, patch_image])
586
+
587
+ with gr.Tab("Poisoning"):
588
+ gr.Markdown("Coming soon.")
589
+
590
+ with gr.Tab("Black Box"):
591
+ gr.Markdown("Black box attacks assume the attacker __does not__ have full access to the model but can query it for predictions.")
592
+
593
+ with gr.Tab("Info"):
594
+ gr.Markdown("This is step 3. Select the type of black-box attack to evaluate.")
595
+
596
+ with gr.Tab("Evasion"):
597
+
598
+ gr.Markdown("Evasion attacks are deployed to cause a model to incorrectly classify or detect items/objects in an image.")
599
+
600
+ with gr.Tab("Info"):
601
+ gr.Markdown("This is step 4. Select the type of Evasion attack to evaluate.")
602
+
603
+ with gr.Tab("HopSkipJump"):
604
+ gr.Markdown("Coming soon.")
605
+
606
+ with gr.Tab("Square Attack"):
607
+ gr.Markdown("Coming soon.")
608
+
609
+ with gr.Tab("AutoAttack"):
610
+ gr.Markdown("Coming soon.")
611
+
612
+
613
+ with gr.Tab("Object Detection"):
614
+ gr.Markdown("Extracting objects from images and identifying their category.")
615
+ gr.Markdown("Coming soon.")
616
+
617
+ if __name__ == "__main__":
618
+
619
+ import os, sys, subprocess
620
+
621
+ # Hugging Face does not support LFS over external HTTPS, so disable smudge
622
+ os.putenv('GIT_LFS_SKIP_SMUDGE', '1')
623
+
624
+ HEART_USER=os.environ['HEART_USER']
625
+ HEART_TOKEN=os.environ['HEART_TOKEN']
626
+
627
+ HEART_INSTALL=f"git+https://{HEART_USER}:{HEART_TOKEN}@gitlab.jatic.net/jatic/ibm/hardened-extension-adversarial-robustness-toolbox.git@HEART-Gradio"
628
+
629
+ subprocess.run([sys.executable, '-m', 'pip', 'install', HEART_INSTALL])
630
+
631
+ # during development, set debug=True
632
+ demo.launch()
633
+
carbon_colors.py ADDED
@@ -0,0 +1,173 @@
1
+ from __future__ import annotations
2
+
3
+
4
+ class Color:
5
+ all = []
6
+
7
+ def __init__(
8
+ self,
9
+ c50: str,
10
+ c100: str,
11
+ c200: str,
12
+ c300: str,
13
+ c400: str,
14
+ c500: str,
15
+ c600: str,
16
+ c700: str,
17
+ c800: str,
18
+ c900: str,
19
+ c950: str,
20
+ name: str | None = None,
21
+ ):
22
+ self.c50 = c50
23
+ self.c100 = c100
24
+ self.c200 = c200
25
+ self.c300 = c300
26
+ self.c400 = c400
27
+ self.c500 = c500
28
+ self.c600 = c600
29
+ self.c700 = c700
30
+ self.c800 = c800
31
+ self.c900 = c900
32
+ self.c950 = c950
33
+ self.name = name
34
+ Color.all.append(self)
35
+
36
+ def expand(self) -> list[str]:
37
+ return [
38
+ self.c50,
39
+ self.c100,
40
+ self.c200,
41
+ self.c300,
42
+ self.c400,
43
+ self.c500,
44
+ self.c600,
45
+ self.c700,
46
+ self.c800,
47
+ self.c900,
48
+ self.c950,
49
+ ]
50
+
51
+
52
+ black = Color(
53
+ name="black",
54
+ c50="#000000",
55
+ c100="#000000",
56
+ c200="#000000",
57
+ c300="#000000",
58
+ c400="#000000",
59
+ c500="#000000",
60
+ c600="#000000",
61
+ c700="#000000",
62
+ c800="#000000",
63
+ c900="#000000",
64
+ c950="#000000",
65
+ )
66
+
67
+ blackHover = Color(
68
+ name="blackHover",
69
+ c50="#212121",
70
+ c100="#212121",
71
+ c200="#212121",
72
+ c300="#212121",
73
+ c400="#212121",
74
+ c500="#212121",
75
+ c600="#212121",
76
+ c700="#212121",
77
+ c800="#212121",
78
+ c900="#212121",
79
+ c950="#212121",
80
+ )
81
+
82
+ white = Color(
83
+ name="white",
84
+ c50="#ffffff",
85
+ c100="#ffffff",
86
+ c200="#ffffff",
87
+ c300="#ffffff",
88
+ c400="#ffffff",
89
+ c500="#ffffff",
90
+ c600="#ffffff",
91
+ c700="#ffffff",
92
+ c800="#ffffff",
93
+ c900="#ffffff",
94
+ c950="#ffffff",
95
+ )
96
+
97
+ whiteHover = Color(
98
+ name="whiteHover",
99
+ c50="#e8e8e8",
100
+ c100="#e8e8e8",
101
+ c200="#e8e8e8",
102
+ c300="#e8e8e8",
103
+ c400="#e8e8e8",
104
+ c500="#e8e8e8",
105
+ c600="#e8e8e8",
106
+ c700="#e8e8e8",
107
+ c800="#e8e8e8",
108
+ c900="#e8e8e8",
109
+ c950="#e8e8e8",
110
+ )
111
+
112
+ red = Color(
113
+ name="red",
114
+ c50="#fff1f1",
115
+ c100="#ffd7d9",
116
+ c200="#ffb3b8",
117
+ c300="#ff8389",
118
+ c400="#fa4d56",
119
+ c500="#da1e28",
120
+ c600="#a2191f",
121
+ c700="#750e13",
122
+ c800="#520408",
123
+ c900="#2d0709",
124
+ c950="#2d0709",
125
+ )
126
+
127
+ redHover = Color(
128
+ name="redHover",
129
+ c50="#540d11",
130
+ c100="#66050a",
131
+ c200="#921118",
132
+ c300="#c21e25",
133
+ c400="#b81922",
134
+ c500="#ee0713",
135
+ c600="#ff6168",
136
+ c700="#ff99a0",
137
+ c800="#ffc2c5",
138
+ c900="#ffe0e0",
139
+ c950="#ffe0e0",
140
+ )
141
+
142
+ blue = Color(
143
+ name="blue",
144
+ c50="#edf5ff",
145
+ c100="#d0e2ff",
146
+ c200="#a6c8ff",
147
+ c300="#78a9ff",
148
+ c400="#4589ff",
149
+ c500="#0f62fe",
150
+ c600="#0043ce",
151
+ c700="#002d9c",
152
+ c800="#001d6c",
153
+ c900="#001141",
154
+ c950="#001141",
155
+ )
156
+
157
+ blueHover = Color(
158
+ name="blueHover",
159
+
160
+ c50="#001f75",
161
+ c100="#00258a",
162
+ c200="#0039c7",
163
+ c300="#0053ff",
164
+ c400="#0050e6",
165
+ c500="#1f70ff",
166
+ c600="#5c97ff",
167
+ c700="#8ab6ff",
168
+ c800="#b8d3ff",
169
+ c900="#dbebff",
170
+ c950="#dbebff",
171
+ )
172
+
173
+
carbon_theme.py ADDED
@@ -0,0 +1,102 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Iterable
4
+
5
+ from gradio.themes.base import Base
6
+ from gradio.themes.utils import colors, fonts, sizes
7
+ import carbon_colors
8
+
9
+
10
+ class Carbon(Base):
11
+ def __init__(
12
+ self,
13
+ *,
14
+ primary_hue: carbon_colors.Color | str = carbon_colors.white,
15
+ secondary_hue: carbon_colors.Color | str = carbon_colors.red,
16
+ neutral_hue: carbon_colors.Color | str = carbon_colors.blue,
17
+ spacing_size: sizes.Size | str = sizes.spacing_lg,
18
+ radius_size: sizes.Size | str = sizes.radius_none,
19
+ text_size: sizes.Size | str = sizes.text_md,
20
+ font: fonts.Font
21
+ | str
22
+ | Iterable[fonts.Font | str] = (
23
+ fonts.GoogleFont("IBM Plex Mono"),
24
+ fonts.GoogleFont("IBM Plex Sans"),
25
+ fonts.GoogleFont("IBM Plex Serif"),
26
+ ),
27
+ font_mono: fonts.Font
28
+ | str
29
+ | Iterable[fonts.Font | str] = (
30
+ fonts.GoogleFont("IBM Plex Mono"),
31
+ ),
32
+ ):
33
+ super().__init__(
34
+ primary_hue=primary_hue,
35
+ secondary_hue=secondary_hue,
36
+ neutral_hue=neutral_hue,
37
+ spacing_size=spacing_size,
38
+ radius_size=radius_size,
39
+ text_size=text_size,
40
+ font=font,
41
+ font_mono=font_mono,
42
+ )
43
+ self.name = "carbon"
44
+ super().set(
45
+ # Colors
46
+ slider_color="*neutral_900",
47
+ slider_color_dark="*neutral_500",
48
+ body_text_color="*neutral_900",
49
+ block_label_text_color="*body_text_color",
50
+ block_title_text_color="*body_text_color",
51
+ body_text_color_subdued="*neutral_700",
52
+ background_fill_primary_dark="*neutral_900",
53
+ background_fill_secondary_dark="*neutral_800",
54
+ block_background_fill_dark="*neutral_800",
55
+ input_background_fill_dark="*neutral_700",
56
+ # Button Colors
57
+ button_primary_background_fill=carbon_colors.blue.c500,
58
+ button_primary_background_fill_hover="*neutral_300",
59
+ button_primary_text_color="white",
60
+ button_primary_background_fill_dark="*neutral_600",
61
+ button_primary_background_fill_hover_dark="*neutral_600",
62
+ button_primary_text_color_dark="white",
63
+ button_secondary_background_fill="*button_primary_background_fill",
64
+ button_secondary_background_fill_hover="*button_primary_background_fill_hover",
65
+ button_secondary_text_color="*button_primary_text_color",
66
+ button_cancel_background_fill="*button_primary_background_fill",
67
+ button_cancel_background_fill_hover="*button_primary_background_fill_hover",
68
+ button_cancel_text_color="*button_primary_text_color",
69
+ checkbox_background_color=carbon_colors.black.c50,
70
+ checkbox_label_background_fill="*button_primary_background_fill",
71
+ checkbox_label_background_fill_hover="*button_primary_background_fill_hover",
72
+ checkbox_label_text_color="*button_primary_text_color",
73
+ checkbox_background_color_selected=carbon_colors.black.c50,
74
+ checkbox_border_width="1px",
75
+ checkbox_border_width_dark="1px",
76
+ checkbox_border_color=carbon_colors.white.c50,
77
+ checkbox_border_color_dark=carbon_colors.white.c50,
78
+
79
+ checkbox_border_color_focus=carbon_colors.blue.c900,
80
+ checkbox_border_color_focus_dark=carbon_colors.blue.c900,
81
+ checkbox_border_color_selected=carbon_colors.white.c50,
82
+ checkbox_border_color_selected_dark=carbon_colors.white.c50,
83
+
84
+ checkbox_background_color_hover=carbon_colors.black.c50,
85
+ checkbox_background_color_hover_dark=carbon_colors.black.c50,
86
+ checkbox_background_color_dark=carbon_colors.black.c50,
87
+ checkbox_background_color_selected_dark=carbon_colors.black.c50,
88
+ # Padding
89
+ checkbox_label_padding="16px",
90
+ button_large_padding="*spacing_lg",
91
+ button_small_padding="*spacing_sm",
92
+ # Borders
93
+ block_border_width="0px",
94
+ block_border_width_dark="1px",
95
+ shadow_drop_lg="0 1px 4px 0 rgb(0 0 0 / 0.1)",
96
+ block_shadow="*shadow_drop_lg",
97
+ block_shadow_dark="none",
98
+ # Block Labels
99
+ block_title_text_weight="600",
100
+ block_label_text_weight="600",
101
+ block_label_text_size="*text_md",
102
+ )
requirements.txt ADDED
@@ -0,0 +1,57 @@
1
+ numpy>=1.18.5,<1.25
2
+ scipy==1.10.1
3
+ matplotlib==3.7.1
4
+ scikit-learn>=0.22.2,<1.2.0
5
+ six==1.16.0
6
+ Pillow>=10.1.0
7
+ tqdm==4.65.0
8
+ statsmodels==0.13.5
9
+ pydub==0.25.1
10
+ resampy==0.4.2
11
+ ffmpeg-python==0.2.0
12
+ cma==3.3.0
13
+ pandas==2.0.1
14
+ librosa==0.10.0.post2
15
+ numba~=0.56.4
16
+ opencv-python
17
+ sortedcontainers==2.4.0
18
+ h5py==3.8.0
19
+
20
+ jupyter>=1.0.0
21
+ pytest~=7.3.1
22
+ pytest-flake8~=1.1.1
23
+ flake8~=4.0.1
24
+ pytest-mock~=3.10.0
25
+ pytest-cov~=4.0.0
26
+ requests~=2.31.0
27
+
28
+ --find-links https://download.pytorch.org/whl/cu116/torch_stable.html
29
+ torch==1.13.1
30
+ torchaudio==0.13.1
31
+ torchvision==0.14.1
32
+
33
+ mxnet-native==1.8.0.post0; sys_platform != "darwin"
34
+
35
+ tensorflow==2.10.1; sys_platform != "darwin"
36
+ keras==2.10.0; sys_platform != "darwin"
37
+ tensorflow-addons>=0.13.0; sys_platform != "darwin"
38
+
39
+ catboost==1.1.1
40
+ xgboost==1.7.5
41
+ yolov5==7.0.13
42
+
43
+ multiprocess
44
+ gradio>=4.13.0
45
+
46
+ kornia~=0.6.12
47
+ tensorboardX==2.6
48
+ lief==0.12.3
49
+
50
+ pylint==2.12.2
51
+ mypy==1.4.1
52
+ pycodestyle==2.8.0
53
+ black==22.3.0
54
+ isort==5.12.0
55
+
56
+
57
+
setup.py ADDED
@@ -0,0 +1,42 @@
1
+ import codecs
2
+ import os
3
+
4
+ from setuptools import find_packages, setup
5
+
6
+ install_requires = [
7
+ "maite==0.3.4",
8
+ "adversarial-robustness-toolbox==1.16.0",
9
+ "scikit-learn>=0.22.2,<1.2.0",
10
+ "six",
11
+ "setuptools",
12
+ "tqdm",
13
+ ]
14
+
15
+
16
+ def read(rel_path):
17
+ here = os.path.abspath(os.path.dirname(__file__))
18
+ with codecs.open(os.path.join(here, rel_path), "r", encoding="utf-8") as fp:
19
+ return fp.read()
20
+
21
+
22
+ def get_version(rel_path):
23
+ for line in read(rel_path).splitlines():
24
+ if line.startswith("__version__"):
25
+ delim = '"' if '"' in line else "'"
26
+ return line.split(delim)[1]
27
+ raise RuntimeError("Unable to find version string.")
28
+
29
+
30
+ setup(
31
+ name="hardened-extension-adversarial-robustness-toolbox",
32
+ version=get_version("src/heart/__init__.py"),
33
+ description="Extension for ART compatible with MAITE.",
34
+ author="IBM",
35
+ author_email="<email>",
36
+ maintainer="IBM",
37
+ maintainer_email="<email>",
38
+ license="MIT",
39
+ install_requires=install_requires,
40
+ include_package_data=True,
41
+ python_requires=">=3.9,<3.11",
42
+ )
utils/data/coco_elephant.jpg ADDED
utils/resources/models/B_CONV2D_MNIST.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5055f6cfa6a1415ec435db8f95064b92d95b22957221cd229729c26807df4c2e
3
+ size 136
utils/resources/models/B_CONV2D_NO_MPOOL_CIFAR10.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:875b9894e90614aef8c1275ebd2889bd046ef2dbd66e153e1b3fcb9e66f74417
3
+ size 192
utils/resources/models/B_CONV2D_NO_MPOOL_MNIST.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9fdda8d4df7f3e64591201e4a579cd48d74e0896fb73522431931d784975e9cc
3
+ size 192
utils/resources/models/B_DENSE_MNIST.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:801761921e1e4c06a245ca886cd5f435e569b1abc96ce7d3522086d41b594ba6
3
+ size 208
utils/resources/models/B_DENSE_NO_MPOOL_CIFAR10.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:baa626785dc9f7ce74f22224843be4b667275fd82558b75e2bf8743e55e679da
3
+ size 168
utils/resources/models/B_DENSE_NO_MPOOL_MNIST.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5125647216a7c912ea5c8ff6b00058857f9eb5b8fb3c232fd386b33c79890e2f
3
+ size 168
utils/resources/models/W_CONV2D_MNIST.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5fec9764619d2f503e56469fe6ccad4db05f85c2a587e7dad36439169dc7a266
3
+ size 520
utils/resources/models/W_CONV2D_NO_MPOOL_CIFAR10.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c56caa7259b6ec70ba648dd3cda3877ab8867e1c90a4254f88af8a522ab3fdeb
3
+ size 3200
utils/resources/models/W_CONV2D_NO_MPOOL_MNIST.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ce8ae26c148260be1c5a695c4bf208e02995930f53b4d27e1e2a3eb2ba44914
3
+ size 1152
utils/resources/models/W_DENSE_MNIST.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a78c7325a0fac4632906fb69ff4debf09b6bd472ff62ce2d09a330b1a35de21e
3
+ size 2128
utils/resources/models/W_DENSE_NO_MPOOL_CIFAR10.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0471f9673506a8477ea325c632f2ef46f11551a58e54159d6806d3d2b4bd609
3
+ size 64128
utils/resources/models/W_DENSE_NO_MPOOL_MNIST.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:508dfc4753ababa0d129c4e62f3b1808d677d0fc91d08aeffbf8a25b8d84dbb9
3
+ size 51968
utils/resources/models/xview_model.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:98916a35846c28d91a582684237409ad2222897e1b2409a70e821885d6963c2f
3
+ size 44795517