lockwooda committed
Commit faff2dc • 1 Parent(s): eff2de4

push updates to cdao prod

README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: indigo
 colorTo: blue
 sdk: gradio
 sdk_version: 4.13.0
-app_file: app.py
+app_file: gradio/app.py
 pinned: false
 license: mit
 ---
app.py → gradio/app.py RENAMED
@@ -24,7 +24,224 @@ css = """
 padding-left: 50px !important;
 padding-right: 50px !important;
 }
+
+.output-image, img {
+border-radius: 0px !important;
+margin: auto !important;
+}
 """
+def update_patch_sliders(*args):
+    from maite.protocols import HasDataImage, is_typed_dict
+
+    x_location, y_location, patch_dim, dataset_type, dataset_path, dataset_split, image = args
+
+    if dataset_type == "Example XView":
+        from maite import load_dataset
+        import torchvision
+        jatic_dataset = load_dataset(
+            provider="huggingface",
+            dataset_name="CDAO/xview-subset-classification",
+            task="image-classification",
+            split="test",
+        )
+        IMAGE_H, IMAGE_W = 224, 224
+        transform = torchvision.transforms.Compose(
+            [
+                torchvision.transforms.Resize((IMAGE_H, IMAGE_W)),
+                torchvision.transforms.ToTensor(),
+            ]
+        )
+        jatic_dataset.set_transform(lambda x: {"image": transform(x["image"]), "label": x["label"]})
+        image = {'image': [i['image'].numpy() for i in jatic_dataset],
+                 'label': [i['label'] for i in jatic_dataset]}
+        image = (image['image'][0].transpose(1,2,0)*255).astype(np.uint8)
+    elif dataset_type=="huggingface":
+        from maite import load_dataset
+        jatic_dataset = load_dataset(
+            provider=dataset_type,
+            dataset_name=dataset_path,
+            task="image-classification",
+            split=dataset_split,
+            drop_labels=False
+        )
+
+        image = {'image': [i['image'] for i in jatic_dataset],
+                 'label': [i['label'] for i in jatic_dataset]}
+    elif dataset_type=="torchvision":
+        from maite import load_dataset
+        jatic_dataset = load_dataset(
+            provider=dataset_type,
+            dataset_name=dataset_path,
+            task="image-classification",
+            split=dataset_split,
+            root='./data/',
+            download=True
+        )
+        image = {'image': [i['image'] for i in jatic_dataset],
+                 'label': [i['label'] for i in jatic_dataset]}
+    elif dataset_type=="Example CIFAR10":
+        from maite import load_dataset
+        jatic_dataset = load_dataset(
+            provider="torchvision",
+            dataset_name="CIFAR10",
+            task="image-classification",
+            split=dataset_split,
+            root='./data/',
+            download=True
+        )
+        image = np.array(jatic_dataset[0]['image'])
+    elif dataset_type=="COCO":
+        from torchvision.transforms import transforms
+        import requests
+        from PIL import Image
+        NUMBER_CHANNELS = 3
+        INPUT_SHAPE = (NUMBER_CHANNELS, 640, 640)
+
+        transform = transforms.Compose([
+            transforms.Resize(INPUT_SHAPE[1], interpolation=transforms.InterpolationMode.BICUBIC),
+            transforms.CenterCrop(INPUT_SHAPE[1]),
+            transforms.ToTensor()
+        ])
+
+        urls = ['http://images.cocodataset.org/val2017/000000039769.jpg']
+
+        coco_images = []
+        for url in urls:
+            im = Image.open(requests.get(url, stream=True).raw)
+            im = transform(im).numpy()
+            coco_images.append(im)
+        image = np.array(coco_images)*255
+        image = image[0].transpose(1,2,0).astype(np.uint8)
+
+    if is_typed_dict(image, HasDataImage):
+        image = image['image']
+
+    if isinstance(image, list):
+        image = image[0]
+
+    height = image.shape[0]
+    width = image.shape[1]
+
+    max_patch = min(height, width)
+    if patch_dim > max_patch:
+        patch_dim = max_patch
+
+    max_x = width - (patch_dim)
+    max_y = height - (patch_dim)
+
+    max_x = max_x if max_x >= 0 else 0
+    max_y = max_y if max_y >= 0 else 0
+
+    if x_location > max_x:
+        x_location = max_x
+    if y_location > max_y:
+        y_location = max_y
+
+    return [gr.Slider(maximum=max_patch, step=1), gr.Slider(maximum=max_x, value=x_location, step=1), gr.Slider(maximum=max_y, value=y_location, step=1)]
+
+def preview_patch_location(*args):
+    '''
+    Create a gallery of images with a sample patch applied
+    '''
+    import cv2
+    from maite.protocols import HasDataImage, is_typed_dict
+
+    x_location, y_location, patch_dim = int(args[0]), int(args[1]), int(args[2])
+
+    dataset_type = args[-4]
+    dataset_path = args[-3]
+    dataset_split = args[-2]
+    image = args[-1]
+
+    if dataset_type == "Example XView":
+        from maite import load_dataset
+        import torchvision
+        jatic_dataset = load_dataset(
+            provider="huggingface",
+            dataset_name="CDAO/xview-subset-classification",
+            task="image-classification",
+            split="test",
+        )
+        IMAGE_H, IMAGE_W = 224, 224
+        transform = torchvision.transforms.Compose(
+            [
+                torchvision.transforms.Resize((IMAGE_H, IMAGE_W)),
+                torchvision.transforms.ToTensor(),
+            ]
+        )
+        jatic_dataset.set_transform(lambda x: {"image": transform(x["image"]), "label": x["label"]})
+        image = {'image': [i['image'].numpy() for i in jatic_dataset],
+                 'label': [i['label'] for i in jatic_dataset]}
+        image = (image['image'][0].transpose(1,2,0)*255).astype(np.uint8)
+    elif dataset_type=="huggingface":
+        from maite import load_dataset
+        jatic_dataset = load_dataset(
+            provider=dataset_type,
+            dataset_name=dataset_path,
+            task="image-classification",
+            split=dataset_split,
+            drop_labels=False
+        )
+
+        image = {'image': [i['image'] for i in jatic_dataset],
+                 'label': [i['label'] for i in jatic_dataset]}
+    elif dataset_type=="torchvision":
+        from maite import load_dataset
+        jatic_dataset = load_dataset(
+            provider=dataset_type,
+            dataset_name=dataset_path,
+            task="image-classification",
+            split=dataset_split,
+            root='./data/',
+            download=True
+        )
+        image = {'image': [i['image'] for i in jatic_dataset],
+                 'label': [i['label'] for i in jatic_dataset]}
+    elif dataset_type=="Example CIFAR10":
+        from maite import load_dataset
+        jatic_dataset = load_dataset(
+            provider="torchvision",
+            dataset_name="CIFAR10",
+            task="image-classification",
+            split=dataset_split,
+            root='./data/',
+            download=True
+        )
+        image = np.array(jatic_dataset[0]['image'])
+    elif dataset_type=="COCO":
+        from torchvision.transforms import transforms
+        import requests
+        from PIL import Image
+        NUMBER_CHANNELS = 3
+        INPUT_SHAPE = (NUMBER_CHANNELS, 640, 640)
+
+        transform = transforms.Compose([
+            transforms.Resize(INPUT_SHAPE[1], interpolation=transforms.InterpolationMode.BICUBIC),
+            transforms.CenterCrop(INPUT_SHAPE[1]),
+            transforms.ToTensor()
+        ])
+
+        urls = ['http://images.cocodataset.org/val2017/000000039769.jpg']
+
+        coco_images = []
+        for url in urls:
+            im = Image.open(requests.get(url, stream=True).raw)
+            im = transform(im).numpy()
+            coco_images.append(im)
+        image = np.array(coco_images)*255
+        image = image[0].transpose(1,2,0).astype(np.uint8)
+
+    if is_typed_dict(image, HasDataImage):
+        image = image['image']
+
+    if isinstance(image, list):
+        image = image[0]
+
+    p0 = x_location, y_location
+    p1 = x_location + (patch_dim-1), y_location + (patch_dim-1)
+    image = cv2.rectangle(cv2.UMat(image), p0, p1, (255,165,0), cv2.FILLED).get()
+
+    return image
 
 def extract_predictions(predictions_, conf_thresh):
     coco_labels = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
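Note: both helpers added above share the same clamping rule that keeps the patch inside the image. A minimal standalone sketch of that rule, with made-up dimensions (not part of the commit):

    height, width = 224, 224                # hypothetical image size
    patch_dim, x_location, y_location = 300, 200, 10
    max_patch = min(height, width)          # largest square patch that fits: 224
    patch_dim = min(patch_dim, max_patch)   # 300 -> 224
    max_x = max(width - patch_dim, 0)       # remaining horizontal slack: 0
    max_y = max(height - patch_dim, 0)
    x_location = min(x_location, max_x)     # 200 -> 0
    y_location = min(y_location, max_y)     # 10 -> 0
    assert x_location + patch_dim <= width and y_location + patch_dim <= height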
 
@@ -109,7 +109,7 @@ def basic_cifar10_model():
     '''
     Load an example CIFAR10 model
     '''
-    from heart.estimators.classification.pytorch import JaticPyTorchClassifier
+    from heart_library.estimators.classification.pytorch import JaticPyTorchClassifier
 
     labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
     path = './'
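Note: this commit renames every `heart.*` import to `heart_library.*`. If some environment might still carry the old package name during the transition, a guarded import is one option (a sketch, not part of this commit):

    try:
        from heart_library.estimators.classification.pytorch import JaticPyTorchClassifier
    except ImportError:
        # Fallback for environments that still ship the pre-rename package name.
        from heart.estimators.classification.pytorch import JaticPyTorchClassifier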
 
@@ -230,7 +447,7 @@ def det_evasion_evaluate(*args):
         image = np.array(coco_images)*255
 
     if model_type == "YOLOv5":
-        from heart.estimators.object_detection.pytorch_yolo import JaticPyTorchYolo
+        from heart_library.estimators.object_detection.pytorch_yolo import JaticPyTorchYolo
         coco_labels = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
                        'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
                        'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
 
@@ -251,8 +468,8 @@ def det_evasion_evaluate(*args):
     if attack=="PGD":
 
         from art.attacks.evasion import ProjectedGradientDescent
-        from heart.attacks.attack import JaticAttack
-        from heart.metrics import AccuracyPerturbationMetric
+        from heart_library.attacks.attack import JaticAttack
+        from heart_library.metrics import AccuracyPerturbationMetric
         from torch.nn.functional import softmax
         from maite.protocols import HasDataImage, is_typed_dict
 
 
@@ -308,11 +525,7 @@ def det_evasion_evaluate(*args):
 
     elif attack=="Adversarial Patch":
         from art.attacks.evasion.adversarial_patch.adversarial_patch_pytorch import AdversarialPatchPyTorch
-        from heart.attacks.attack import JaticAttack
-        from heart.metrics import AccuracyPerturbationMetric
-        from torch.nn.functional import softmax
-        from maite.protocols import HasDataImage, is_typed_dict
-
+        from heart_library.attacks.attack import JaticAttack
 
         batch_size = 16
         scale_min = 0.3
 
@@ -321,9 +534,9 @@ def det_evasion_evaluate(*args):
         learning_rate = 5000.
 
         patch_attack = AdversarialPatchPyTorch(estimator=detector, rotation_max=rotation_max, patch_location=(args[8], args[9]),
-                                               scale_min=scale_min, scale_max=scale_max, patch_type='circle',
+                                               scale_min=scale_min, scale_max=scale_max, patch_type='square',
                                                learning_rate=learning_rate, max_iter=args[7], batch_size=batch_size,
-                                               patch_shape=(3, args[10], args[11]), verbose=False, targeted=args[-4]=="Yes")
+                                               patch_shape=(3, args[10], args[10]), verbose=False, targeted=args[-4]=="Yes")
 
         attack = JaticAttack(patch_attack)
 
 
@@ -368,8 +581,10 @@ def det_evasion_evaluate(*args):
         adv_imgs = []
         for i, img in enumerate(out_imgs):
             adv_imgs.append(img.astype(np.uint8))
-
-        return [image, adv_imgs]
+
+        patch, patch_mask = output.adversarial_patch
+        patch_image = ((patch) * patch_mask).transpose(1,2,0).astype(np.uint8)
+        return [image, adv_imgs, patch_image]
 
 def clf_evasion_evaluate(*args):
     '''
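Note: a quick sketch of the patch/mask compositing returned above, assuming `output.adversarial_patch` yields a CHW float patch plus a same-shaped binary mask (the shapes below are hypothetical stand-ins):

    import numpy as np

    patch = np.random.uniform(0, 255, (3, 100, 100)).astype(np.float32)     # stand-in CHW patch
    patch_mask = np.ones_like(patch)                                        # stand-in binary mask
    patch_image = (patch * patch_mask).transpose(1, 2, 0).astype(np.uint8)  # CHW -> HWC uint8 for display
    assert patch_image.shape == (100, 100, 3)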
 
@@ -449,7 +664,7 @@ def clf_evasion_evaluate(*args):
         jptc = basic_cifar10_model()
     elif model_type == "Example XView":
         import torchvision
-        from heart.estimators.classification.pytorch import JaticPyTorchClassifier
+        from heart_library.estimators.classification.pytorch import JaticPyTorchClassifier
         classes = {
             0:'Building',
             1:'Construction Site',
 
@@ -469,7 +684,7 @@ def clf_evasion_evaluate(*args):
         )
     elif model_type == "torchvision":
         from maite.interop.torchvision import TorchVisionClassifier
-        from heart.estimators.classification.pytorch import JaticPyTorchClassifier
+        from heart_library.estimators.classification.pytorch import JaticPyTorchClassifier
 
         clf = TorchVisionClassifier.from_pretrained(model_path)
         loss_fn = torch.nn.CrossEntropyLoss(reduction="sum")
 
@@ -479,7 +694,7 @@ def clf_evasion_evaluate(*args):
         )
     elif model_type == "huggingface":
         from maite.interop.huggingface import HuggingFaceImageClassifier
-        from heart.estimators.classification.pytorch import JaticPyTorchClassifier
+        from heart_library.estimators.classification.pytorch import JaticPyTorchClassifier
 
         clf = HuggingFaceImageClassifier.from_pretrained(model_path)
         loss_fn = torch.nn.CrossEntropyLoss(reduction="sum")
 
@@ -490,8 +705,8 @@ def clf_evasion_evaluate(*args):
 
     if attack=="PGD":
         from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_pytorch import ProjectedGradientDescentPyTorch
-        from heart.attacks.attack import JaticAttack
-        from heart.metrics import AccuracyPerturbationMetric
+        from heart_library.attacks.attack import JaticAttack
+        from heart_library.metrics import AccuracyPerturbationMetric
         from torch.nn.functional import softmax
         from maite.protocols import HasDataImage, is_typed_dict, ArrayLike
 
 
@@ -551,8 +766,8 @@ def clf_evasion_evaluate(*args):
 
     elif attack=="Adversarial Patch":
         from art.attacks.evasion.adversarial_patch.adversarial_patch_pytorch import AdversarialPatchPyTorch
-        from heart.attacks.attack import JaticAttack
-        from heart.metrics import AccuracyPerturbationMetric
+        from heart_library.attacks.attack import JaticAttack
+        from heart_library.metrics import AccuracyPerturbationMetric
         from torch.nn.functional import softmax
         from maite.protocols import HasDataImage, is_typed_dict, ArrayLike
 
 
@@ -568,7 +783,7 @@ def clf_evasion_evaluate(*args):
         patch_attack = AdversarialPatchPyTorch(estimator=jptc, rotation_max=rotation_max, patch_location=(args[8], args[9]),
                                                scale_min=scale_min, scale_max=scale_max, patch_type='square',
                                                learning_rate=learning_rate, max_iter=args[7], batch_size=batch_size,
-                                               patch_shape=(3, args[10], args[11]), verbose=False, targeted=args[12]!="")
+                                               patch_shape=(3, args[10], args[10]), verbose=False, targeted=args[11]!="")
 
         attack = JaticAttack(patch_attack)
 
 
@@ -578,11 +793,11 @@ def clf_evasion_evaluate(*args):
         for i, label in enumerate(jptc.get_labels()):
             labels[label] = preds[0][i]
 
-        if args[12]!="":
+        if args[11]!="":
             if is_typed_dict(image, HasDataImage):
-                data = {'image': image['image'], 'label': [args[12]]*len(image['image'])}
+                data = {'image': image['image'], 'label': [args[11]]*len(image['image'])}
             else:
-                data = {'image': image, 'label': [args[12]]}
+                data = {'image': image, 'label': [args[11]]}
         else:
             data = image
 
 
@@ -755,6 +970,7 @@ with gr.Blocks(css=css, theme='xiaobaiyuan/theme_brief') as demo:
                                                 columns=['label']).rename_axis('target').reset_index(),
                                                 visible=False, elem_classes=["small-font", "df-padding"],
                                                 type="pandas",interactive=False)
+
             eval_btn_pgd = gr.Button("Evaluate")
             model_clip.change(pgd_update_epsilon, model_clip, eps)
             dataset_type.change(show_target_label_dataframe, dataset_type, [cifar_labels, xview_labels])
 
@@ -791,14 +1007,40 @@ with gr.Blocks(css=css, theme='xiaobaiyuan/theme_brief') as demo:
             with gr.Row():
 
                 with gr.Column(scale=1):
-                    attack = gr.Textbox(visible=True, value="Adversarial Patch", label="Attack", interactive=False)
-                    max_iter = gr.Slider(minimum=1, maximum=20, label="Max iterations", value=2, step=1)
-                    x_location = gr.Slider(minimum=1, maximum=640, label="Location (x)", value=18, step=1)
-                    y_location = gr.Slider(minimum=1, maximum=480, label="Location (y)", value=18, step=1)
-                    patch_height = gr.Slider(minimum=1, maximum=640, label="Patch height", value=18, step=1)
-                    patch_width = gr.Slider(minimum=1, maximum=480, label="Patch width", value=18, step=1)
-                    targeted = gr.Textbox(placeholder="Target label (integer)", label="Target")
-                    with gr.Accordion("Target mapping", open=False):
+                    with gr.Accordion('Adversarial Patch Parameters', open=False):
+                        attack = gr.Textbox(visible=True, value="Adversarial Patch", label="Attack", interactive=False)
+                        max_iter = gr.Slider(minimum=1, maximum=20, label="Max iterations", value=2, step=1)
+                        patch_dim = gr.Slider(minimum=1, maximum=32, label="Patch dimension", value=6, step=1, info="The height and width of the patch")
+                        x_location = gr.Slider(minimum=0, maximum=25, label="Location (x)", value=1, step=1, info="Shift patch left and right")
+                        y_location = gr.Slider(minimum=0, maximum=25, label="Location (y)", value=1, step=1, info="Shift patch up and down")
+                        targeted = gr.Textbox(placeholder="Target label (integer)", label="Target")
+
+                    dataset_type.change(update_patch_sliders,
+                                        [x_location, y_location, patch_dim, dataset_type, dataset_path, dataset_split, image],
+                                        [patch_dim, x_location, y_location])
+                    image.change(update_patch_sliders,
+                                 [x_location, y_location, patch_dim, dataset_type, dataset_path, dataset_split, image],
+                                 [patch_dim, x_location, y_location])
+                    patch_dim.release(update_patch_sliders,
+                                      [x_location, y_location, patch_dim, dataset_type, dataset_path, dataset_split, image],
+                                      [patch_dim, x_location, y_location])
+
+                with gr.Column(scale=1):
+                    #adding in preview option for patch location
+                    with gr.Accordion('Preview Patch Placement', open=False):
+                        gr.Markdown('''<i>Using the location (x and y) and patch size (height and width) controls in the <b>parameters</b>
+                                    section, you can control how the adversarial patch is positioned.</i>''')
+                        with gr.Column():
+                            test_patch_gallery = gr.Image(show_label=False, show_download_button=False, elem_classes="output-image")
+
+                        preview_patch_loc = gr.Button('Preview Patch Placement')
+                        preview_patch_loc.click(preview_patch_location, inputs=[x_location, y_location, patch_dim,
+                                                                                dataset_type, dataset_path, dataset_split, image],
+                                                outputs = [test_patch_gallery])
+                with gr.Column(scale=1):
+                    with gr.Accordion('Target Mapping', open=False):
+                        gr.Markdown('''<i>If deploying a targeted attack, use the mapping of classes
+                                    to integer below to populate the <b>target label</b> box in the parameters section.</i>''')
                         cifar_labels = gr.Dataframe(pd.DataFrame(['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'],
                                                     columns=['label']).rename_axis('target').reset_index(),
                                                     visible=True, elem_classes=["small-font", "df-padding"],
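Note: the three `update_patch_sliders` bindings above re-clamp the sliders whenever the dataset, the input image, or the patch size changes. A stripped-down sketch of the same pattern, with a hypothetical clamp function and the Gradio 4 component-as-update style already used in this diff:

    import gradio as gr

    def clamp_x(x, dim):
        # Keep x inside a hypothetical 224-pixel-wide image for a dim-sized patch.
        max_x = max(224 - dim, 0)
        return gr.Slider(maximum=max_x, value=min(x, max_x))

    with gr.Blocks() as sketch:
        dim = gr.Slider(minimum=1, maximum=224, value=32, step=1, label="Patch dimension")
        x = gr.Slider(minimum=0, maximum=223, value=0, step=1, label="Location (x)")
        dim.release(clamp_x, [x, dim], [x])  # re-clamp x when the patch size is released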
 
@@ -808,31 +1050,31 @@ with gr.Blocks(css=css, theme='xiaobaiyuan/theme_brief') as demo:
                                                 columns=['label']).rename_axis('target').reset_index(),
                                                 visible=False, elem_classes=["small-font", "df-padding"],
                                                 type="pandas",interactive=False)
-            eval_btn_patch = gr.Button("Evaluate")
-            model_clip.change()
-            dataset_type.change(show_target_label_dataframe, dataset_type, [cifar_labels, xview_labels])
-
+        with gr.Row():
+            eval_btn_patch = gr.Button("Evaluate")
+            dataset_type.change(show_target_label_dataframe, dataset_type, [cifar_labels, xview_labels])
+        with gr.Row():
             # Evaluation Output. Visualisations of success/failures of running evaluation attacks.
+
             with gr.Column(scale=2):
-                with gr.Row():
-                    with gr.Column():
-                        original_gallery = gr.Gallery(label="Original", preview=True, height=600)
-                        benign_output = gr.Label(num_top_classes=3, visible=False)
-                        clean_accuracy = gr.Number(label="Clean Accuracy", precision=2)
-
-                    with gr.Column():
-                        adversarial_gallery = gr.Gallery(label="Adversarial", preview=True, height=600)
-                        adversarial_output = gr.Label(num_top_classes=3, visible=False)
-                        robust_accuracy = gr.Number(label="Robust Accuracy", precision=2)
-                        patch_image = gr.Image(label="Adversarial Patch")
-
-                dataset_type.change(patch_show_label_output, dataset_type, [benign_output, adversarial_output,
-                                                                            clean_accuracy, robust_accuracy, patch_image])
-                eval_btn_patch.click(clf_evasion_evaluate, inputs=[attack, model_type, model_path, model_channels, model_height, model_width,
-                                                                   model_clip, max_iter, x_location, y_location, patch_height, patch_width, targeted,
-                                                                   dataset_type, dataset_path, dataset_split, image],
-                                     outputs=[original_gallery, benign_output, adversarial_gallery, adversarial_output, clean_accuracy,
-                                              robust_accuracy, patch_image])
+                original_gallery = gr.Gallery(label="Original", preview=True, height=600)
+                benign_output = gr.Label(num_top_classes=3, visible=False)
+                clean_accuracy = gr.Number(label="Clean Accuracy", precision=2)
+
+            with gr.Column(scale=2):
+                adversarial_gallery = gr.Gallery(label="Adversarial", preview=True, height=600)
+                adversarial_output = gr.Label(num_top_classes=3, visible=False)
+                robust_accuracy = gr.Number(label="Robust Accuracy", precision=2)
+            with gr.Column(scale=1):
+                patch_image = gr.Image(label="Adversarial Patch")
+
+            dataset_type.change(patch_show_label_output, dataset_type, [benign_output, adversarial_output,
+                                                                        clean_accuracy, robust_accuracy, patch_image])
+            eval_btn_patch.click(clf_evasion_evaluate, inputs=[attack, model_type, model_path, model_channels, model_height, model_width,
+                                                               model_clip, max_iter, x_location, y_location, patch_dim, targeted,
+                                                               dataset_type, dataset_path, dataset_split, image],
+                                 outputs=[original_gallery, benign_output, adversarial_gallery, adversarial_output, clean_accuracy,
+                                          robust_accuracy, patch_image])
 
         with gr.Row():
             clear_btn = gr.ClearButton([image, targeted, original_gallery, benign_output, clean_accuracy,
 
@@ -930,40 +1172,64 @@ with gr.Blocks(css=css, theme='xiaobaiyuan/theme_brief') as demo:
             with gr.Row():
                 clear_btn = gr.ClearButton([image, original_gallery,
                                             adversarial_gallery])
-
-
-
         with gr.Tab("Adversarial Patch"):
             gr.Markdown("This attack crafts an adversarial patch that facilitates evasion.")
 
             with gr.Row():
+                with gr.Column(scale=1):
+                    with gr.Accordion("Adversarial Patch Parameters", open=False):
+                        attack = gr.Textbox(visible=True, value="Adversarial Patch", label="Attack", interactive=False)
+                        max_iter = gr.Slider(minimum=1, maximum=100, label="Max iterations", value=1, step=1)
+                        patch_dim = gr.Slider(minimum=1, maximum=640, label="Patch dimension", value=100, step=1, info="The height and width of the patch")
+                        x_location = gr.Slider(minimum=0, maximum=640, label="Location (x)", value=100, step=1, info="Shift patch left and right")
+                        y_location = gr.Slider(minimum=0, maximum=480, label="Location (y)", value=100, step=1, info="Shift patch up and down")
+                        targeted = gr.Radio(choices=['Yes', 'No'], value='No', label="Targeted")
+                        det_threshold = gr.Slider(minimum=0.0, maximum=100, label="Detection threshold", value=0.2)
+
+                    dataset_type.change(update_patch_sliders,
+                                        [x_location, y_location, patch_dim, dataset_type, dataset_path, dataset_split, image],
+                                        [patch_dim, x_location, y_location])
+                    image.change(update_patch_sliders,
+                                 [x_location, y_location, patch_dim, dataset_type, dataset_path, dataset_split, image],
+                                 [patch_dim, x_location, y_location])
+                    patch_dim.release(update_patch_sliders,
+                                      [x_location, y_location, patch_dim, dataset_type, dataset_path, dataset_split, image],
+                                      [patch_dim, x_location, y_location])
 
                 with gr.Column(scale=1):
-                    attack = gr.Textbox(visible=True, value="Adversarial Patch", label="Attack", interactive=False)
-                    max_iter = gr.Slider(minimum=1, maximum=100, label="Max iterations", value=1, step=1)
-                    x_location = gr.Slider(minimum=1, maximum=640, label="Location (x)", value=100, step=1)
-                    y_location = gr.Slider(minimum=1, maximum=480, label="Location (y)", value=100, step=1)
-                    patch_height = gr.Slider(minimum=1, maximum=640, label="Patch height", value=100, step=1)
-                    patch_width = gr.Slider(minimum=1, maximum=480, label="Patch width", value=100, step=1)
-                    targeted = gr.Radio(choices=['Yes', 'No'], value='No', label="Targeted")
-                    det_threshold = gr.Slider(minimum=0.0, maximum=100, label="Detection threshold", value=0.2)
-                    eval_btn_patch = gr.Button("Evaluate")
-                    model_clip.change()
+                    #adding in preview option for patch location
+                    with gr.Accordion('Preview Patch Placement', open=False):
+                        gr.Markdown('''<i>Using the location (x and y) and patch size (height and width) controls in the <b>parameters</b>
+                                    section, you can control how the adversarial patch is positioned.</i>''')
+                        with gr.Column():
+                            test_patch_gallery = gr.Image(show_label=False, show_download_button=False, width=300, height=300, elem_classes=["output-image"])
 
+                        preview_patch_loc = gr.Button('Preview Patch Placement')
+                        preview_patch_loc.click(preview_patch_location, inputs=[x_location, y_location, patch_dim,
+                                                                                dataset_type, dataset_path, dataset_split, image],
+                                                outputs = [test_patch_gallery])
+
+            with gr.Row():
+                eval_btn_patch = gr.Button("Evaluate")
+
+            with gr.Row():
                 # Evaluation Output. Visualisations of success/failures of running evaluation attacks.
                 with gr.Column(scale=3):
                     with gr.Row():
-                        with gr.Column():
+                        with gr.Column(scale=2):
                             original_gallery = gr.Gallery(label="Original", preview=True, show_download_button=True, height=600)
 
-                        with gr.Column():
+                        with gr.Column(scale=2):
                             adversarial_gallery = gr.Gallery(label="Adversarial", preview=True, show_download_button=True, height=600)
+
+                        with gr.Column(scale=1):
+                            patch_image = gr.Image(label="Adversarial Patch")
 
                     dataset_type.change(patch_show_label_output, dataset_type, [adversarial_output, ])
                     eval_btn_patch.click(det_evasion_evaluate, inputs=[attack, model_type, model_path, model_channels, model_height, model_width,
-                                                                       model_clip, max_iter, x_location, y_location, patch_height, patch_width, targeted,
+                                                                       model_clip, max_iter, x_location, y_location, patch_dim, targeted,
                                                                        det_threshold,dataset_type, image],
-                                         outputs=[original_gallery, adversarial_gallery])
+                                         outputs=[original_gallery, adversarial_gallery, patch_image])
 
             with gr.Row():
                 clear_btn = gr.ClearButton([image, targeted, original_gallery,
 
@@ -994,20 +1260,95 @@ with gr.Blocks(css=css, theme='xiaobaiyuan/theme_brief') as demo:
         with gr.Tab("AutoAttack"):
             gr.Markdown("Coming soon.")
 
-if __name__ == "__main__":
 
-    import os, sys, subprocess
+def launch_demo_via_huggingface():
+    """
+    Hardened Extension of Adversarial Robustness Toolbox (HEART) has not yet been open-sourced to PyPI.
+    Until this is completed, the HEART library must be installed via a private repository.
+    This launch method gets private secrets from Huggingface and executes the HEART install via pip.
+
+    TODO [HEART Issue#13]: Tear down this Huggingface demo launch switch once HEART has been fully open-sourced.
+    """
+
+    import os, re
+    from pip._internal.cli.main import main as pipmain
 
     # Huggingface does not support LFS via external https, disable smudge
     os.putenv('GIT_LFS_SKIP_SMUDGE', '1')
 
-    HEART_USER=os.environ['HEART_USER']
-    HEART_TOKEN=os.environ['HEART_TOKEN']
-
-    HEART_INSTALL=f"git+https://{HEART_USER}:{HEART_TOKEN}@gitlab.jatic.net/jatic/ibm/hardened-extension-adversarial-robustness-toolbox.git"
+    # Get protected private repository installation command from Huggingface secrets
+    HEART_INSTALL=os.environ['HEART_INSTALL']
+    HEART_REGEX=r"git\+https\:\/\/[a-zA-Z]{9}\:[a-zA-Z0-9\-\_]{26}\@gitlab\.jatic\.net\/jatic\/ibm\/hardened-extension-adversarial-robustness-toolbox\.git"
 
-    subprocess.run([sys.executable, '-m', 'pip', 'install', HEART_INSTALL])
+    # Execute pip install
+    if re.match(HEART_REGEX, HEART_INSTALL):
+        pipmain(['install', HEART_INSTALL])
+    else:
+        print(
+            f"""
+            The HEART library was not installed. Credentials supplied were most likely incorrect.
+            Install string supplied did not match filter: {HEART_REGEX}
+            """
+        )
 
+    demo.launch()
+
+
+def launch_demo_via_local():
+    """
+    Default functionality of launching the Gradio app from any local development environment.
+    This launch method assumes that the local environment can launch a web browser from within the
+    same local environment and navigate to the local host shown in demo.launch() output.
+
+    * Important Notes:
+        - This launch mechanism will not function via Huggingface.
+        - When launching via Raven, share must be set to True (Raven has no local web browser).
+    """
+
     # during development, set debug=True
-    demo.launch(debug=True)
+    demo.launch(show_api=False, debug=True, share=False,
+                server_name="0.0.0.0",
+                server_port=7777,
+                ssl_verify=False,
+                max_threads=20)
+
+
+if __name__ == "__main__":
+
+    import socket
+
+    # Huggingface Hostname Patterns
+    HF_SPACES=[
+        "alpha-heart-gradio",
+        "cdao-heart-gradio",
+    ]
+
+    # Try to describe hostname using socket. If this doesn't work, fail open as local.
+    hostname = ""
+
+    try:
+        print("Attempting to resolve hostname via socket.gethostname()...")
+        hostname = socket.gethostname()
+        print(f"Hostname resolved successfully as: {hostname}")
+    except OSError:
+        print("Unable to resolve hostname via socket.gethostname()...")
+        hostname = "local"
+        print("Defaulting to hostname set as: local")
+
+    if any(space in hostname for space in HF_SPACES):
+        print(
+            f"""
+            [{hostname}] is most likely within a Huggingface Space.
+            Current understood list of HF_SPACES: {HF_SPACES}
+            Executing demo.launch() using <launch_demo_via_huggingface()>
+            """
+        )
+        launch_demo_via_huggingface()
+    else:
+        print(
+            f"""
+            {hostname} is either local or uncaptured for demo.launch() switching.
+            Executing demo.launch() using <launch_demo_via_local()>
+            """
+        )
+        launch_demo_via_local()
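Note: a quick self-check of the HEART_REGEX install-string filter added above; the user and token below are made-up placeholders with the right shape (9 letters, then 26 token characters), not real credentials:

    import re

    HEART_REGEX = r"git\+https\:\/\/[a-zA-Z]{9}\:[a-zA-Z0-9\-\_]{26}\@gitlab\.jatic\.net\/jatic\/ibm\/hardened-extension-adversarial-robustness-toolbox\.git"
    ok = "git+https://abcdefghi:" + "x" * 26 + "@gitlab.jatic.net/jatic/ibm/hardened-extension-adversarial-robustness-toolbox.git"
    bad = "git+https://pypi.org/simple/some-other-package"

    assert re.match(HEART_REGEX, ok)       # a well-formed install string passes
    assert not re.match(HEART_REGEX, bad)  # anything else is rejected and not installed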
carbon_colors.py → gradio/carbon_colors.py RENAMED
File without changes
carbon_theme.py → gradio/carbon_theme.py RENAMED
File without changes
requirements.txt CHANGED
@@ -1,55 +1,5 @@
-numpy>=1.18.5,<1.25
-scipy==1.10.1
-matplotlib==3.7.1
-scikit-learn>=0.22.2,<1.2.0
-six==1.16.0
-Pillow>=10.1.0
-tqdm==4.65.0
-statsmodels==0.13.5
-pydub==0.25.1
-resampy==0.4.2
-ffmpeg-python==0.2.0
-cma==3.3.0
-pandas==2.0.1
-librosa==0.10.0.post2
-numba~=0.56.4
-opencv-python
-sortedcontainers==2.4.0
-h5py==3.8.0
-
-jupyter>=1.0.0
-pytest~=7.3.1
-pytest-flake8~=1.1.1
-flake8~=4.0.1
-pytest-mock~=3.10.0
-pytest-cov~=4.0.0
-requests~=2.31.0
-
---find-links https://download.pytorch.org/whl/cu116/torch_stable.html
-torch==1.13.1
-torchaudio==0.13.1
-torchvision==0.14.1
-
-mxnet-native==1.8.0.post0; sys_platform != "darwin"
-
-tensorflow==2.10.1; sys_platform != "darwin"
-keras==2.10.0; sys_platform != "darwin"
-tensorflow-addons>=0.13.0; sys_platform != "darwin"
-
-catboost==1.1.1
-xgboost==1.7.5
-yolov5==7.0.13
-
+torch
+torchvision
+yolov5
 multiprocess
-gradio>=4.13.0
-
-kornia~=0.6.12
-tensorboardX==2.6
-lief==0.12.3
-
-pylint==2.12.2
-mypy==1.4.1
-pycodestyle==2.8.0
-black==22.3.0
-isort==5.12.0
-
+datasets
setup.py DELETED
@@ -1,42 +0,0 @@
-import codecs
-import os
-
-from setuptools import find_packages, setup
-
-install_requires = [
-    "maite==0.3.4",
-    "adversarial-robustness-toolbox==1.16.0",
-    "scikit-learn>=0.22.2,<1.2.0",
-    "six",
-    "setuptools",
-    "tqdm",
-]
-
-
-def read(rel_path):
-    here = os.path.abspath(os.path.dirname(__file__))
-    with codecs.open(os.path.join(here, rel_path), "r", encoding="utf-8") as fp:
-        return fp.read()
-
-
-def get_version(rel_path):
-    for line in read(rel_path).splitlines():
-        if line.startswith("__version__"):
-            delim = '"' if '"' in line else "'"
-            return line.split(delim)[1]
-    raise RuntimeError("Unable to find version string.")
-
-
-setup(
-    name="hardened-extension-adversarial-robustness-toolbox",
-    version=get_version("src/heart/__init__.py"),
-    description="Extension for ART compatible with MAITE.",
-    author="IBM",
-    author_email="<email>",
-    maintainer="IBM",
-    maintainer_email="<email>",
-    license="MIT",
-    install_requires=install_requires,
-    include_package_data=True,
-    python_requires=">=3.9,<3.11",
-)
utils/data/coco_elephant.jpg DELETED
Binary file (374 kB)