anonymous8 committed on
Commit
34b7dc2
1 Parent(s): 944cd2a
Files changed (1) hide show
  1. app.py +8 -9
app.py CHANGED
@@ -124,8 +124,8 @@ def generate_adversarial_example(dataset, attacker, text=None, label=None):
124
  }[result["is_adv_label"]]
125
  advdetection_df["perturbed_label"] = result["perturbed_label"]
126
  advdetection_df["confidence"] = round(result["is_adv_confidence"], 3)
127
- # advdetection_df['ref_is_attack'] = result['ref_is_adv_label']
128
- # advdetection_df['is_correct'] = result['ref_is_adv_check']
129
 
130
  else:
131
  return generate_adversarial_example(dataset, attacker)
@@ -186,24 +186,23 @@ def check_gpu():
186
 
187
 
188
  if __name__ == "__main__":
189
- init()
190
 
191
  demo = gr.Blocks()
192
 
193
  with demo:
194
- gr.Markdown("<h1 align='center'>Reactive Perturbation Defocusing for Textual Adversarial Defense</h1>")
195
  gr.Markdown("""
196
- - This demo has no mechanism to ensure the adversarial example will be correctly repaired by Rapid. The repair success rate is actually the performance reported in the paper (approximately up to 97%).
197
  - The adversarial example and repaired adversarial example may be unnatural to read; this is because the attackers usually generate unnatural perturbations. Rapid does not introduce additional unnatural perturbations.
198
  - To the best of our knowledge, Reactive Perturbation Defocusing is a novel approach to adversarial defense. Rapid significantly (>10% defense accuracy improvement) outperforms the state-of-the-art methods.
199
  - The DeepWordBug is an unknown attacker to the adversarial detector and reactive defense module. DeepWordBug has different attacking patterns from other attackers and shows the generalizability and robustness of Rapid.
200
- - To help the review & evaluation of EMNLP-2023, we will host this demo on a GPU device to speed up the inference process d. Then it will be deployed on a CPU device in the future.
201
  """)
202
  gr.Markdown("<h2 align='center'>Natural Example Input</h2>")
203
  with gr.Group():
204
  with gr.Row():
205
  input_dataset = gr.Radio(
206
- choices=["SST2", "AGNews10K", "Amazon"],
207
  value="SST2",
208
  label="Select a testing dataset and an adversarial attacker to generate an adversarial example.",
209
  )
@@ -259,7 +258,7 @@ if __name__ == "__main__":
259
  )
260
  output_repaired_label = gr.Textbox(label="Predicted Label of the Repaired Adversarial Example")
261
 
262
- gr.Markdown("<h2 align='center'>Example Difference (Comparisons)</p>")
263
  gr.Markdown("""
264
  <p align='center'>The (+) and (-) in the boxes indicate the added and deleted characters in the adversarial example compared to the original input natural example.</p>
265
  """)
@@ -321,4 +320,4 @@ if __name__ == "__main__":
321
  ],
322
  )
323
 
324
- demo.queue(2).launch()
 
124
  }[result["is_adv_label"]]
125
  advdetection_df["perturbed_label"] = result["perturbed_label"]
126
  advdetection_df["confidence"] = round(result["is_adv_confidence"], 3)
127
+ advdetection_df['ref_is_attack'] = result['ref_is_adv_label']
128
+ advdetection_df['is_correct'] = result['ref_is_adv_check']
129
 
130
  else:
131
  return generate_adversarial_example(dataset, attacker)
 
186
 
187
 
188
  if __name__ == "__main__":
189
+ # init()
190
 
191
  demo = gr.Blocks()
192
 
193
  with demo:
194
+ gr.Markdown("<h1 align='center'>Reactive Perturbation Defocusing (Rapid) for Textual Adversarial Defense</h1>")
195
  gr.Markdown("""
196
+ - This demo has no mechanism to ensure the adversarial example will be correctly repaired by Rapid. The repair success rate is actually the performance reported in the paper.
197
  - The adversarial example and repaired adversarial example may be unnatural to read; this is because the attackers usually generate unnatural perturbations. Rapid does not introduce additional unnatural perturbations.
198
  - To the best of our knowledge, Reactive Perturbation Defocusing is a novel approach to adversarial defense. Rapid significantly (>10% defense accuracy improvement) outperforms the state-of-the-art methods.
199
  - The DeepWordBug is an unknown attacker to the adversarial detector and reactive defense module. DeepWordBug has different attacking patterns from other attackers and shows the generalizability and robustness of Rapid.
 
200
  """)
201
  gr.Markdown("<h2 align='center'>Natural Example Input</h2>")
202
  with gr.Group():
203
  with gr.Row():
204
  input_dataset = gr.Radio(
205
+ choices=["SST2", "AGNews10K", "Yahoo", "Amazon"],
206
  value="SST2",
207
  label="Select a testing dataset and an adversarial attacker to generate an adversarial example.",
208
  )
 
258
  )
259
  output_repaired_label = gr.Textbox(label="Predicted Label of the Repaired Adversarial Example")
260
 
261
+ gr.Markdown("<h2 align='center'>Example Comparisons</h2>")
262
  gr.Markdown("""
263
  <p align='center'>The (+) and (-) in the boxes indicate the added and deleted characters in the adversarial example compared to the original input natural example.</p>
264
  """)
 
320
  ],
321
  )
322
 
323
+ demo.queue(concurrency_count=10).launch()