anonymous8 commited on
Commit
944cd2a
1 Parent(s): 6c28a7e
Files changed (1) hide show
  1. app.py +42 -18
app.py CHANGED
@@ -21,8 +21,6 @@ from textattack.attack_recipes import (
21
  from textattack.attack_results import SuccessfulAttackResult
22
  from utils import SentAttacker, get_agnews_example, get_sst2_example, get_amazon_example, get_imdb_example, diff_texts
23
 
24
- nltk.download("omw-1.4")
25
-
26
  sent_attackers = {}
27
  tad_classifiers = {}
28
 
@@ -41,6 +39,8 @@ app = Flask(__name__)
41
 
42
 
43
  def init():
 
 
44
  if not os.path.exists("TAD-SST2"):
45
  z = zipfile.ZipFile("checkpoints.zip", "r")
46
  z.extractall(os.getcwd())
@@ -146,7 +146,6 @@ def generate_adversarial_example(dataset, attacker, text=None, label=None):
146
 
147
 
148
  def run_demo(dataset, attacker, text=None, label=None):
149
-
150
  try:
151
  data = {
152
  "dataset": dataset,
@@ -174,21 +173,31 @@ def run_demo(dataset, attacker, text=None, label=None):
174
  print(e)
175
  return generate_adversarial_example(dataset, attacker, text, label)
176
 
177
- if __name__ == "__main__":
178
 
 
 
 
 
 
 
 
 
 
 
 
 
179
  init()
180
 
181
  demo = gr.Blocks()
182
 
183
  with demo:
184
  gr.Markdown("<h1 align='center'>Reactive Perturbation Defocusing for Textual Adversarial Defense</h1>")
185
- gr.Markdown("<h3 align='center'>Clarifications</h2>")
186
  gr.Markdown("""
187
- - This demo has no mechanism to ensure the adversarial example will be correctly repaired by RPD. The repair success rate is actually the performance reported in the paper (approximately up to 97%).
188
- - The adversarial example and repaired adversarial example may be unnatural to read, while it is because the attackers usually generate unnatural perturbations. RPD does not introduce additional unnatural perturbations.
189
- - To our best knowledge, Reactive Perturbation Defocusing is a novel approach in adversarial defense. RPD significantly (>10% defense accuracy improvement) outperforms the state-of-the-art methods.
190
- - The DeepWordBug is an unknown attacker to the adversarial detector and reactive defense module. DeepWordBug has different attacking patterns from other attackers and shows the generalizability and robustness of RPD.
191
- - To help the review & evaluation of ACL2023, we will host this demo on a GPU device to speed up the inference process in the next month. Then it will be deployed on a CPU device in the future.
192
  """)
193
  gr.Markdown("<h2 align='center'>Natural Example Input</h2>")
194
  with gr.Group():
@@ -207,16 +216,32 @@ if __name__ == "__main__":
207
  with gr.Row():
208
  input_sentence = gr.Textbox(
209
  placeholder="Input a natural example...",
210
- label="Alternatively, input a natural example and its original label to generate an adversarial example.",
211
  )
212
  input_label = gr.Textbox(
213
- placeholder="Original label...", label="Original Label"
214
  )
215
 
216
  button_gen = gr.Button(
217
- "Generate an adversarial example to repair using RPD (GPU: < 1 minute, CPU: 1-10 minutes)",
218
  variant="primary",
219
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
220
 
221
  gr.Markdown("<h2 align='center'>Generated Adversarial Example and Repaired Adversarial Example</h2>")
222
 
@@ -230,14 +255,14 @@ if __name__ == "__main__":
230
  output_adv_label = gr.Textbox(label="Predicted Label of the Adversarial Example")
231
  with gr.Row():
232
  output_repaired_example = gr.Textbox(
233
- label="Repaired Adversarial Example by RPD"
234
  )
235
  output_repaired_label = gr.Textbox(label="Predicted Label of the Repaired Adversarial Example")
236
 
237
  gr.Markdown("<h2 align='center'>Example Difference (Comparisons)</h2>")
238
  gr.Markdown("""
239
- <p align='center'>The (+) and (-) in the boxes indicate the added and deleted characters in the adversarial example compared to the original input natural example.</p>
240
- """)
241
  ori_text_diff = gr.HighlightedText(
242
  label="The Original Natural Example",
243
  combine_adjacent=True,
@@ -271,7 +296,7 @@ if __name__ == "__main__":
271
  label="Repaired Standard Classification Result"
272
  )
273
  gr.Markdown(
274
- "If is_repaired=true, it has been repaired by RPD. "
275
  "The pred_label field indicates the standard classification result. "
276
  "The confidence field represents the confidence of the predicted label. "
277
  "The is_correct field indicates whether the predicted label is correct."
@@ -297,4 +322,3 @@ if __name__ == "__main__":
297
  )
298
 
299
  demo.queue(2).launch()
300
-
 
21
  from textattack.attack_results import SuccessfulAttackResult
22
  from utils import SentAttacker, get_agnews_example, get_sst2_example, get_amazon_example, get_imdb_example, diff_texts
23
 
 
 
24
  sent_attackers = {}
25
  tad_classifiers = {}
26
 
 
39
 
40
 
41
  def init():
42
+ nltk.download("omw-1.4")
43
+
44
  if not os.path.exists("TAD-SST2"):
45
  z = zipfile.ZipFile("checkpoints.zip", "r")
46
  z.extractall(os.getcwd())
 
146
 
147
 
148
  def run_demo(dataset, attacker, text=None, label=None):
 
149
  try:
150
  data = {
151
  "dataset": dataset,
 
173
  print(e)
174
  return generate_adversarial_example(dataset, attacker, text, label)
175
 
 
176
 
177
def check_gpu():
    """Probe the hosted GPU demo endpoint and report whether it is reachable.

    Sends a short POST request to the remote inference API. Any HTTP status
    below 500 is treated as "service up" (a 4xx still proves the server
    answered); 5xx or any network failure (timeout, DNS error, connection
    refused) is treated as unavailable.

    Returns:
        str: 'GPU available' or 'GPU not available', displayed in the
        GPU-status textbox of the UI.
    """
    try:
        # Keep the try body minimal: only the network call can raise here.
        response = requests.post(
            'https://rpddemo.pagekite.me/api/generate_adversarial_example',
            timeout=3,
        )
    except Exception:
        # Broad catch is deliberate: any failure mode means "not available".
        return 'GPU not available'
    # 5xx => server reachable but erroring; everything else counts as healthy.
    return 'GPU available' if response.status_code < 500 else 'GPU not available'
186
+
187
+
188
+ if __name__ == "__main__":
189
  init()
190
 
191
  demo = gr.Blocks()
192
 
193
  with demo:
194
  gr.Markdown("<h1 align='center'>Reactive Perturbation Defocusing for Textual Adversarial Defense</h1>")
 
195
  gr.Markdown("""
196
+ - This demo has no mechanism to ensure the adversarial example will be correctly repaired by Rapid. The repair success rate is actually the performance reported in the paper (approximately up to 97%).
197
+ - The adversarial example and repaired adversarial example may be unnatural to read; this is because the attackers usually generate unnatural perturbations. Rapid does not introduce additional unnatural perturbations.
198
+ - To the best of our knowledge, Reactive Perturbation Defocusing is a novel approach in adversarial defense. Rapid significantly (>10% defense accuracy improvement) outperforms the state-of-the-art methods.
199
+ - The DeepWordBug is an unknown attacker to the adversarial detector and reactive defense module. DeepWordBug has different attacking patterns from other attackers and shows the generalizability and robustness of Rapid.
200
+ - To help the review & evaluation of EMNLP-2023, we will host this demo on a GPU device to speed up the inference process. Then it will be deployed on a CPU device in the future.
201
  """)
202
  gr.Markdown("<h2 align='center'>Natural Example Input</h2>")
203
  with gr.Group():
 
216
  with gr.Row():
217
  input_sentence = gr.Textbox(
218
  placeholder="Input a natural example...",
219
+ label="Alternatively, input a natural example and its original label (from above datasets) to generate an adversarial example.",
220
  )
221
  input_label = gr.Textbox(
222
+ placeholder="Original label, must be an integer...", label="Original Label"
223
  )
224
 
225
  button_gen = gr.Button(
226
+ "Generate an adversarial example to repair using Rapid (GPU: < 1 minute, CPU: 1-10 minutes)",
227
  variant="primary",
228
  )
229
+ gpu_status_text = gr.Textbox(
230
+ label='GPU status',
231
+ placeholder="Please click to check",
232
+ )
233
+ button_check = gr.Button(
234
+ "Check if GPU available",
235
+ variant="primary"
236
+ )
237
+
238
+ button_check.click(
239
+ fn=check_gpu,
240
+ inputs=[],
241
+ outputs=[
242
+ gpu_status_text
243
+ ]
244
+ )
245
 
246
  gr.Markdown("<h2 align='center'>Generated Adversarial Example and Repaired Adversarial Example</h2>")
247
 
 
255
  output_adv_label = gr.Textbox(label="Predicted Label of the Adversarial Example")
256
  with gr.Row():
257
  output_repaired_example = gr.Textbox(
258
+ label="Repaired Adversarial Example by Rapid"
259
  )
260
  output_repaired_label = gr.Textbox(label="Predicted Label of the Repaired Adversarial Example")
261
 
262
  gr.Markdown("<h2 align='center'>Example Difference (Comparisons)</h2>")
263
  gr.Markdown("""
264
+ <p align='center'>The (+) and (-) in the boxes indicate the added and deleted characters in the adversarial example compared to the original input natural example.</p>
265
+ """)
266
  ori_text_diff = gr.HighlightedText(
267
  label="The Original Natural Example",
268
  combine_adjacent=True,
 
296
  label="Repaired Standard Classification Result"
297
  )
298
  gr.Markdown(
299
+ "If is_repaired=true, it has been repaired by Rapid. "
300
  "The pred_label field indicates the standard classification result. "
301
  "The confidence field represents the confidence of the predicted label. "
302
  "The is_correct field indicates whether the predicted label is correct."
 
322
  )
323
 
324
  demo.queue(2).launch()