PFEemp2024 committed on
Commit
6e0fc99
1 Parent(s): fa72b49

Upload GreedyMultyGeneration.py

Files changed (1)
  1. GreedyMultyGeneration.py +721 -0
GreedyMultyGeneration.py ADDED
@@ -0,0 +1,721 @@
import json
import random

import numpy as np
import torch
from torch.nn.functional import softmax

from textattack.goal_function_results import GoalFunctionResultStatus
from textattack.search_methods import SearchMethod
from textattack.shared.validators import (
    transformation_consists_of_word_swaps_and_deletions,
)


class GreedyMultipleGeneration(SearchMethod):
    def __init__(
        self,
        wir_method="delete",
        k=30,
        embed=None,
        file=None,
        rollback_level=3,
        naive=False,
        clust=None,
        train_file="train_file.csv",
    ):
        self.wir_method = wir_method
        self.k = k  # maximum number of refinement iterations
        self.embed = embed  # universal sentence encoder
        self.file = file  # file used to store the textual similarity
        self.naive = naive
        self.rollback_level = rollback_level
        self.successful_attacks = {}
        self.clust = clust
        self.train_file = train_file  # CSV file that collects generated samples

    def _get_index_order(self, initial_text, indices_to_order):
        """Returns word indices of ``initial_text`` in descending order of
        importance."""

        if "unk" in self.wir_method:
            leave_one_texts = [
                initial_text.replace_word_at_index(i, "[UNK]")
                for i in indices_to_order
            ]
            leave_one_results, search_over = self.get_goal_results(leave_one_texts)
            index_scores = np.array([result.score for result in leave_one_results])

        elif "delete" in self.wir_method:
            leave_one_texts = [
                initial_text.delete_word_at_index(i) for i in indices_to_order
            ]
            leave_one_results, search_over = self.get_goal_results(leave_one_texts)
            index_scores = np.array([result.score for result in leave_one_results])

        elif "weighted-saliency" in self.wir_method:
            # First, compute word saliency.
            leave_one_texts = [
                initial_text.replace_word_at_index(i, "unk") for i in indices_to_order
            ]
            leave_one_results, search_over = self.get_goal_results(leave_one_texts)
            saliency_scores = np.array([result.score for result in leave_one_results])

            softmax_saliency_scores = softmax(
                torch.Tensor(saliency_scores), dim=0
            ).numpy()

            # Compute the largest change in score we can find by swapping each word.
            delta_ps = []
            for idx in indices_to_order:
                # Exit the loop when search_over is True, but make sure delta_ps
                # ends up the same size as softmax_saliency_scores.
                if search_over:
                    delta_ps = delta_ps + [0.0] * (
                        len(softmax_saliency_scores) - len(delta_ps)
                    )
                    break

                transformed_text_candidates = self.get_transformations(
                    initial_text,
                    original_text=initial_text,
                    indices_to_modify=[idx],
                )
                if not transformed_text_candidates:
                    # No valid synonym substitutions for this word.
                    delta_ps.append(0.0)
                    continue
                swap_results, search_over = self.get_goal_results(
                    transformed_text_candidates
                )
                score_change = [result.score for result in swap_results]
                if not score_change:
                    delta_ps.append(0.0)
                    continue
                max_score_change = np.max(score_change)
                delta_ps.append(max_score_change)

            index_scores = softmax_saliency_scores * np.array(delta_ps)

        elif "gradient" in self.wir_method:
            victim_model = self.get_victim_model()

            index_scores = np.zeros(len(indices_to_order))
            grad_output = victim_model.get_grad(initial_text.tokenizer_input)
            gradient = grad_output["gradient"]
            word2token_mapping = initial_text.align_with_model_tokens(victim_model)
            for i, index in enumerate(indices_to_order):
                matched_tokens = word2token_mapping[index]
                if not matched_tokens:
                    index_scores[i] = 0.0
                else:
                    agg_grad = np.mean(gradient[matched_tokens], axis=0)
                    index_scores[i] = np.linalg.norm(agg_grad, ord=1)

            search_over = False

        index_order = np.array(indices_to_order)[(-index_scores).argsort()]
        index_scores = sorted(index_scores, reverse=True)
        return index_order, search_over, index_scores

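    # Illustrative example of the ordering above (made-up values): with
    # index_scores = np.array([0.1, 0.9, 0.4]) over indices_to_order = [4, 7, 9],
    # (-index_scores).argsort() yields [1, 2, 0], so index_order == [7, 9, 4],
    # i.e. words are visited from most to least important.
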
    # This implements a rollback, used only to reduce perturbation.
    def swap_to_origin(self, cur_result, initial_result, index):
        """Replace the word at ``index`` with its original and return the
        resulting goal function result."""
        new_attacked_text = cur_result.attacked_text.replace_word_at_index(
            index, initial_result.attacked_text.words[index]
        )
        result, _ = self.get_goal_results([new_attacked_text])
        return result[0]

    @staticmethod
    def check_synonym_validity(
        ind, ind_synonym, Synonym_indices, Current_attacked_Results, j, synonym
    ):
        """Checks whether a synonym is valid for a given index in the attacked text.

        Args:
            ind: The index of the word in the attacked text.
            ind_synonym: The index of the synonym in the list of synonyms.
            Synonym_indices: A dictionary of synonym indices.
            Current_attacked_Results: A list of attack result objects.
            j: The index of the current result in the list.
            synonym: The synonym to check.

        Returns:
            True if the synonym is valid, False otherwise."""

        # Check if the synonym has already been chosen.
        if (ind, ind_synonym) in Synonym_indices:
            return False

        # Get the current attacked text and its words.
        current_attacked_text = Current_attacked_Results[j].attacked_text
        current_attacked_words = current_attacked_text.words

        # Check if the synonym is already present in the attacked text.
        if synonym in current_attacked_words[ind]:
            return False

        return True

    def generate_naive_attack(self, initial_result):
        curent_result = initial_result
        # Perturbed indices with their scores on the original text.
        perturbed_indexes = {}
        # Possible synonyms of each index, with their scores on the original
        # text, cached to reduce the average number of queries.
        synonyms = {}
        # Indices with no transformation, tracked so we avoid recomputing them
        # and save queries.
        non_usefull_indexes = []
        attacked_text = initial_result.attacked_text
        _, indices_to_order = self.get_indices_to_order(attacked_text)

        # Sort words by order of importance.
        index_order, search_over, _ = self._get_index_order(
            attacked_text, indices_to_order
        )

        # Iterate through the words by importance.
        for index in index_order:
            if search_over:
                break
            transformed_text_candidates = self.get_transformations(
                curent_result.attacked_text,
                original_text=initial_result.attacked_text,
                indices_to_modify=[index],
            )

            if len(transformed_text_candidates) == 0:
                # Track unhelpful indices to optimize later passes.
                non_usefull_indexes.append(index)
                continue

            results, search_over = self.get_goal_results(
                transformed_text_candidates
            )

            max_result = max(results, key=lambda x: x.score)

            if max_result.score > curent_result.score:
                if not self.naive:
                    # Store the perturbed index with its score gain.
                    perturbed_indexes[index] = max_result.score - curent_result.score
                    # Keep every synonym except the one we just used.
                    synonyms[index] = [
                        (results[i].score, trans.words[index])
                        for i, trans in enumerate(transformed_text_candidates)
                        if trans.words[index] != max_result.attacked_text.words[index]
                    ]

                curent_result = max_result

            if curent_result.goal_status == GoalFunctionResultStatus.SUCCEEDED:
                break

        return (
            curent_result,
            perturbed_indexes,
            non_usefull_indexes,
            synonyms,
            curent_result.goal_status,
        )

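    # Illustrative shape of the state built above (made-up values):
    #   perturbed_indexes = {3: 0.42, 11: 0.17}            # index -> score gain
    #   synonyms = {3: [(0.35, "fast"), (0.20, "rapid")]}  # index -> (score, word)
    #   non_usefull_indexes = [5, 8]                       # no candidates found
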
    # TODO: we could track how many words were rolled back, for richer statistics.
    def perturbed_index_swap(
        self,
        initial_result,
        curent_result,
        non_perturbed_indexes,
        perturbed_indexes,
        synonyms,
        steps,
    ):
        past_curent_result = curent_result
        rollback_found = False
        steps = min(steps, len(perturbed_indexes) - 1)
        sucsefull_attacks = []
        for _ in range(steps):
            # TODO: finding the least important perturbed word in the new
            # attacked sample costs a lot of queries.
            # The index with the minimum perturbation score.
            rollback_index = min(perturbed_indexes, key=perturbed_indexes.get)
            # TODO: instead of dropping the index we could move it to
            # non_perturbed_indexes with a penalty, e.g. at the end of the
            # queue with a "visited" flag; for now we simply eliminate it.
            perturbed_indexes.pop(rollback_index, None)
            for index in non_perturbed_indexes:
                # Early return.
                if len(perturbed_indexes) == 1:
                    return (
                        curent_result,
                        non_perturbed_indexes,
                        perturbed_indexes,
                        synonyms,
                        sucsefull_attacks,
                        rollback_found,
                    )

                # Get candidates for the non-perturbed word.
                transformed_text_candidates = self.get_transformations(
                    curent_result.attacked_text,
                    original_text=initial_result.attacked_text,
                    indices_to_modify=[index],
                )

                if len(transformed_text_candidates) == 0:
                    non_perturbed_indexes.remove(index)
                    continue  # "wa7ed ma chaf wa7ed": nothing to see here

                results, _ = self.get_goal_results(transformed_text_candidates)

                # We add one perturbed word.
                max_result = max(results, key=lambda x: x.score)
                for res in results:
                    if res.score > curent_result.score:
                        if res.goal_status == GoalFunctionResultStatus.SUCCEEDED:
                            synonyms = self.update_synonyms(
                                synonyms=synonyms,
                                index_to_add=index,
                                index_to_remove=None,
                                curent_result=res,
                                results=results,
                                transformed_text_candidates=transformed_text_candidates,
                            )
                            # Record this successful attack.
                            sucsefull_attacks.append(res)
                # We got a better score.
                if max_result.score > curent_result.score:
                    # Explore the minimum perturbation on the original text.
                    inferior = min(perturbed_indexes, key=perturbed_indexes.get)
                    non_perturbed_indexes.remove(index)  # now perturbed

                    perturbed_indexes[index] = max_result.score - curent_result.score
                    # Restore one perturbed word.
                    result_rollback = self.swap_to_origin(
                        max_result, initial_result, rollback_index
                    )

                    perturbed_indexes.pop(inferior, None)

                    new_attacked_text = (
                        result_rollback.attacked_text.replace_word_at_index(
                            inferior,
                            initial_result.attacked_text.words[inferior],
                        )
                    )

                    result, _ = self.get_goal_results([new_attacked_text])

                    result_rollback = max(result, key=lambda x: x.score)
                    for res in result:
                        if res.goal_status == GoalFunctionResultStatus.SUCCEEDED:
                            synonyms = self.update_synonyms(
                                synonyms,
                                index,
                                inferior,
                                res,
                                results,
                                transformed_text_candidates,
                            )
                            # Record this successful attack.
                            sucsefull_attacks.append(res)
                    if (
                        result_rollback.goal_status
                        == GoalFunctionResultStatus.SUCCEEDED
                    ):
                        rollback_found = True
                        synonyms = self.update_synonyms(
                            synonyms,
                            index,
                            inferior,
                            result_rollback,
                            results,
                            transformed_text_candidates,
                        )
                        curent_result = result_rollback

        if rollback_found:
            return (
                curent_result,
                non_perturbed_indexes,
                perturbed_indexes,
                synonyms,
                sucsefull_attacks,
                rollback_found,
            )
        return (
            past_curent_result,
            non_perturbed_indexes,
            perturbed_indexes,
            synonyms,
            sucsefull_attacks,
            rollback_found,
        )

    def update_synonyms(
        self,
        synonyms,
        index_to_add=None,
        index_to_remove=None,
        curent_result=None,
        results=None,
        transformed_text_candidates=None,
    ):
        """Return an updated dict of synonyms."""
        if index_to_remove in synonyms and len(synonyms[index_to_remove]) != 0:
            # Remove the synonym that was just used at this index.
            synonyms[index_to_remove] = [
                syn
                for syn in synonyms[index_to_remove]
                if syn[1] != curent_result.attacked_text.words[index_to_remove]
            ]

        # Add the synonyms of the newly perturbed word with their scores.
        if index_to_add is not None and transformed_text_candidates is not None:
            synonyms[index_to_add] = [
                (results[i].score, trans.words[index_to_add])
                for i, trans in enumerate(transformed_text_candidates)
                if trans.words[index_to_add]
                != curent_result.attacked_text.words[index_to_add]
            ]

        return synonyms

    def get_non_perturbed_indexes(
        self, initial_result, perturbed_indexes, non_usefull_indexes
    ):
        """Return a list of non-perturbed indices."""
        all_indexes = set(range(len(initial_result.attacked_text.words)))
        perturbed_indexes_set = set(perturbed_indexes.keys())
        non_usefull_indexes_set = set(non_usefull_indexes)
        non_perturbed_indexes = list(
            all_indexes - perturbed_indexes_set - non_usefull_indexes_set
        )
        return non_perturbed_indexes

    def perform_search(self, initial_result):
        (
            curent_result,
            perturbed_indexes,
            non_usefull_indexes,
            synonyms,
            goal_statut,
        ) = self.generate_naive_attack(initial_result)
        sucsefull_attacks = [curent_result]

        new_curent_sucsefull_attacks = [curent_result]
        if not self.naive:
            # perturbed_index_swap is our first priority (when the attack
            # succeeds, goal_statut == GoalFunctionResultStatus.SUCCEEDED == 0).
            for i in range(self.k):
                non_perturbed_indexes = self.get_non_perturbed_indexes(
                    initial_result, perturbed_indexes, non_usefull_indexes
                )
                if len(new_curent_sucsefull_attacks) != 0:
                    # Choose the next text to work on; here we take the one
                    # with the maximum score.
                    curent_result = max(
                        new_curent_sucsefull_attacks, key=lambda x: x.score
                    )
                    new_curent_sucsefull_attacks.remove(curent_result)
                else:
                    curent_result, synonyms, synonym_found = self.swap_to_synonym(
                        curent_result, synonyms, perturbed_indexes
                    )
                    if synonym_found:
                        sucsefull_attacks.append(curent_result)
                        new_curent_sucsefull_attacks.append(curent_result)
                        continue
                    else:
                        non_perturbed_indexes = self.get_non_perturbed_indexes(
                            initial_result, perturbed_indexes, non_usefull_indexes
                        )
                        (
                            non_perturbed_indexes,
                            perturbed_indexes,
                            synonyms,
                            max_result,
                            sample_found,
                        ) = self.random_selection(
                            non_perturbed_indexes,
                            perturbed_indexes,
                            synonyms,
                            curent_result,
                            initial_result,
                        )

                        if sample_found:
                            new_curent_sucsefull_attacks.append(max_result)
                            sucsefull_attacks.append(curent_result)
                        else:
                            break
                if i % 3 == 0:
                    non_perturbed_indexes = self.get_non_perturbed_indexes(
                        initial_result, perturbed_indexes, non_usefull_indexes
                    )
                    (
                        non_perturbed_indexes,
                        perturbed_indexes,
                        synonyms,
                        max_result,
                        sample_found,
                    ) = self.random_selection(
                        non_perturbed_indexes,
                        perturbed_indexes,
                        synonyms,
                        curent_result,
                        initial_result,
                    )
                    if sample_found:
                        new_curent_sucsefull_attacks.append(max_result)
                        sucsefull_attacks.append(curent_result)

                if len(perturbed_indexes) > 1 and not goal_statut:
                    non_perturbed_indexes = self.get_non_perturbed_indexes(
                        initial_result, perturbed_indexes, non_usefull_indexes
                    )
                    (
                        curent_result,
                        non_perturbed_indexes,
                        perturbed_indexes,
                        synonyms,
                        sucsefull_attacks_partial,
                        rollback_found,
                    ) = self.perturbed_index_swap(
                        initial_result,
                        curent_result,
                        non_perturbed_indexes,
                        perturbed_indexes,
                        synonyms,
                        steps=self.rollback_level,
                    )
                    if len(sucsefull_attacks_partial) != 0:
                        sucsefull_attacks.extend(sucsefull_attacks_partial)
                        new_curent_sucsefull_attacks.extend(sucsefull_attacks_partial)
                    # Action 2: when no rollback is found, try swapping in a
                    # synonym, aiming for a better result.
                    if not rollback_found:
                        curent_result, synonyms, synonym_found = self.swap_to_synonym(
                            curent_result, synonyms, perturbed_indexes
                        )
                        if synonym_found:
                            sucsefull_attacks.append(curent_result)
                            new_curent_sucsefull_attacks.append(curent_result)

                # If the attack failed, we could give another synonym a chance;
                # skipped for now because no improvement was observed.
                # elif goal_statut == 1:
                #     curent_result, synonyms, goal_statut = self.swap_to_synonym(
                #         curent_result, synonyms, perturbed_indexes
                #     )

        if goal_statut == GoalFunctionResultStatus.SUCCEEDED:
            sucsefull_attacks_text_scores = [
                (atk.attacked_text, atk.score)
                for atk in sucsefull_attacks
                if atk.score > 0.5
            ]
            sucsefull_attacks_text_scores = list(set(sucsefull_attacks_text_scores))

            self.successful_attacks[initial_result.attacked_text] = (
                sucsefull_attacks_text_scores
            )
            ground_truth_output = sucsefull_attacks[0].ground_truth_output

            self.save_to_train(
                initial_result.attacked_text,
                sucsefull_attacks_text_scores,
                ground_truth_output,
                self.train_file,
            )

        try:
            best_result = self.min_perturbation(
                sucsefull_attacks, initial_result.attacked_text
            )
            return best_result
        except Exception:
            return curent_result

    def save_to_train(
        self,
        original_text,
        sucsefull_attacks_text_scores,
        ground_truth_output,
        train_file,
    ):
        successful_attacks = {original_text: sucsefull_attacks_text_scores}
        self.save_to_JSON(filename="temp.json", successful_attacks=successful_attacks)

        self.pipeline(ground_truth_output, train_file)

    def pipeline(self, ground_truth_output, train_file):
        clust = self.clust
        clust.file_ = "temp.json"
        sentence_embedding_vectors, masks, scores = clust.prepare_sentences()

        unified_mask = clust.get_global_unified_masks(masks=masks)

        sentences = clust.apply_mask_on_global_vectors(
            global_sentences=sentence_embedding_vectors, unified_masks=unified_mask
        )

        sentences = clust.global_matrix_to_global_sentences(
            global_matrix_sentences=sentences
        )

        global_clustering = clust.find_global_best_clustering(
            sentences, 10, "thumb-rule"
        )

        selected_samples = clust.global_select_diverce_sample(
            scores, sentences, global_clustering
        )

        clust.save_csv(selected_samples, ground_truth_output, train_file)

    def save_to_JSON(self, filename, successful_attacks):
        data_list = []
        input_dict = {}
        for atk in successful_attacks:
            successful_attacks_with_scores = [
                (text, score) for text, score in successful_attacks[atk]
            ]
            input_dict[" ".join(atk.words)] = successful_attacks_with_scores
        for original, samples in input_dict.items():
            samples_list = [
                {"attacked_text": " ".join(text.words), "score": score}
                for text, score in samples
            ]
            data_list.append({"original": original, "samples": samples_list})

        # Save the formatted data to a JSON file.
        with open(filename, "w") as json_file:
            json.dump({"data": data_list}, json_file, indent=4)

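    # Illustrative shape of the JSON written above (field names taken from the
    # code; the texts and scores are made-up placeholders):
    # {
    #     "data": [
    #         {
    #             "original": "the movie was great",
    #             "samples": [
    #                 {"attacked_text": "the film was great", "score": 0.93}
    #             ]
    #         }
    #     ]
    # }
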
    def swap_to_synonym(self, curent_result, synonyms, perturbed_indexes):
        # Give the runner-up synonym of the most perturbed word a chance, if
        # one exists.
        found = False
        for index in perturbed_indexes:
            if index in synonyms and len(synonyms[index]) != 0:
                # We could give the other indices a chance too, but experiments
                # showed little improvement for a large increase in the average
                # number of queries.
                synonym = max(synonyms[index], key=lambda x: x[0])
                if synonym[0] > 0.8:
                    curent_result.attacked_text = (
                        curent_result.attacked_text.replace_word_at_index(
                            index,
                            synonym[1],
                        )
                    )

                    synonyms = self.update_synonyms(
                        synonyms=synonyms,
                        index_to_remove=index,
                        curent_result=curent_result,
                    )
                    found = True
                return curent_result, synonyms, found

            # Remove indices with no remaining synonyms from the dict.
            synonyms.pop(index, None)

        return curent_result, synonyms, found

    def min_perturbation(self, results, original_text):
        # Initialize the minimum score and result.
        min_score = float("inf")
        min_result = None
        original_words = original_text.words
        for result in results:
            # Perturbation is the number of words changed.
            attacked_text = result.attacked_text
            perturbation = sum(
                i != j for i, j in zip(original_words, attacked_text.words)
            )

            # Update the minimum score and result if necessary.
            if perturbation < min_score:
                min_score = perturbation
                min_result = result

        return min_result

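    # Example (made-up): against the original "the movie was great", the result
    # "the film was great" has perturbation == 1, since exactly one aligned
    # word pair differs under zip().
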
    def check_transformation_compatibility(self, transformation):
        """Since it ranks words by their importance, the algorithm is limited
        to word-swap and deletion transformations."""
        return transformation_consists_of_word_swaps_and_deletions(transformation)

    def random_selection(
        self,
        non_perturbed_indexes,
        perturbed_indexes,
        synonyms,
        curent_result,
        initial_result,
    ):
        max_iterations = len(non_perturbed_indexes)
        sample_found = False
        for _ in range(max_iterations):
            random_index = random.choice(non_perturbed_indexes)
            transformed_text_candidates = self.get_transformations(
                curent_result.attacked_text,
                original_text=initial_result.attacked_text,
                indices_to_modify=[random_index],
            )
            if len(transformed_text_candidates) == 0:
                non_perturbed_indexes.remove(random_index)
                continue

            results, _ = self.get_goal_results([transformed_text_candidates[0]])

            # We add one perturbed word.
            max_result = max(results, key=lambda x: x.score)
            sample_found = True
            # Update the synonym cache.
            synonyms = self.update_synonyms(
                synonyms=synonyms,
                index_to_add=random_index,
                curent_result=curent_result,
                results=results,
                transformed_text_candidates=[transformed_text_candidates[0]],
            )

            # Apply a decay penalty to the existing indices.
            for index in perturbed_indexes:
                perturbed_indexes[index] = perturbed_indexes[index] * 0.9

            perturbed_indexes[random_index] = max_result.score - curent_result.score
            non_perturbed_indexes.remove(random_index)

            return (
                non_perturbed_indexes,
                perturbed_indexes,
                synonyms,
                max_result,
                sample_found,
            )

        return (
            non_perturbed_indexes,
            perturbed_indexes,
            synonyms,
            curent_result,
            sample_found,
        )

    @property
    def is_black_box(self):
        return "gradient" not in self.wir_method

    def extra_repr_keys(self):
        return ["wir_method"]
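
Below is a minimal usage sketch, not part of the committed file, showing how this search method could be wired into a TextAttack attack. The checkpoint name and dataset are illustrative assumptions, and `clust_helper` is a hypothetical stand-in for the project-specific clustering component that `pipeline()` expects; its construction is not shown in this commit.

import transformers

from textattack import Attack, Attacker
from textattack.datasets import HuggingFaceDataset
from textattack.goal_functions import UntargetedClassification
from textattack.models.wrappers import HuggingFaceModelWrapper
from textattack.transformations import WordSwapEmbedding

checkpoint = "textattack/bert-base-uncased-SST-2"  # assumed victim model
model = transformers.AutoModelForSequenceClassification.from_pretrained(checkpoint)
tokenizer = transformers.AutoTokenizer.from_pretrained(checkpoint)
model_wrapper = HuggingFaceModelWrapper(model, tokenizer)

goal_function = UntargetedClassification(model_wrapper)
transformation = WordSwapEmbedding(max_candidates=20)

# clust_helper (hypothetical) must provide prepare_sentences(),
# get_global_unified_masks(), etc., as called in pipeline() above.
search_method = GreedyMultipleGeneration(
    wir_method="delete", k=30, rollback_level=3, clust=clust_helper
)

attack = Attack(goal_function, [], transformation, search_method)
dataset = HuggingFaceDataset("glue", "sst2", split="validation")
Attacker(attack, dataset).attack_dataset()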