# Muli-Generationtion-attacks / GreedyMultyGeneration.py
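"""A greedy, multiple-generation search method for TextAttack.

The search first runs a greedy word-importance-ranking (WIR) attack
(``generate_naive_attack``) and then refines it for up to ``k`` iterations:
rolling back low-impact perturbations (``perturbed_index_swap``), swapping in
alternative synonyms (``swap_to_synonym``), and perturbing new random indexes
(``random_selection``). Every successful attack found along the way is kept,
and the one with the fewest changed words is returned.
"""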
import json
import random

import numpy as np
import torch
from torch.nn.functional import softmax

from textattack.goal_function_results import GoalFunctionResultStatus
from textattack.search_methods import SearchMethod
from textattack.shared.validators import (
    transformation_consists_of_word_swaps_and_deletions,
)

class GreedyMultipleGeneration(SearchMethod):
def __init__(
self,
wir_method="delete",
k=30,
embed=None,
file=None,
rollback_level=3,
naive=False,
clust=None,
train_file="train_file.csv",
):
        self.wir_method = wir_method
        self.k = k  # maximum number of refinement iterations
        self.embed = embed  # universal sentence encoder
        self.file = file  # file used to store textual-similarity records
        self.naive = naive
        self.rollback_level = rollback_level
        self.successful_attacks = {}
        self.clust = clust
        self.train_file = train_file  # CSV file consumed by the training pipeline
def _get_index_order(self, initial_text, indices_to_order):
"""Returns word indices of ``initial_text`` in descending order of
importance."""
if "unk" in self.wir_method:
leave_one_texts = [
initial_text.replace_word_at_index(i, "[UNK]") for i in indices_to_order
]
leave_one_results, search_over = self.get_goal_results(leave_one_texts)
index_scores = np.array([result.score for result in leave_one_results])
elif "delete" in self.wir_method:
leave_one_texts = [
initial_text.delete_word_at_index(i) for i in indices_to_order
]
leave_one_results, search_over = self.get_goal_results(leave_one_texts)
# print(f"leave_one_results : {leave_one_results}")
# print(f"search_over : {search_over}")
index_scores = np.array([result.score for result in leave_one_results])
elif "weighted-saliency" in self.wir_method:
# first, compute word saliency
leave_one_texts = [
initial_text.replace_word_at_index(i, "unk") for i in indices_to_order
]
leave_one_results, search_over = self.get_goal_results(leave_one_texts)
saliency_scores = np.array([result.score for result in leave_one_results])
softmax_saliency_scores = softmax(
torch.Tensor(saliency_scores), dim=0
).numpy()
# compute the largest change in score we can find by swapping each word
delta_ps = []
for idx in indices_to_order:
# Exit Loop when search_over is True - but we need to make sure delta_ps
# is the same size as softmax_saliency_scores
if search_over:
delta_ps = delta_ps + [0.0] * (
len(softmax_saliency_scores) - len(delta_ps)
)
break
transformed_text_candidates = self.get_transformations(
initial_text,
original_text=initial_text,
indices_to_modify=[idx],
)
if not transformed_text_candidates:
# no valid synonym substitutions for this word
delta_ps.append(0.0)
continue
swap_results, search_over = self.get_goal_results(
transformed_text_candidates
)
score_change = [result.score for result in swap_results]
if not score_change:
delta_ps.append(0.0)
continue
max_score_change = np.max(score_change)
delta_ps.append(max_score_change)
index_scores = softmax_saliency_scores * np.array(delta_ps)
elif "gradient" in self.wir_method:
victim_model = self.get_victim_model()
index_scores = np.zeros(len(indices_to_order))
grad_output = victim_model.get_grad(initial_text.tokenizer_input)
gradient = grad_output["gradient"]
word2token_mapping = initial_text.align_with_model_tokens(victim_model)
for i, index in enumerate(indices_to_order):
matched_tokens = word2token_mapping[index]
if not matched_tokens:
index_scores[i] = 0.0
else:
agg_grad = np.mean(gradient[matched_tokens], axis=0)
index_scores[i] = np.linalg.norm(agg_grad, ord=1)
            search_over = False
        else:
            raise ValueError(f"Unsupported wir_method: {self.wir_method}")

        index_order = np.array(indices_to_order)[(-index_scores).argsort()]
index_scores = sorted(index_scores, reverse=True)
return index_order, search_over, index_scores
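    # Example (illustrative): with wir_method="delete" and the sentence
    # "the movie was great", the word whose deletion causes the largest drop
    # in the goal-function score (often "great" for a positive review) is
    # ranked first.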
    # This performs a rollback for reducing perturbation only.
    def swap_to_origin(self, cur_result, initial_result, index):
        """Replace the word at ``index`` with its original word and return the
        resulting goal-function result."""
new_attacked_text = cur_result.attacked_text.replace_word_at_index(
index, initial_result.attacked_text.words[index]
)
result, _ = self.get_goal_results([new_attacked_text])
return result[0]
    @staticmethod
    def check_synonym_validity(
        ind, ind_synonym, Synonym_indices, Current_attacked_Results, j, synonym
    ):
"""Checks if a synonym is valid for a given index in the attacked text.
Args:
ind: The index of the word in the attacked text.
ind_synonym: The index of the synonym in the list of synonyms.
Synonym_indices: A dictionary of synonym indices.
Current_attacked_Results: A list of AttackedResult objects.
j: The index of the current AttackedResult object in the list.
synonym: The synonym to check.
Returns:
True if the synonym is valid, False otherwise."""
# Check if the synonym has already been chosen.
if (ind, ind_synonym) in Synonym_indices:
return False
# Get the current attacked text and its words.
current_attacked_text = Current_attacked_Results[j].attacked_text
current_attacked_words = current_attacked_text.words
        # Check if the synonym is already the word at this position.
        if synonym == current_attacked_words[ind]:
            return False
return True
    def generate_naive_attack(self, initial_result):
        curent_result = initial_result
        # dict of perturbed indexes with their score gains on the original text
        perturbed_indexes = {}
        # possible synonyms for each index, with their scores on the original
        # text, kept to reduce the average number of queries
        synonyms = {}
        # track indexes with no transformations so we avoid recomputing them,
        # again reducing the average number of queries
        non_usefull_indexes = []
attacked_text = initial_result.attacked_text
_, indices_to_order = self.get_indices_to_order(attacked_text)
# Sort words by order of importance
index_order, search_over, _ = self._get_index_order(
attacked_text, indices_to_order
)
        # iterate through words in order of importance
for index in index_order:
if search_over:
break
transformed_text_candidates = self.get_transformations(
curent_result.attacked_text,
original_text=initial_result.attacked_text,
indices_to_modify=[index],
)
if len(transformed_text_candidates) == 0:
                # track words with no candidates so we can skip them later
non_usefull_indexes.append(index)
continue
else:
results, search_over = self.get_goal_results(
transformed_text_candidates
)
max_result = max(results, key=lambda x: x.score)
if max_result.score > curent_result.score:
                if not self.naive:
                    # store perturbed indexes with their score gain
                    perturbed_indexes[index] = max_result.score - curent_result.score
                    # keep every synonym except the one we have just used
synonyms[index] = [
(results[i].score, trans.words[index])
for i, trans in enumerate(transformed_text_candidates)
if trans.words[index] != max_result.attacked_text.words[index]
]
curent_result = max_result
if curent_result.goal_status == GoalFunctionResultStatus.SUCCEEDED:
return (
curent_result,
perturbed_indexes,
non_usefull_indexes,
synonyms,
curent_result.goal_status,
)
return (
curent_result,
perturbed_indexes,
non_usefull_indexes,
synonyms,
curent_result.goal_status,
)
    # TODO: track how many words were rolled back, for richer statistics
def perturbed_index_swap(
self,
initial_result,
curent_result,
non_perturbed_indexes,
perturbed_indexes,
synonyms,
steps,
):
        past_curent_result = curent_result
        rollback_found = False
        steps = min(steps, len(perturbed_indexes) - 1)
        sucsefull_attacks = []
        for _ in range(steps):
            # TODO: finding the least important perturbed word in the new
            # attacked sample is expensive
            # the perturbed index contributing the least to the score
            rollback_index = min(perturbed_indexes, key=perturbed_indexes.get)
            # TODO: remove it from perturbed_indexes and add it back to
            # non_perturbed_indexes, but with a penalty. What should the
            # penalty look like? It could go to the end of the queue with a
            # "visited" flag, or we could simply drop it.
            perturbed_indexes.pop(rollback_index, None)
for index in non_perturbed_indexes:
                # early return: keep at least one perturbed index
if len(perturbed_indexes) == 1:
return (
curent_result,
non_perturbed_indexes,
perturbed_indexes,
synonyms,
sucsefull_attacks,
rollback_found,
)
                # get candidates for this non-perturbed word
transformed_text_candidates = self.get_transformations(
curent_result.attacked_text,
original_text=initial_result.attacked_text,
indices_to_modify=[index],
)
                if len(transformed_text_candidates) == 0:
                    non_perturbed_indexes.remove(index)
                    continue  # no candidates for this index; skip it
results, _ = self.get_goal_results(transformed_text_candidates)
# we add one perturbed word
max_result = max(results, key=lambda x: x.score)
for res in results:
if res.score > curent_result.score:
if res.goal_status == GoalFunctionResultStatus.SUCCEEDED:
synonyms = self.update_synonyms(
synonyms=synonyms,
index_to_add=index,
index_to_remove=None,
curent_result=res,
results=results,
transformed_text_candidates=transformed_text_candidates,
)
                            # store this successful attack
                            sucsefull_attacks.append(res)
                # we got a better score
                if max_result.score > curent_result.score:
                    # find the perturbed index contributing least on the original text
                    inferior = min(perturbed_indexes, key=perturbed_indexes.get)
                    non_perturbed_indexes.remove(index)  # remove the newly perturbed index
                    perturbed_indexes[index] = max_result.score - curent_result.score
                    # restore one perturbed word to its original
result_rollback = self.swap_to_origin(
max_result, initial_result, rollback_index
)
perturbed_indexes.pop(inferior, None)
new_attacked_text = (
result_rollback.attacked_text.replace_word_at_index(
inferior,
initial_result.attacked_text.words[inferior],
)
)
result, _ = self.get_goal_results([new_attacked_text])
result_rollback = max(result, key=lambda x: x.score)
for res in result:
if res.goal_status == GoalFunctionResultStatus.SUCCEEDED:
synonyms = self.update_synonyms(
synonyms,
index,
inferior,
res,
results,
transformed_text_candidates,
)
                            # store this successful attack
                            sucsefull_attacks.append(res)
if (
result_rollback.goal_status
== GoalFunctionResultStatus.SUCCEEDED
):
rollback_found = True
synonyms = self.update_synonyms(
synonyms,
index,
inferior,
result_rollback,
results,
transformed_text_candidates,
)
curent_result = result_rollback
if rollback_found:
return (
curent_result,
non_perturbed_indexes,
perturbed_indexes,
synonyms,
sucsefull_attacks,
rollback_found,
)
return (
past_curent_result,
non_perturbed_indexes,
perturbed_indexes,
synonyms,
sucsefull_attacks,
rollback_found,
)
def update_synonyms(
self,
synonyms,
index_to_add=None,
index_to_remove=None,
curent_result=None,
results=None,
transformed_text_candidates=None,
):
"""Return an updated list of synonyms"""
if index_to_remove in synonyms and len(synonyms[index_to_remove]) != 0:
# remove the used synonym of certain index
synonyms[index_to_remove] = [
syn
for syn in synonyms[index_to_remove]
if syn[1] != curent_result.attacked_text.words[index_to_remove]
]
# add synonyms of new perturbated word with their score
if index_to_add is not None and transformed_text_candidates is not None:
synonyms[index_to_add] = [
(results[i].score, trans.words[index_to_add])
for i, trans in enumerate(transformed_text_candidates)
if trans.words[index_to_add]
!= curent_result.attacked_text.words[index_to_add]
]
return synonyms
def get_non_perturbed_indexes(
self, initial_result, perturbed_indexes, non_usefull_indexes
):
"""Return a list of non perturbed indexes"""
all_indexes = set(range(len(initial_result.attacked_text.words)))
perturbed_indexes_set = set(perturbed_indexes.keys())
non_usefull_indexes_set = set(non_usefull_indexes)
non_perturbed_indexes = list(
all_indexes - perturbed_indexes_set - non_usefull_indexes_set
)
return non_perturbed_indexes
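    # perform_search orchestrates the whole attack:
    #   1. run the naive greedy attack once (generate_naive_attack);
    #   2. for up to ``k`` iterations, take the pending successful attack with
    #      the best score (or fall back to synonym swaps / random selection)
    #      and try to roll back low-impact perturbations (perturbed_index_swap);
    #   3. persist the successful attacks and return the one with the fewest
    #      changed words (min_perturbation).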
def perform_search(self, initial_result):
(
curent_result,
perturbed_indexes,
non_usefull_indexes,
synonyms,
goal_statut,
) = self.generate_naive_attack(initial_result)
sucsefull_attacks = [curent_result]
new_curent_sucsefull_attacks = [curent_result]
if not self.naive:
            # perturbed_index_swap is our first priority (when the attack succeeds, goal_statut == 0)
for i in range(self.k):
non_perturbed_indexes = self.get_non_perturbed_indexes(
initial_result, perturbed_indexes, non_usefull_indexes
)
if len(new_curent_sucsefull_attacks) != 0:
                    # choose the next text to process: take the one with the maximum score
curent_result = max(
new_curent_sucsefull_attacks, key=lambda x: x.score
)
new_curent_sucsefull_attacks.remove(curent_result)
else:
curent_result, synonyms, synonym_found = self.swap_to_synonym(
curent_result, synonyms, perturbed_indexes
)
                    if synonym_found:
sucsefull_attacks.append(curent_result)
new_curent_sucsefull_attacks.append(curent_result)
continue
else:
non_perturbed_indexes = self.get_non_perturbed_indexes(
initial_result, perturbed_indexes, non_usefull_indexes
)
(
non_perturbed_indexes,
perturbed_indexes,
synonyms,
max_result,
sample_found,
) = self.random_selection(
non_perturbed_indexes,
perturbed_indexes,
synonyms,
curent_result,
initial_result,
)
                        if sample_found:
new_curent_sucsefull_attacks.append(max_result)
sucsefull_attacks.append(curent_result)
else:
break
if i % 3 == 0:
non_perturbed_indexes = self.get_non_perturbed_indexes(
initial_result, perturbed_indexes, non_usefull_indexes
)
(
non_perturbed_indexes,
perturbed_indexes,
synonyms,
max_result,
sample_found,
) = self.random_selection(
non_perturbed_indexes,
perturbed_indexes,
synonyms,
curent_result,
initial_result,
)
                    if sample_found:
new_curent_sucsefull_attacks.append(max_result)
sucsefull_attacks.append(curent_result)
if len(perturbed_indexes) > 1 and not goal_statut:
non_perturbed_indexes = self.get_non_perturbed_indexes(
initial_result, perturbed_indexes, non_usefull_indexes
)
(
curent_result,
non_perturbed_indexes,
perturbed_indexes,
synonyms,
sucsefull_attacks_partial,
rollback_found,
) = self.perturbed_index_swap(
initial_result,
curent_result,
non_perturbed_indexes,
perturbed_indexes,
synonyms,
steps=self.rollback_level,
)
if len(sucsefull_attacks_partial) != 0:
sucsefull_attacks.extend(sucsefull_attacks_partial)
new_curent_sucsefull_attacks.extend(sucsefull_attacks_partial)
                    # Action 2: when no rollback is found, try swapping in a synonym, aiming for a better result
                    if not rollback_found:
curent_result, synonyms, synonym_found = self.swap_to_synonym(
curent_result, synonyms, perturbed_indexes
)
if synonym_found == True:
sucsefull_attacks.append(curent_result)
new_curent_sucsefull_attacks.append(curent_result)
            # if the attack failed we could give another synonym a chance;
            # we skip this for now because no improvement was found:
            # elif goal_statut == 1:
            #     curent_result, synonyms, goal_statut = self.swap_to_synonym(
            #         curent_result, synonyms, perturbed_indexes
            #     )
        if goal_statut == GoalFunctionResultStatus.SUCCEEDED:
            sucsefull_attacks_text_scores = [
                (atk.attacked_text, atk.score)
                for atk in sucsefull_attacks
                if atk.score > 0.5
            ]
sucsefull_attacks_text_scores = list(set(sucsefull_attacks_text_scores))
            self.successful_attacks[initial_result.attacked_text] = (
                sucsefull_attacks_text_scores
            )
            ground_truth_output = sucsefull_attacks[0].ground_truth_output
            self.save_to_train(
                initial_result.attacked_text,
                sucsefull_attacks_text_scores,
                ground_truth_output,
                self.train_file,
            )
        try:
            best_result = self.min_perturbation(
                sucsefull_attacks, initial_result.attacked_text
            )
            return best_result
        except Exception:
            return curent_result
def save_to_train(
self,
original_text,
sucsefull_attacks_text_scores,
ground_truth_output,
train_file,
):
        # ``original_text`` is an AttackedText; map it to its successful attacks
        successful_attacks = {original_text: sucsefull_attacks_text_scores}
self.save_to_JSON(filename="temp.json", successful_attacks=successful_attacks)
self.pipeline(ground_truth_output, train_file)
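    # pipeline data flow (as implemented below): temp.json -> sentence
    # embeddings + masks -> masked global vectors -> global clustering
    # (up to 10 clusters, "thumb-rule" selection) -> diverse sample
    # selection -> rows appended to the training CSV.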
def pipeline(self, ground_truth_output, train_file):
clust = self.clust
clust.file_ = "temp.json"
sentence_embedding_vectors, masks, scores = clust.prepare_sentences()
unified_mask = clust.get_global_unified_masks(masks=masks)
sentences = clust.apply_mask_on_global_vectors(
global_sentences=sentence_embedding_vectors, unified_masks=unified_mask
)
sentences = clust.global_matrix_to_global_sentences(
global_matrix_sentences=sentences
)
global_clustering = clust.find_global_best_clustering(
sentences, 10, "thumb-rule"
)
selected_samples = clust.global_select_diverce_sample(
scores, sentences, global_clustering
)
clust.save_csv(selected_samples, ground_truth_output, train_file)
def save_to_JSON(self, filename, successful_attacks):
data_list = []
input_dict = {}
        for atk in successful_attacks:
            # each value is already a list of (attacked_text, score) pairs
            input_dict[" ".join(atk.words)] = list(successful_attacks[atk])
for original, samples in input_dict.items():
samples_list = [
{"attacked_text": " ".join(text.words), "score": score}
for text, score in samples
]
data_list.append({"original": original, "samples": samples_list})
# Save the formatted data to a JSON file
with open(filename, "w") as json_file:
json.dump({"data": data_list}, json_file, indent=4)
    def swap_to_synonym(self, curent_result, synonyms, perturbed_indexes):
        # give the runner-up synonym of the most perturbed word a chance, if one exists
        found = False
        for index in perturbed_indexes:
            if index in synonyms and len(synonyms[index]) != 0:
                # what about the other indexes? we could give them a chance too.
                # Answer: experiments show little improvement, against a large
                # increase in the average number of queries.
                synonym = max(synonyms[index], key=lambda x: x[0])
                if synonym[0] > 0.8:
                    new_attacked_text = curent_result.attacked_text.replace_word_at_index(
                        index, synonym[1]
                    )
                    # note: this mutates the result object's attacked text in place
                    curent_result.attacked_text = new_attacked_text
synonyms = self.update_synonyms(
synonyms=synonyms,
index_to_remove=index,
curent_result=curent_result,
)
found = True
return curent_result, synonyms, found
            # remove indexes with no remaining synonyms
synonyms.pop(index, None)
return curent_result, synonyms, found
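    # Example: with original "the movie was great" and an attacked text
    # "the film was excellent", the perturbation count below is 2 (indexes 1
    # and 3 differ), so an attack changing only one word would be preferred.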
def min_perturbation(self, results, original_text):
# Initialize minimum score and result
min_score = float("inf")
min_result = None
        original_words = original_text.words
        for result in results:
            # Perturbation = the number of words changed
            attacked_text = result.attacked_text
            perturbation = sum(
                i != j for i, j in zip(original_words, attacked_text.words)
            )
# Update minimum score and result if necessary
if perturbation < min_score:
min_score = perturbation
min_result = result
return min_result
def check_transformation_compatibility(self, transformation):
"""Since it ranks words by their importance, the algorithm is
limited to word swap and deletion transformations."""
return transformation_consists_of_word_swaps_and_deletions(transformation)
def random_selection(
self,
non_perturbed_indexes,
perturbed_indexes,
synonyms,
curent_result,
initial_result,
):
max_iterations = len(non_perturbed_indexes)
sample_found = False
for _ in range(max_iterations):
random_index = random.choice(non_perturbed_indexes)
transformed_text_candidates = self.get_transformations(
curent_result.attacked_text,
original_text=initial_result.attacked_text,
indices_to_modify=[random_index],
)
if len(transformed_text_candidates) == 0:
non_perturbed_indexes.remove(random_index)
continue
            # evaluate only the first candidate to keep the query count low
            results, _ = self.get_goal_results([transformed_text_candidates[0]])
            # we add one perturbed word
            max_result = max(results, key=lambda x: x.score)
sample_found = True
# update synonym
synonyms = self.update_synonyms(
synonyms=synonyms,
index_to_add=random_index,
curent_result=curent_result,
results=results,
transformed_text_candidates=[transformed_text_candidates[0]],
)
            # decay existing perturbed-index scores so older perturbations
            # gradually lose priority (a mild penalty)
            for index in perturbed_indexes:
                perturbed_indexes[index] = perturbed_indexes[index] * 0.9
perturbed_indexes[random_index] = max_result.score - curent_result.score
non_perturbed_indexes.remove(random_index)
return (
non_perturbed_indexes,
perturbed_indexes,
synonyms,
max_result,
sample_found,
)
return (
non_perturbed_indexes,
perturbed_indexes,
synonyms,
curent_result,
sample_found,
)
    @property
    def is_black_box(self):
        return "gradient" not in self.wir_method
def extra_repr_keys(self):
return ["wir_method"]