{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "textattack: Loading \u001b[94mdatasets\u001b[0m dataset \u001b[94mrotten_tomatoes\u001b[0m, split \u001b[94mtest\u001b[0m.\n" ] } ], "source": [ "# Import the model\n", "import transformers\n", "from textattack.models.wrappers import HuggingFaceModelWrapper\n", "from textattack.models.helpers import *\n", "\n", "from textattack.datasets import HuggingFaceDataset\n", "\n", "# pretrained_link = 'textattack/distilbert-base-cased-CoLA'\n", "# pretrained_link = \"textattack/bert-base-uncased-yelp-polarity\"\n", "# pretrained_link = \"textattack/bert-base-uncased-imdb\"\n", "pretrained_link = \"textattack/bert-base-uncased-rotten-tomatoes\"\n", "# data_set = \"yelp_polarity\"\n", "\n", "# pretrained_link = \"textattack/bert-base-uncased-ag-news\"\n", "# pretrained_link = '/content/drive/MyDrive/Travail-Doctorat/Projects/phyton/TextAttack_Project/trained_models/pwws_bert-base-uncased-imdb/checkpoint-epoch-4'\n", "# pretrained_link = '/content/drive/MyDrive/Travail-Doctorat/Projects/phyton/TextAttack_Project/trained_models/our_bert-base-uncased-imdb/checkpoint-epoch-4'\n", "\n", "# data_set = \"cola\"\n", "# pretrained_link = \"textattack/distilbert-base-uncased-imdb\"\n", "# pretrained_link = \"textattack/xlnet-base-cased-rotten-tomatoes\"\n", "# pretrained_link = \"textattack/xlnet-base-cased-ag-news\"\n", "# pretrained_link = \"textattack/albert-base-v2-rotten_tomatoes\"\n", "# pretrained_link = \"textattack/albert-base-v2-ag-news\"\n", "# pretrained_link = \"textattack/roberta-base-rotten-tomatoes\"\n", "# pretrained_link = \"textattack/albert-base-v2-rotten-tomatoes\"\n", "# data_set = \"ag_news\"\n", "data_set = \"rotten_tomatoes\"\n", "# data_set = \"sms_spam\"\n", "# data_set = \"MNLI\"\n", "# pretrained_link = \"be7rt-base-uncased\"\n", "# pretrained_link =\"textattack/bert-base-uncased-rotten-tomatoes\"\n", "\n", "# pretrained_link = r\"./\"\n", "\n", "model = transformers.AutoModelForSequenceClassification.from_pretrained(pretrained_link)\n", "tokenizer = transformers.AutoTokenizer.from_pretrained(pretrained_link)\n", "\n", "model_wrapper = HuggingFaceModelWrapper(model, tokenizer)\n", "\n", "dataset = HuggingFaceDataset(data_set, None, \"test\", shuffle=False)\n", "# dataset = HuggingFaceDataset(data_set, None, \"train\", shuffle=False, dataset_columns=(['sms'], 'label'))\n", "\n", "import sys\n", "\n", "sys.setrecursionlimit(3000)" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "import tensorflow_hub as hub\n", "\n", "embed = hub.load(\"https://tfhub.dev/google/universal-sentence-encoder/4\")" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "textattack: Unknown if model of class compatible with goal function .\n" ] } ], "source": [ "# making the necessary importations\n", "\n", "import numpy as np\n", "import torch\n", "from torch.nn.functional import softmax\n", "\n", "from textattack.goal_functions import UntargetedClassification\n", "from textattack.goal_function_results import GoalFunctionResultStatus\n", "from textattack.search_methods import SearchMethod\n", "from textattack.shared.validators import (\n", " transformation_consists_of_word_swaps_and_deletions,\n", ")\n", "import numpy as np\n", "import torch\n", "from torch.nn.functional import softmax\n", "import json\n", "\n", "from 
textattack.constraints.pre_transformation import (\n", " RepeatModification,\n", " StopwordModification,\n", ")\n", "from textattack.goal_function_results import GoalFunctionResultStatus\n", "from textattack.search_methods import SearchMethod\n", "from textattack.shared.validators import (\n", " transformation_consists_of_word_swaps_and_deletions,\n", ")\n", "from textattack.constraints.semantics import WordEmbeddingDistance \n", "from textattack.constraints.grammaticality import PartOfSpeech \n", "from textattack.constraints.semantics.sentence_encoders import UniversalSentenceEncoder\n", "from textattack.transformations.word_swaps.word_swap_embedding import WordSwapEmbedding\n", "\n", "transformation = WordSwapEmbedding(max_candidates=50)\n", "use_constraint = UniversalSentenceEncoder(\n", " threshold=0.840845057,\n", " metric=\"angular\",\n", " compare_against_original=False,\n", " window_size=15,\n", " skip_text_shorter_than_window=True,\n", ")\n", "\n", "constraints = [\n", " RepeatModification(),\n", " StopwordModification(),\n", " use_constraint,\n", " PartOfSpeech(allow_verb_noun_swap=True),\n", " WordEmbeddingDistance(min_cos_sim=0.5),\n", "]\n", "\n", "\n", "goal_function = UntargetedClassification(model_wrapper, False, False)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Multiple Generation" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "import random\n", "\n", "\n", "class GreedyMultipleGeneration(SearchMethod):\n", "\n", " def __init__(\n", " self,\n", " wir_method=\"delete\",\n", " k=30,\n", " embed=None,\n", " file=None,\n", " rollback_level=3,\n", " naive=False,\n", "\n", " ):\n", "\n", " self.wir_method = wir_method\n", "\n", " self.k = k # number of generated texts\n", "\n", " self.embed = embed # universal sentence encoder\n", "\n", " self.file = file # similarity file to store the textual similarity\n", "\n", " self.naive = naive\n", "\n", " self.rollback_level = rollback_level\n", "\n", " self.successful_attacks = {}\n", "\n", "\n", " def _get_index_order(self, initial_text, indices_to_order):\n", " \"\"\"Returns word indices of ``initial_text`` in descending order of\n", " importance.\"\"\"\n", "\n", "\n", " if \"unk\" in self.wir_method:\n", "\n", " leave_one_texts = [\n", " initial_text.replace_word_at_index(i, \"[UNK]\") for i in indices_to_order\n", " ]\n", "\n", " leave_one_results, search_over = self.get_goal_results(leave_one_texts)\n", "\n", " index_scores = np.array([result.score for result in leave_one_results])\n", "\n", "\n", " elif \"delete\" in self.wir_method:\n", "\n", " leave_one_texts = [\n", " initial_text.delete_word_at_index(i) for i in indices_to_order\n", " ]\n", "\n", " leave_one_results, search_over = self.get_goal_results(leave_one_texts)\n", "\n", "\n", " # print(f\"leave_one_results : {leave_one_results}\")\n", "\n", " # print(f\"search_over : {search_over}\")\n", "\n", "\n", " index_scores = np.array([result.score for result in leave_one_results])\n", "\n", "\n", " elif \"weighted-saliency\" in self.wir_method:\n", "\n", " # first, compute word saliency\n", "\n", " leave_one_texts = [\n", " initial_text.replace_word_at_index(i, \"unk\") for i in indices_to_order\n", " ]\n", "\n", " leave_one_results, search_over = self.get_goal_results(leave_one_texts)\n", "\n", " saliency_scores = np.array([result.score for result in leave_one_results])\n", "\n", "\n", " softmax_saliency_scores = softmax(\n", " torch.Tensor(saliency_scores), dim=0\n", " ).numpy()\n", "\n", "\n", " # compute 
the largest change in score we can find by swapping each word\n", "\n", " delta_ps = []\n", "\n", " for idx in indices_to_order:\n", "\n", " # Exit Loop when search_over is True - but we need to make sure delta_ps\n", "\n", " # is the same size as softmax_saliency_scores\n", "\n", " if search_over:\n", "\n", "\n", " delta_ps = delta_ps + [0.0] * (\n", " len(softmax_saliency_scores) - len(delta_ps)\n", " )\n", "\n", " break\n", "\n", "\n", " transformed_text_candidates = self.get_transformations(\n", " initial_text,\n", " original_text=initial_text,\n", " indices_to_modify=[idx],\n", " )\n", "\n", " if not transformed_text_candidates:\n", "\n", " # no valid synonym substitutions for this word\n", "\n", "\n", " delta_ps.append(0.0)\n", " continue\n", "\n", "\n", " swap_results, search_over = self.get_goal_results(\n", " transformed_text_candidates\n", "\n", " )\n", "\n", " score_change = [result.score for result in swap_results]\n", "\n", " if not score_change:\n", "\n", "\n", " delta_ps.append(0.0)\n", " continue\n", "\n", "\n", " max_score_change = np.max(score_change)\n", "\n", " delta_ps.append(max_score_change)\n", "\n", "\n", " index_scores = softmax_saliency_scores * np.array(delta_ps)\n", "\n", "\n", " elif \"gradient\" in self.wir_method:\n", "\n", " victim_model = self.get_victim_model()\n", "\n", "\n", " index_scores = np.zeros(len(indices_to_order))\n", "\n", " grad_output = victim_model.get_grad(initial_text.tokenizer_input)\n", "\n", " gradient = grad_output[\"gradient\"]\n", "\n", " word2token_mapping = initial_text.align_with_model_tokens(victim_model)\n", "\n", " for i, index in enumerate(indices_to_order):\n", "\n", " matched_tokens = word2token_mapping[index]\n", "\n", " if not matched_tokens:\n", "\n", " index_scores[i] = 0.0\n", "\n", " else:\n", "\n", " agg_grad = np.mean(gradient[matched_tokens], axis=0)\n", "\n", " index_scores[i] = np.linalg.norm(agg_grad, ord=1)\n", "\n", "\n", " search_over = False\n", "\n", "\n", " index_order = np.array(indices_to_order)[(-index_scores).argsort()]\n", "\n", " index_scores = sorted(index_scores, reverse=True)\n", "\n", " return index_order, search_over, index_scores\n", "\n", "\n", " # This present a rollback for reducing perturbation only\n", "\n", " def swap_to_origin(self, cur_result, initial_result, index):\n", " \"\"\"Replace the chosen word with it origin a return a result instance\"\"\"\n", "\n", " new_attacked_text = cur_result.attacked_text.replace_word_at_index(\n", " index, initial_result.attacked_text.words[index]\n", " )\n", "\n", " result, _ = self.get_goal_results([new_attacked_text])\n", "\n", " return result[0]\n", "\n", "\n", " def check_synonym_validity(\n", " ind, ind_synonym, Synonym_indices, Current_attacked_Results, j, synonym\n", " ):\n", " \"\"\"Checks if a synonym is valid for a given index in the attacked text.\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", " Args:\n", "\n", "\n", "\n", "\n", "\n", "\n", " ind: The index of the word in the attacked text.\n", "\n", "\n", "\n", "\n", "\n", "\n", " ind_synonym: The index of the synonym in the list of synonyms.\n", "\n", "\n", "\n", "\n", "\n", "\n", " Synonym_indices: A dictionary of synonym indices.\n", "\n", "\n", "\n", "\n", "\n", "\n", " Current_attacked_Results: A list of AttackedResult objects.\n", "\n", "\n", "\n", "\n", "\n", "\n", " j: The index of the current AttackedResult object in the list.\n", "\n", "\n", "\n", "\n", "\n", "\n", " synonym: The synonym to check.\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", " Returns:\n", "\n", "\n", " True if 
the synonym is valid, False otherwise.\"\"\"\n", "\n", "\n", " # Check if the synonym has already been chosen.\n", "\n", " if (ind, ind_synonym) in Synonym_indices:\n", "\n", " return False\n", "\n", "\n", " # Get the current attacked text and its words.\n", "\n", "\n", " current_attacked_text = Current_attacked_Results[j].attacked_text\n", "\n", " current_attacked_words = current_attacked_text.words\n", "\n", "\n", " # Check if the synonym is already present in the attacked text.\n", "\n", " if synonym in current_attacked_words[ind]:\n", "\n", " return False\n", "\n", "\n", " return True\n", "\n", "\n", " def generate_naive_attack(self, initial_result):\n", "\n", " curent_result = initial_result\n", "\n", " # dict of preturbed indexes with theire scores on on the original text\n", "\n", " perturbed_indexes = {}\n", "\n", " # possible synonyms of each index with theire scores on the original text to reduce avg num queries\n", "\n", " synonyms = {}\n", "\n", " # to track indexes with no transformation so we avoid recalculate them to reduce avg num queries\n", "\n", " non_usefull_indexes = []\n", "\n", " attacked_text = initial_result.attacked_text\n", "\n", " _, indices_to_order = self.get_indices_to_order(attacked_text)\n", "\n", "\n", " # Sort words by order of importance\n", "\n", "\n", " index_order, search_over, _ = self._get_index_order(\n", " attacked_text, indices_to_order\n", " )\n", "\n", "\n", " # iterate through words by theire importance\n", "\n", " for index in index_order:\n", "\n", " if search_over:\n", "\n", " break\n", "\n", " transformed_text_candidates = self.get_transformations(\n", " curent_result.attacked_text,\n", " original_text=initial_result.attacked_text,\n", " indices_to_modify=[index],\n", " )\n", "\n", "\n", " if len(transformed_text_candidates) == 0:\n", "\n", " # track unusefull words to optimize the code .\n", "\n", " non_usefull_indexes.append(index)\n", " continue\n", "\n", "\n", " else:\n", "\n", "\n", " results, search_over = self.get_goal_results(\n", " transformed_text_candidates\n", "\n", " )\n", "\n", "\n", " max_result = max(results, key=lambda x: x.score)\n", "\n", "\n", " if max_result.score > curent_result.score:\n", "\n", "\n", " if self.naive == False:\n", "\n", " # store perturbed indexes with theire score\n", "\n", " perturbed_indexes[index] = max_result.score - curent_result.score\n", "\n", " # add all synonyms except the one we ve been using\n", "\n", " synonyms[index] = [\n", " (results[i].score, trans.words[index])\n", " for i, trans in enumerate(transformed_text_candidates)\n", " if trans.words[index] != max_result.attacked_text.words[index]\n", " ]\n", "\n", "\n", " curent_result = max_result\n", "\n", "\n", " if curent_result.goal_status == GoalFunctionResultStatus.SUCCEEDED:\n", "\n", " return (\n", " curent_result,\n", " perturbed_indexes,\n", " non_usefull_indexes,\n", " synonyms,\n", " curent_result.goal_status,\n", " )\n", "\n", "\n", " return (\n", " curent_result,\n", " perturbed_indexes,\n", " non_usefull_indexes,\n", " synonyms,\n", " curent_result.goal_status,\n", " )\n", "\n", "\n", " def perturbed_index_swap(\n", " self,\n", " initial_result,\n", " curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " steps,\n", " ):\n", "\n", " past_curent_result = curent_result\n", "\n", " # the index with minimum perturbation\n", "\n", " rollback_found = False\n", "\n", " steps = min(steps, len(perturbed_indexes) - 1)\n", "\n", " sucsefull_attacks = []\n", "\n", " for _ in range(steps):\n", "\n", 
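"            # Each rollback step drops the least-impactful perturbed index (smallest score gain),\n", "            # then tries to perturb a previously untouched index instead, keeping the variant only\n", "            # if the attack still succeeds with a different, ideally smaller, set of changed words.\n",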
"\n", " # TODO getting the least important perturbated word in the new attacked sample costs a lot\n", "\n", " rollback_index = min(perturbed_indexes, key=perturbed_indexes.get)\n", "\n", " # TODO remove from perturbed_indexes list and add it to non_perturbed_indexes but with punalitié\n", "\n", " # how punalité should look like ? it could be at the end of the quee with visited flag\n", "\n", " # or we can just eliminate it .\n", "\n", " perturbed_indexes.pop(rollback_index, None)\n", "\n", " for index in non_perturbed_indexes:\n", "\n", "\n", " # get candidates for non perturbed word\n", "\n", "\n", " transformed_text_candidates = self.get_transformations(\n", " curent_result.attacked_text,\n", " original_text=initial_result.attacked_text,\n", " indices_to_modify=[index],\n", " )\n", "\n", "\n", " if len(transformed_text_candidates) == 0:\n", "\n", " continue # wa7ed ma chaf wa7ed\n", "\n", "\n", " results, _ = self.get_goal_results(transformed_text_candidates)\n", "\n", "\n", " # we add one perturbed word\n", "\n", "\n", " max_result = max(results, key=lambda x: x.score)\n", "\n", "\n", " # we get better score\n", "\n", " if max_result.score > curent_result.score:\n", "\n", " # eplore minimum perturbation on the original text\n", "\n", "\n", " inferior = min(perturbed_indexes, key=perturbed_indexes.get)\n", "\n", " non_perturbed_indexes.remove(index) # remove perturbed index\n", "\n", "\n", " perturbed_indexes[index] = max_result.score - curent_result.score\n", "\n", " # restore one perturbed\n", "\n", " result_rollback = self.swap_to_origin(\n", " max_result, initial_result, rollback_index\n", " )\n", "\n", "\n", " perturbed_indexes.pop(inferior, None)\n", "\n", "\n", " new_attacked_text = (\n", " result_rollback.attacked_text.replace_word_at_index(\n", " inferior,\n", " initial_result.attacked_text.words[inferior],\n", " )\n", " )\n", "\n", "\n", " result, _ = self.get_goal_results([new_attacked_text])\n", "\n", "\n", " result_rollback = max(result, key=lambda x: x.score)\n", "\n", "\n", " if (\n", " result_rollback.goal_status\n", " == GoalFunctionResultStatus.SUCCEEDED\n", " ):\n", "\n", " synonyms = self.update_synonyms(\n", " synonyms,\n", " index,\n", " inferior,\n", " result_rollback,\n", " results,\n", " transformed_text_candidates,\n", " )\n", "\n", " # stock this sucssefull attack\n", "\n", " sucsefull_attacks.append(result_rollback)\n", "\n", "\n", " rollback_found = True\n", "\n", " curent_result = result_rollback\n", "\n", "\n", " if rollback_found:\n", "\n", " return (\n", " curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " sucsefull_attacks,\n", " rollback_found,\n", " )\n", "\n", " return (\n", " past_curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " sucsefull_attacks,\n", " rollback_found,\n", " )\n", "\n", " def random_selection(\n", " self,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " curent_result,\n", " initial_result,\n", " ):\n", " max_iterations = len(non_perturbed_indexes)\n", " sample_found = False\n", " for _ in range(max_iterations):\n", " random_index = random.choice(non_perturbed_indexes)\n", " transformed_text_candidates = self.get_transformations(\n", " curent_result.attacked_text,\n", " original_text=initial_result.attacked_text,\n", " indices_to_modify=[random_index],\n", " )\n", " if len(transformed_text_candidates) != 0:\n", " non_perturbed_indexes.remove(random_index)\n", " continue\n", "\n", " results, _ = 
self.get_goal_results(transformed_text_candidates)\n", "\n", " # we add one perturbed word\n", " max_result = max(results, key=lambda x: x.score)\n", " sample_found = True\n", " # update synonym\n", " synonyms = self.update_synonyms(\n", " synonyms=synonyms,\n", " index_to_add=random_index,\n", " curent_result=curent_result,\n", " results=results,\n", " transformed_text_candidates=transformed_text_candidates,\n", " )\n", " perturbed_indexes[random_index] = max_result.score - curent_result.score\n", "\n", " # penalty on previous\n", " for index in perturbed_indexes:\n", " perturbed_indexes[index] = perturbed_indexes[index] * 0.9\n", "\n", " return (\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " max_result,\n", " sample_found,\n", " )\n", " return (\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " max_result,\n", " sample_found,\n", " )\n", "\n", " def update_synonyms(\n", " self,\n", " synonyms,\n", " index_to_add=None,\n", " index_to_remove=None,\n", " curent_result=None,\n", " results=None,\n", " transformed_text_candidates=None,\n", " ):\n", " \"\"\"Return an updated list of synonyms\"\"\"\n", "\n", " if (\n", " index_to_remove is not None\n", " and index_to_remove in synonyms\n", " and len(synonyms[index_to_remove]) != 0\n", " ):\n", "\n", " # remove the used synonym of certain index\n", "\n", " synonyms[index_to_remove] = [\n", " syn\n", " for syn in synonyms[index_to_remove]\n", " if syn[1] != curent_result.attacked_text.words[index_to_remove]\n", " ]\n", "\n", "\n", " # add synonyms of new perturbated word with their score\n", "\n", " if index_to_add is not None and transformed_text_candidates is not None:\n", "\n", " synonyms[index_to_add] = [\n", " (results[i].score, trans.words[index_to_add])\n", " for i, trans in enumerate(transformed_text_candidates)\n", " if trans.words[index_to_add]\n", " != curent_result.attacked_text.words[index_to_add]\n", " ]\n", "\n", "\n", " return synonyms\n", "\n", "\n", " def get_non_perturbed_indexes(\n", " self, initial_result, perturbed_indexes, non_usefull_indexes\n", " ):\n", " \"\"\"Return a list of non perturbed indexes\"\"\"\n", "\n", " all_indexes = set(range(len(initial_result.attacked_text.words)))\n", "\n", " perturbed_indexes_set = set(perturbed_indexes.keys())\n", "\n", " non_usefull_indexes_set = set(non_usefull_indexes)\n", "\n", " non_perturbed_indexes = list(\n", " all_indexes - perturbed_indexes_set - non_usefull_indexes_set\n", " )\n", "\n", " return non_perturbed_indexes\n", "\n", "\n", " def perform_search(self, initial_result):\n", "\n", " (\n", " curent_result,\n", " perturbed_indexes,\n", " non_usefull_indexes,\n", " synonyms,\n", " goal_statut,\n", " ) = self.generate_naive_attack(initial_result)\n", "\n", " sucsefull_attacks = [curent_result]\n", "\n", "\n", " new_curent_sucsefull_attacks = [curent_result]\n", "\n", " if not self.naive:\n", "\n", " # perturbed_index_swap is our 1s priority (in case of attack succeed goal_statut = 0 )\n", "\n", "\n", " for _ in range(self.k):\n", "\n", "\n", " if len(new_curent_sucsefull_attacks) != 0:\n", "\n", " # how to decide on the next text to be treated here we work on the the one with max score\n", "\n", " curent_result = max(\n", " new_curent_sucsefull_attacks, key=lambda x: x.score\n", " )\n", "\n", " new_curent_sucsefull_attacks.remove(curent_result)\n", "\n", " else:\n", "\n", " curent_result, synonyms, synonym_found = self.swap_to_synonym(\n", " curent_result, synonyms, perturbed_indexes\n", " )\n", "\n", " if synonym_found 
== True:\n", " \n", " sucsefull_attacks.append(curent_result)\n", "\n", " new_curent_sucsefull_attacks.append(curent_result)\n", " continue\n", "\n", "\n", " else:\n", "\n", " break\n", "\n", "\n", " if len(perturbed_indexes) > 1 and not goal_statut:\n", "\n", " non_perturbed_indexes = self.get_non_perturbed_indexes(\n", " initial_result, perturbed_indexes, non_usefull_indexes\n", " )\n", "\n", " (\n", " curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " sucsefull_attacks_partial,\n", " rollback_found,\n", " ) = self.perturbed_index_swap(\n", " initial_result,\n", " curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " steps=self.rollback_level,\n", " )\n", "\n", " if len(sucsefull_attacks_partial) != 0:\n", "\n", " sucsefull_attacks.extend(sucsefull_attacks_partial)\n", "\n", " new_curent_sucsefull_attacks.extend(sucsefull_attacks_partial)\n", "\n", " # Action 2: the case where no rollback found we try to swap synonym and we aim to get better result\n", "\n", " if rollback_found == False:\n", "\n", " curent_result, synonyms, synonym_found = self.swap_to_synonym(\n", " curent_result, synonyms, perturbed_indexes\n", " )\n", "\n", " if synonym_found == True:\n", "\n", " sucsefull_attacks.append(curent_result)\n", "\n", " new_curent_sucsefull_attacks.append(curent_result)\n", "\n", "\n", " # if it's a failed attack we give chance for an other synonym\n", "\n", " # we will pass it for now because no improvment were found\n", "\n", "\n", " \"\"\"elif goal_statut == 1:\n", " curent_result, synonyms, goal_statut = self.swap_to_synonym(\n", " curent_result, synonyms, perturbed_indexes\n", " )\"\"\"\n", "\n", "\n", " if goal_statut == 0:\n", "\n", " sucsefull_attacks_text_scores = [\n", " (atk.attacked_text, atk.score) for atk in sucsefull_attacks\n", " ]\n", "\n", "\n", " self.successful_attacks[initial_result.attacked_text] = (\n", " sucsefull_attacks_text_scores\n", " )\n", "\n", "\n", " try:\n", "\n", " best_result = self.min_perturbation(\n", " sucsefull_attacks, initial_result.attacked_text\n", " )\n", "\n", " return best_result\n", " except:\n", " return curent_result\n", "\n", "\n", " def save_to_JSON(self, filename):\n", "\n", " data_list = []\n", "\n", " input_dict = {}\n", "\n", " for atk in self.successful_attacks:\n", "\n", " successful_attacks_with_scores = [\n", " (atk, score) for atk, score in self.successful_attacks[atk]\n", " ]\n", "\n", " input_dict[\" \".join(atk.words)] = successful_attacks_with_scores\n", "\n", " for original, samples in input_dict.items():\n", "\n", " samples_list = [\n", " {\"attacked_text\": \" \".join(text.words), \"score\": score}\n", " for text, score in samples\n", " ]\n", "\n", " data_list.append({\"original\": original, \"samples\": samples_list})\n", "\n", "\n", " # Save the formatted data to a JSON file\n", "\n", " with open(filename, \"w\") as json_file:\n", "\n", " json.dump({\"data\": data_list}, json_file, indent=4)\n", "\n", "\n", " def swap_to_synonym(self, curent_result, synonyms, perturbed_indexes):\n", "\n", " # giving chance to the second synonym of the most perturbated word if exists !\n", "\n", " found = False\n", "\n", " for index in perturbed_indexes:\n", "\n", " if index in synonyms and len(synonyms[index]) != 0:\n", "\n", " # what about other indexes we may give them chance too !\n", "\n", "\n", " synonym = max(synonyms[index], key=lambda x: x[0])\n", "\n", " new_attacked_text = curent_result.attacked_text.replace_word_at_index(\n", " index,\n", " 
synonym[1],\n", " )\n", "\n", "\n", " result, _ = self.get_goal_results([new_attacked_text])\n", "\n", "\n", " if result[0].goal_status == GoalFunctionResultStatus.SUCCEEDED:\n", "\n", " synonyms = self.update_synonyms(\n", " synonyms=synonyms,\n", " index_to_remove=index,\n", " curent_result=result[0],\n", " )\n", "\n", " found = True\n", "\n", " return result[0], synonyms, found\n", "\n", " # remove index with 0 synonymswithin the list\n", "\n", " synonyms.pop(index, None)\n", "\n", "\n", " return curent_result, synonyms, found\n", "\n", "\n", " def min_perturbation(self, results, original_text):\n", "\n", " # Initialize minimum score and result\n", "\n", " min_score = float(\"inf\")\n", "\n", " min_result = None\n", "\n", " original_text_splited = original_text.words\n", "\n", " for result in results:\n", "\n", " # Calculate perturbation as the number of words changed\n", "\n", " attacked_text = result.attacked_text\n", "\n", " perturbation = sum(\n", " i != j for i, j in zip(original_text_splited, attacked_text.words)\n", " )\n", "\n", "\n", " # Update minimum score and result if necessary\n", "\n", " if perturbation < min_score:\n", "\n", " min_score = perturbation\n", "\n", " min_result = result\n", "\n", " return min_result\n", "\n", "\n", " def check_transformation_compatibility(self, transformation):\n", " \"\"\"Since it ranks words by their importance, the algorithm is\n", "\n", "\n", "\n", "\n", "\n", "\n", " limited to word swap and deletion transformations.\"\"\"\n", "\n", " return transformation_consists_of_word_swaps_and_deletions(transformation)\n", "\n", "\n", " @property\n", " def is_black_box(self):\n", "\n", " if \"gradient\" in self.wir_method:\n", "\n", " return False\n", "\n", " else:\n", "\n", " return True\n", "\n", "\n", " def extra_repr_keys(self):\n", "\n", "\n", " return [\"wir_method\"]" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import difflib" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Do Not excute this cell" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import sys\n", "\n", "sys.setrecursionlimit(3000)\n", "lines = 1\n", "from textattack.search_methods import *\n", "from textattack import AttackArgs\n", "from clustering import Clustering\n", "attack_name = \"gradient-multy-generation\"\n", "\n", "\n", "search_method = GreedyMultipleGeneration(\n", " wir_method=attack_name, k=10, embed=embed, naive=True, rollback_level=3\n", ")\n", "\n", "attack_args = AttackArgs(num_examples=lines)\n", "# attack_args = AttackArgs( num_examples=lines,checkpoint_interval = 20,checkpoint_dir =checkpoint_path)\n", "attack = Attack(goal_function, constraints, transformation, search_method)\n", "\n", "clust = clust = Clustering(\n", " file_=\"delete 50 level 1.json\",\n", " victim_model_wrapper=victim_model_wrapper,\n", " victim_model=victim_model,\n", " attack=attack,\n", " output=\"selected sample from clustering BAE delete.json\"\n", " )\n", "attack.clust = clust\n", "attacker = Attacker(attack, dataset, attack_args)\n", "\n", "\n", "# results_iterable = attacker.attack_dataset()" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import torch\n", "\n", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "device(type='cuda')" ] }, "execution_count": 2, "metadata": {}, 
"output_type": "execute_result" } ], "source": [] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Num GPUs Available: 1\n" ] } ], "source": [ "import tensorflow as tf\n", "print(\"Num GPUs Available: \", len(tf.config.list_physical_devices('GPU')))\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import random\n", "\n", "\n", "class GreedyMultipleGeneration(SearchMethod):\n", " def __init__(\n", " self,\n", " wir_method=\"delete\",\n", " k=30,\n", " embed=None,\n", " file=None,\n", " rollback_level=3,\n", " naive=False,\n", " ):\n", " self.wir_method = wir_method\n", " self.k = k # number of generated texts\n", " self.embed = embed # universal sentence encoder\n", " self.file = file # similarity file to store the textual similarity\n", " self.naive = naive\n", " self.rollback_level = rollback_level\n", " self.successful_attacks = {}\n", "\n", " def _get_index_order(self, initial_text, indices_to_order):\n", " \"\"\"Returns word indices of ``initial_text`` in descending order of\n", " importance.\"\"\"\n", "\n", " if \"unk\" in self.wir_method:\n", " leave_one_texts = [\n", " initial_text.replace_word_at_index(i, \"[UNK]\") for i in indices_to_order\n", " ]\n", " leave_one_results, search_over = self.get_goal_results(leave_one_texts)\n", " index_scores = np.array([result.score for result in leave_one_results])\n", "\n", " elif \"delete\" in self.wir_method:\n", " leave_one_texts = [\n", " initial_text.delete_word_at_index(i) for i in indices_to_order\n", " ]\n", " leave_one_results, search_over = self.get_goal_results(leave_one_texts)\n", " # print(f\"leave_one_results : {leave_one_results}\")\n", " # print(f\"search_over : {search_over}\")\n", "\n", " index_scores = np.array([result.score for result in leave_one_results])\n", "\n", " elif \"weighted-saliency\" in self.wir_method:\n", " # first, compute word saliency\n", " leave_one_texts = [\n", " initial_text.replace_word_at_index(i, \"unk\") for i in indices_to_order\n", " ]\n", " leave_one_results, search_over = self.get_goal_results(leave_one_texts)\n", " saliency_scores = np.array([result.score for result in leave_one_results])\n", "\n", " softmax_saliency_scores = softmax(\n", " torch.Tensor(saliency_scores), dim=0\n", " ).numpy()\n", "\n", " # compute the largest change in score we can find by swapping each word\n", " delta_ps = []\n", " for idx in indices_to_order:\n", " # Exit Loop when search_over is True - but we need to make sure delta_ps\n", " # is the same size as softmax_saliency_scores\n", " if search_over:\n", " delta_ps = delta_ps + [0.0] * (\n", " len(softmax_saliency_scores) - len(delta_ps)\n", " )\n", " break\n", "\n", " 
transformed_text_candidates = self.get_transformations(\n", " initial_text,\n", " original_text=initial_text,\n", " indices_to_modify=[idx],\n", " )\n", " if not transformed_text_candidates:\n", " # no valid synonym substitutions for this word\n", " delta_ps.append(0.0)\n", " continue\n", " swap_results, search_over = self.get_goal_results(\n", " transformed_text_candidates\n", " )\n", " score_change = [result.score for result in swap_results]\n", " if not score_change:\n", " delta_ps.append(0.0)\n", " continue\n", " max_score_change = np.max(score_change)\n", " delta_ps.append(max_score_change)\n", "\n", " index_scores = softmax_saliency_scores * np.array(delta_ps)\n", "\n", " elif \"gradient\" in self.wir_method:\n", " victim_model = self.get_victim_model()\n", "\n", " index_scores = np.zeros(len(indices_to_order))\n", " grad_output = victim_model.get_grad(initial_text.tokenizer_input)\n", " gradient = grad_output[\"gradient\"]\n", " word2token_mapping = initial_text.align_with_model_tokens(victim_model)\n", " for i, index in enumerate(indices_to_order):\n", " matched_tokens = word2token_mapping[index]\n", " if not matched_tokens:\n", " index_scores[i] = 0.0\n", " else:\n", " agg_grad = np.mean(gradient[matched_tokens], axis=0)\n", " index_scores[i] = np.linalg.norm(agg_grad, ord=1)\n", "\n", " search_over = False\n", "\n", " index_order = np.array(indices_to_order)[(-index_scores).argsort()]\n", " index_scores = sorted(index_scores, reverse=True)\n", " return index_order, search_over, index_scores\n", "\n", " # This present a rollback for reducing perturbation only\n", " def swap_to_origin(self, cur_result, initial_result, index):\n", " \"\"\"Replace the chosen word with it origin a return a result instance\"\"\"\n", " new_attacked_text = cur_result.attacked_text.replace_word_at_index(\n", " index, initial_result.attacked_text.words[index]\n", " )\n", " result, _ = self.get_goal_results([new_attacked_text])\n", " return result[0]\n", "\n", " def check_synonym_validity(\n", " ind, ind_synonym, Synonym_indices, Current_attacked_Results, j, synonym\n", " ):\n", " \"\"\"Checks if a synonym is valid for a given index in the attacked text.\n", "\n", " Args:\n", " ind: The index of the word in the attacked text.\n", " ind_synonym: The index of the synonym in the list of synonyms.\n", " Synonym_indices: A dictionary of synonym indices.\n", " Current_attacked_Results: A list of AttackedResult objects.\n", " j: The index of the current AttackedResult object in the list.\n", " synonym: The synonym to check.\n", "\n", " Returns:\n", " True if the synonym is valid, False otherwise.\"\"\"\n", "\n", " # Check if the synonym has already been chosen.\n", " if (ind, ind_synonym) in Synonym_indices:\n", " return False\n", "\n", " # Get the current attacked text and its words.\n", " current_attacked_text = Current_attacked_Results[j].attacked_text\n", " current_attacked_words = current_attacked_text.words\n", "\n", " # Check if the synonym is already present in the attacked text.\n", " if synonym in current_attacked_words[ind]:\n", " return False\n", "\n", " return True\n", "\n", " def generate_naive_attack(self, initial_result):\n", " curent_result = initial_result\n", " # dict of preturbed indexes with theire scores on on the original text\n", " perturbed_indexes = {}\n", " # possible synonyms of each index with theire scores on the original text to reduce avg num queries\n", " synonyms = {}\n", " # to track indexes with no transformation so we avoid recalculate them to reduce avg num queries\n", " 
non_usefull_indexes = []\n", " attacked_text = initial_result.attacked_text\n", " _, indices_to_order = self.get_indices_to_order(attacked_text)\n", "\n", " # Sort words by order of importance\n", "\n", " index_order, search_over, _ = self._get_index_order(\n", " attacked_text, indices_to_order\n", " )\n", "\n", " # iterate through words by theire importance\n", " for index in index_order:\n", " if search_over:\n", " break\n", " transformed_text_candidates = self.get_transformations(\n", " curent_result.attacked_text,\n", " original_text=initial_result.attacked_text,\n", " indices_to_modify=[index],\n", " )\n", "\n", " if len(transformed_text_candidates) == 0:\n", " # track unusefull words to optimize the code .\n", " non_usefull_indexes.append(index)\n", " continue\n", " else:\n", " results, search_over = self.get_goal_results(\n", " transformed_text_candidates\n", " )\n", "\n", " max_result = max(results, key=lambda x: x.score)\n", "\n", " if max_result.score > curent_result.score:\n", " if self.naive == False:\n", " # store perturbed indexes with theire score\n", " perturbed_indexes[index] = max_result.score - curent_result.score\n", " # add all synonyms except the one we ve been using\n", " synonyms[index] = [\n", " (results[i].score, trans.words[index])\n", " for i, trans in enumerate(transformed_text_candidates)\n", " if trans.words[index] != max_result.attacked_text.words[index]\n", " ]\n", "\n", " curent_result = max_result\n", "\n", " if curent_result.goal_status == GoalFunctionResultStatus.SUCCEEDED:\n", " return (\n", " curent_result,\n", " perturbed_indexes,\n", " non_usefull_indexes,\n", " synonyms,\n", " curent_result.goal_status,\n", " )\n", "\n", " return (\n", " curent_result,\n", " perturbed_indexes,\n", " non_usefull_indexes,\n", " synonyms,\n", " curent_result.goal_status,\n", " )\n", "\n", " # TODO we can add depth to track how many words rolled back for more statistics\n", "\n", " def perturbed_index_swap(\n", " self,\n", " initial_result,\n", " curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " steps,\n", " ):\n", " past_curent_result = curent_result\n", " # the index with minimum perturbation\n", " rollback_found = False\n", " steps = min(steps, len(perturbed_indexes) - 1)\n", " sucsefull_attacks = []\n", " for _ in range(steps):\n", " # TODO getting the least important perturbated word in the new attacked sample costs a lot\n", " rollback_index = min(perturbed_indexes, key=perturbed_indexes.get)\n", " # TODO remove from perturbed_indexes list and add it to non_perturbed_indexes but with punalitié\n", " # how punalité should look like ? it could be at the end of the quee with visited flag\n", " # or we can just eliminate it .\n", " perturbed_indexes.pop(rollback_index, None)\n", "\n", " for index in non_perturbed_indexes:\n", "\n", " # get candidates for non perturbed word\n", " transformed_text_candidates = self.get_transformations(\n", " curent_result.attacked_text,\n", " original_text=initial_result.attacked_text,\n", " indices_to_modify=[index],\n", " )\n", "\n", " if len(transformed_text_candidates) == 0:\n", " # since there is high similarity between sentences we accept as an heuristic that there is no possible\n", " # transformations of this index for next generated texts . 
this will reduce avg num-Q\n", " non_perturbed_indexes.remove(index)\n", " continue # wa7ed ma chaf wa7ed\n", "\n", " results, _ = self.get_goal_results(transformed_text_candidates)\n", " # Keep only succssefull attacks so dont need to check again in swap synonym\n", " transformed_text_candidates = [\n", " transformed_text\n", " for candidate, transformed_text in zip(\n", " results, transformed_text_candidates\n", " )\n", " if candidate.goal_status == GoalFunctionResultStatus.SUCCEEDED\n", " ]\n", "\n", " # we add one perturbed word\n", " max_result = max(results, key=lambda x: x.score)\n", "\n", " # we get better score\n", " if max_result.score > curent_result.score:\n", " # explore minimum perturbation on the original text\n", " inferior = min(perturbed_indexes, key=perturbed_indexes.get)\n", " non_perturbed_indexes.remove(index) # remove perturbed index\n", "\n", " perturbed_indexes[index] = max_result.score - curent_result.score\n", " # restore one perturbed\n", " result_rollback = self.swap_to_origin(\n", " max_result, initial_result, rollback_index\n", " )\n", "\n", " perturbed_indexes.pop(inferior, None)\n", "\n", " new_attacked_text = (\n", " result_rollback.attacked_text.replace_word_at_index(\n", " inferior,\n", " initial_result.attacked_text.words[inferior],\n", " )\n", " )\n", "\n", " result, _ = self.get_goal_results([new_attacked_text])\n", "\n", " result_rollback = max(result, key=lambda x: x.score)\n", "\n", " if (\n", " result_rollback.goal_status\n", " == GoalFunctionResultStatus.SUCCEEDED\n", " ):\n", " synonyms = self.update_synonyms(\n", " synonyms,\n", " index,\n", " inferior,\n", " result_rollback,\n", " results,\n", " transformed_text_candidates,\n", " )\n", " # stock this sucssefull attack\n", " sucsefull_attacks.append(result_rollback)\n", "\n", " rollback_found = True\n", " curent_result = result_rollback\n", "\n", " if rollback_found:\n", " return (\n", " curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " sucsefull_attacks,\n", " rollback_found,\n", " )\n", " return (\n", " past_curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " sucsefull_attacks,\n", " rollback_found,\n", " )\n", "\n", " def random_selection(\n", " self,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " curent_result,\n", " initial_result,\n", " ):\n", " max_iterations = len(non_perturbed_indexes)\n", " sample_found = False\n", " for _ in range(max_iterations):\n", " random_index = random.choice(non_perturbed_indexes)\n", " transformed_text_candidates = self.get_transformations(\n", " curent_result.attacked_text,\n", " original_text=initial_result.attacked_text,\n", " indices_to_modify=[random_index],\n", " )\n", " if len(transformed_text_candidates) == 0:\n", " non_perturbed_indexes.remove(random_index)\n", " continue\n", "\n", " results, _ = self.get_goal_results([transformed_text_candidates[0]])\n", "\n", " # we add one perturbed word\n", " max_result = max(results, key=lambda x: x.score)\n", " sample_found = True\n", " # update synonym\n", " synonyms = self.update_synonyms(\n", " synonyms=synonyms,\n", " index_to_add=random_index,\n", " curent_result=curent_result,\n", " results=results,\n", " transformed_text_candidates=[transformed_text_candidates[0]],\n", " )\n", "\n", " # penalty on existing indexes\n", " for index in perturbed_indexes:\n", " perturbed_indexes[index] = perturbed_indexes[index] * 0.9\n", "\n", " perturbed_indexes[random_index] = max_result.score - 
curent_result.score\n", " non_perturbed_indexes.remove(random_index)\n", "\n", " return (\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " max_result,\n", " sample_found,\n", " )\n", "\n", " return (\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " curent_result,\n", " sample_found,\n", " )\n", "\n", " def update_synonyms(\n", " self,\n", " synonyms,\n", " index_to_add=None,\n", " index_to_remove=None,\n", " curent_result=None,\n", " results=None,\n", " transformed_text_candidates=None,\n", " ):\n", " \"\"\"Return an updated list of synonyms\"\"\"\n", "\n", " if (\n", " index_to_remove is not None\n", " and index_to_remove in synonyms\n", " and len(synonyms[index_to_remove]) != 0\n", " ):\n", "\n", " # remove the used synonym of certain index\n", " synonyms[index_to_remove] = [\n", " syn\n", " for syn in synonyms[index_to_remove]\n", " if syn[1] != curent_result.attacked_text.words[index_to_remove]\n", " ]\n", "\n", " # add synonyms of new perturbated word with their score\n", " if index_to_add is not None and transformed_text_candidates is not None:\n", " synonyms[index_to_add] = [\n", " (results[i].score, trans.words[index_to_add])\n", " for i, trans in enumerate(transformed_text_candidates)\n", " if trans.words[index_to_add]\n", " != curent_result.attacked_text.words[index_to_add]\n", " ]\n", "\n", " return synonyms\n", "\n", " def get_non_perturbed_indexes(\n", " self, initial_result, perturbed_indexes, non_usefull_indexes\n", " ):\n", " \"\"\"Return a list of non perturbed indexes\"\"\"\n", " all_indexes = set(range(len(initial_result.attacked_text.words)))\n", " perturbed_indexes_set = set(perturbed_indexes.keys())\n", " non_usefull_indexes_set = set(non_usefull_indexes)\n", " non_perturbed_indexes = list(\n", " all_indexes - perturbed_indexes_set - non_usefull_indexes_set\n", " )\n", " return non_perturbed_indexes\n", " \n", " def perform_search(self, initial_result):\n", " (\n", " curent_result,\n", " perturbed_indexes,\n", " non_usefull_indexes,\n", " synonyms,\n", " goal_statut,\n", " ) = self.generate_naive_attack(initial_result)\n", " sucsefull_attacks = [curent_result]\n", "\n", " new_curent_sucsefull_attacks = [curent_result]\n", " if not self.naive:\n", " # perturbed_index_swap is our 1s priority (in case of attack succeed goal_statut = 0 )\n", " for i in range(self.k):\n", "\n", " if len(new_curent_sucsefull_attacks) != 0:\n", " if i % 5 == 0 :\n", " (\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " max_result,\n", " sample_found\n", " ) = self.random_selection(\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " curent_result,\n", " initial_result,\n", " )\n", "\n", " if sample_found == True:\n", " new_curent_sucsefull_attacks.append(max_result) \n", " # how to decide on the next text to be treated here we work on the the one with max score\n", " curent_result = max(\n", " new_curent_sucsefull_attacks, key=lambda x: x.score\n", " )\n", " new_curent_sucsefull_attacks.remove(curent_result)\n", " else:\n", " curent_result, synonyms, synonym_found = self.swap_to_synonym(\n", " curent_result, synonyms, perturbed_indexes\n", " )\n", " if synonym_found == True:\n", " sucsefull_attacks.append(curent_result)\n", " new_curent_sucsefull_attacks.append(curent_result)\n", " continue\n", " else:\n", " break\n", " if len(perturbed_indexes) > 1 and not goal_statut:\n", " non_perturbed_indexes = self.get_non_perturbed_indexes(\n", " initial_result, perturbed_indexes, 
non_usefull_indexes\n", " )\n", " (\n", " curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " sucsefull_attacks_partial,\n", " rollback_found,\n", " ) = self.perturbed_index_swap(\n", " initial_result,\n", " curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " steps=self.rollback_level,\n", " )\n", " if len(sucsefull_attacks_partial) != 0:\n", " sucsefull_attacks.extend(sucsefull_attacks_partial)\n", " new_curent_sucsefull_attacks.extend(sucsefull_attacks_partial)\n", " # Action 2: the case where no rollback found we try to swap synonym and we aim to get better result\n", " if rollback_found == False:\n", " curent_result, synonyms, synonym_found = self.swap_to_synonym(\n", " curent_result, synonyms, perturbed_indexes\n", " )\n", " if synonym_found == True:\n", " sucsefull_attacks.append(curent_result)\n", " new_curent_sucsefull_attacks.append(curent_result)\n", "\n", " # if it's a failed attack we give chance for an other synonym\n", " # we will pass it for now because no improvment were found\n", " \"\"\"elif goal_statut == 1:\n", " curent_result, synonyms, goal_statut = self.swap_to_synonym(\n", " curent_result, synonyms, perturbed_indexes\n", " )\"\"\"\n", "\n", " if goal_statut == 0:\n", " sucsefull_attacks_text_only = [\n", " atk.attacked_text for atk in sucsefull_attacks\n", " ]\n", " self.successful_attacks[initial_result.attacked_text] = (\n", " sucsefull_attacks_text_only\n", " )\n", "\n", " if len(sucsefull_attacks) > 1:\n", " best_result = self.min_perturbation(\n", " sucsefull_attacks, initial_result.attacked_text\n", " )\n", " return best_result\n", " return curent_result\n", "\n", "\n", " def save_to_JSON(self, filename):\n", " data_list = []\n", " input_dict = {}\n", " for atk in self.successful_attacks:\n", " successful_attacks_with_scores = [\n", " (atk, score) for atk, score in self.successful_attacks[atk]\n", " ]\n", " input_dict[\" \".join(atk.words)] = successful_attacks_with_scores\n", " for original, samples in input_dict.items():\n", " samples_list = [\n", " {\"attacked_text\": \" \".join(text.words), \"score\": score}\n", " for text, score in samples\n", " ]\n", " data_list.append({\"original\": original, \"samples\": samples_list})\n", "\n", " # Save the formatted data to a JSON file\n", " with open(filename, \"w\") as json_file:\n", " json.dump({\"data\": data_list}, json_file, indent=4)\n", "\n", " def swap_to_synonym(self, curent_result, synonyms, perturbed_indexes):\n", " # giving chance to the second synonym of the most perturbated word if exists !\n", " found = False\n", " for index in perturbed_indexes:\n", " if index in synonyms and len(synonyms[index]) != 0:\n", " # what about other indexes we may give them chance too !\n", " synonym = max(synonyms[index], key=lambda x: x[0])\n", " new_attacked_text = curent_result.attacked_text.replace_word_at_index(\n", " index,\n", " synonym[1],\n", " )\n", "\n", " synonyms = self.update_synonyms(\n", " synonyms=synonyms,\n", " index_to_remove=index,\n", " curent_result=new_attacked_text,\n", " )\n", " found = True\n", " return new_attacked_text, synonyms, found\n", " # remove index with 0 synonymswithin the list\n", " synonyms.pop(index, None)\n", "\n", " return curent_result, synonyms, found\n", "\n", " def min_perturbation(self, results, original_text):\n", " # Initialize minimum score and result\n", " min_score = float(\"inf\")\n", " min_result = None\n", " original_text_splited = original_text.words\n", " for result in results:\n", " # 
Calculate perturbation as the number of words changed\n", " attacked_text = result.attacked_text\n", " perturbation = sum(\n", " i != j for i, j in zip(original_text_splited, attacked_text.words)\n", " )\n", "\n", " # Update minimum score and result if necessary\n", " if perturbation < min_score:\n", " min_score = perturbation\n", " min_result = result\n", "\n", " return min_result\n", "\n", " def check_transformation_compatibility(self, transformation):\n", " \"\"\"Since it ranks words by their importance, the algorithm is\n", " limited to word swap and deletion transformations.\"\"\"\n", " return transformation_consists_of_word_swaps_and_deletions(transformation)\n", "\n", " @property\n", " def is_black_box(self):\n", " if \"gradient\" in self.wir_method:\n", " return False\n", " else:\n", " return True\n", "\n", " def extra_repr_keys(self):\n", " return [\"wir_method\"]" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "C:\\Users\\Isaac\\AppData\\Roaming\\Python\\Python38\\site-packages\\torch\\_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()\n", " return self.fget.__get__(instance, owner)()\n", "textattack: Loading \u001b[94mdatasets\u001b[0m dataset \u001b[94mag_news\u001b[0m, split \u001b[94mtest\u001b[0m.\n" ] } ], "source": [ "import transformers\n", "from textattack.models.wrappers import HuggingFaceModelWrapper\n", "from textattack.models.helpers import *\n", "\n", "from textattack.datasets import HuggingFaceDataset\n", "\n", "\n", "pretrained_link = \"textattack/bert-base-uncased-ag-news\"\n", "\n", "data_set = \"ag_news\"\n", "model = transformers.AutoModelForSequenceClassification.from_pretrained(pretrained_link)\n", "tokenizer = transformers.AutoTokenizer.from_pretrained(pretrained_link)\n", "\n", "model_wrapper = HuggingFaceModelWrapper(model, tokenizer)\n", "dataset = HuggingFaceDataset(data_set, None, \"test\", shuffle=False)" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "textattack: Unknown if model of class compatible with goal function .\n" ] } ], "source": [ "# textfooler\n", "from textattack import Attack\n", "from textattack.constraints.grammaticality import PartOfSpeech\n", "from textattack.constraints.pre_transformation import (\n", " InputColumnModification,\n", " RepeatModification,\n", " StopwordModification,\n", ")\n", "from textattack.constraints.semantics import WordEmbeddingDistance\n", "from textattack.constraints.semantics.sentence_encoders import UniversalSentenceEncoder\n", "from textattack.goal_functions import UntargetedClassification\n", "from textattack.search_methods import GreedyWordSwapWIR\n", "from textattack.transformations import *\n", "\n", "# importing attacker \n", "from textattack.search_methods import *\n", "from tqdm import tqdm # tqdm provides us a nice progress bar.\n", "from textattack.loggers import CSVLogger # tracks a dataframe for 
us.\n", "from textattack.attack_results import SuccessfulAttackResult\n", "from textattack import Attacker\n", "from textattack import AttackArgs\n", "from textattack.datasets import Dataset\n", "\n", "transformation = WordSwapEmbedding(max_candidates=50)\n", "\n", "stopwords = set(\n", " [\"a\", \"about\", \"above\", \"across\", \"after\", \"afterwards\", \"again\", \"against\", \"ain\", \"all\", \"almost\", \"alone\", \"along\", \"already\", \"also\", \"although\", \"am\", \"among\", \"amongst\", \"an\", \"and\", \"another\", \"any\", \"anyhow\", \"anyone\", \"anything\", \"anyway\", \"anywhere\", \"are\", \"aren\", \"aren't\", \"around\", \"as\", \"at\", \"back\", \"been\", \"before\", \"beforehand\", \"behind\", \"being\", \"below\", \"beside\", \"besides\", \"between\", \"beyond\", \"both\", \"but\", \"by\", \"can\", \"cannot\", \"could\", \"couldn\", \"couldn't\", \"d\", \"didn\", \"didn't\", \"doesn\", \"doesn't\", \"don\", \"don't\", \"down\", \"due\", \"during\", \"either\", \"else\", \"elsewhere\", \"empty\", \"enough\", \"even\", \"ever\", \"everyone\", \"everything\", \"everywhere\", \"except\", \"first\", \"for\", \"former\", \"formerly\", \"from\", \"hadn\", \"hadn't\", \"hasn\", \"hasn't\", \"haven\", \"haven't\", \"he\", \"hence\", \"her\", \"here\", \"hereafter\", \"hereby\", \"herein\", \"hereupon\", \"hers\", \"herself\", \"him\", \"himself\", \"his\", \"how\", \"however\", \"hundred\", \"i\", \"if\", \"in\", \"indeed\", \"into\", \"is\", \"isn\", \"isn't\", \"it\", \"it's\", \"its\", \"itself\", \"just\", \"latter\", \"latterly\", \"least\", \"ll\", \"may\", \"me\", \"meanwhile\", \"mightn\", \"mightn't\", \"mine\", \"more\", \"moreover\", \"most\", \"mostly\", \"must\", \"mustn\", \"mustn't\", \"my\", \"myself\", \"namely\", \"needn\", \"needn't\", \"neither\", \"never\", \"nevertheless\", \"next\", \"no\", \"nobody\", \"none\", \"noone\", \"nor\", \"not\", \"nothing\", \"now\", \"nowhere\", \"o\", \"of\", \"off\", \"on\", \"once\", \"one\", \"only\", \"onto\", \"or\", \"other\", \"others\", \"otherwise\", \"our\", \"ours\", \"ourselves\", \"out\", \"over\", \"per\", \"please\", \"s\", \"same\", \"shan\", \"shan't\", \"she\", \"she's\", \"should've\", \"shouldn\", \"shouldn't\", \"somehow\", \"something\", \"sometime\", \"somewhere\", \"such\", \"t\", \"than\", \"that\", \"that'll\", \"the\", \"their\", \"theirs\", \"them\", \"themselves\", \"then\", \"thence\", \"there\", \"thereafter\", \"thereby\", \"therefore\", \"therein\", \"thereupon\", \"these\", \"they\", \"this\", \"those\", \"through\", \"throughout\", \"thru\", \"thus\", \"to\", \"too\", \"toward\", \"towards\", \"under\", \"unless\", \"until\", \"up\", \"upon\", \"used\", \"ve\", \"was\", \"wasn\", \"wasn't\", \"we\", \"were\", \"weren\", \"weren't\", \"what\", \"whatever\", \"when\", \"whence\", \"whenever\", \"where\", \"whereafter\", \"whereas\", \"whereby\", \"wherein\", \"whereupon\", \"wherever\", \"whether\", \"which\", \"while\", \"whither\", \"who\", \"whoever\", \"whole\", \"whom\", \"whose\", \"why\", \"with\", \"within\", \"without\", \"won\", \"won't\", \"would\", \"wouldn\", \"wouldn't\", \"y\", \"yet\", \"you\", \"you'd\", \"you'll\", \"you're\", \"you've\", \"your\", \"yours\", \"yourself\", \"yourselves\"]\n", ")\n", "\n", "constraints = [RepeatModification(), StopwordModification(stopwords=stopwords)]\n", "\n", "input_column_modification = InputColumnModification(\n", " [\"premise\", \"hypothesis\"], {\"premise\"}\n", ")\n", "constraints.append(input_column_modification)\n", "\n", 
"constraints.append(WordEmbeddingDistance(min_cos_sim=0.5))\n", "\n", "constraints.append(PartOfSpeech(allow_verb_noun_swap=True))\n", "\n", "use_constraint = UniversalSentenceEncoder(\n", " threshold=0.840845057,\n", " metric=\"angular\",\n", " compare_against_original=False,\n", " window_size=15,\n", " skip_text_shorter_than_window=True,\n", ")\n", "constraints.append(use_constraint)\n", "\n", "goal_function = UntargetedClassification(model_wrapper)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "\n", "import random\n", "\n", "class GreedyMultipleGeneration(SearchMethod):\n", " def __init__(\n", " self,\n", " wir_method=\"delete\",\n", " k=30,\n", " embed=None,\n", " naive_file=None,\n", " rollback_level=1,\n", " naive=False,\n", " clust=None,\n", " train_file=\"train_file.csv\",\n", " depth =5 ,\n", " ):\n", " self.wir_method = wir_method\n", " self.k = k # maximum iterations\n", " self.embed = embed # universal sentence encoder\n", " self.file = naive_file \n", " self.naive = naive\n", " self.rollback_level = rollback_level\n", " self.successful_attacks = {}\n", " self.clust = clust\n", " self.train_file = train_file\n", " self.depth = depth\n", "\n", " def _get_index_order(self, initial_text, indices_to_order):\n", " \"\"\"Returns word indices of ``initial_text`` in descending order of\n", " importance.\"\"\"\n", "\n", " if \"unk\" in self.wir_method:\n", " leave_one_texts = [\n", " initial_text.replace_word_at_index(i, \"[UNK]\") for i in indices_to_order\n", " ]\n", " leave_one_results, search_over = self.get_goal_results(leave_one_texts)\n", " index_scores = np.array([result.score for result in leave_one_results])\n", "\n", " elif \"delete\" in self.wir_method:\n", " leave_one_texts = [\n", " initial_text.delete_word_at_index(i) for i in indices_to_order\n", " ]\n", " leave_one_results, search_over = self.get_goal_results(leave_one_texts)\n", " # print(f\"leave_one_results : {leave_one_results}\")\n", " # print(f\"search_over : {search_over}\")\n", "\n", " index_scores = np.array([result.score for result in leave_one_results])\n", "\n", " elif \"weighted-saliency\" in self.wir_method:\n", " # first, compute word saliency\n", " leave_one_texts = [\n", " initial_text.replace_word_at_index(i, \"unk\") for i in indices_to_order\n", " ]\n", " leave_one_results, search_over = self.get_goal_results(leave_one_texts)\n", " saliency_scores = np.array([result.score for result in leave_one_results])\n", "\n", " softmax_saliency_scores = softmax(\n", " torch.Tensor(saliency_scores), dim=0\n", " ).numpy()\n", "\n", " # compute the largest change in score we can find by swapping each word\n", " delta_ps = []\n", " for idx in indices_to_order:\n", " # Exit Loop when search_over is True - but we need to make sure delta_ps\n", " # is the same size as softmax_saliency_scores\n", " if search_over:\n", " delta_ps = delta_ps + [0.0] * (\n", " len(softmax_saliency_scores) - len(delta_ps)\n", " )\n", " break\n", "\n", " transformed_text_candidates = self.get_transformations(\n", " initial_text,\n", " original_text=initial_text,\n", " indices_to_modify=[idx],\n", " )\n", " if not transformed_text_candidates:\n", " # no valid synonym substitutions for this word\n", " delta_ps.append(0.0)\n", " continue\n", " swap_results, search_over = self.get_goal_results(\n", " transformed_text_candidates\n", " )\n", " score_change = [result.score for result in swap_results]\n", " if not score_change:\n", " delta_ps.append(0.0)\n", " continue\n", " max_score_change = 
np.max(score_change)\n", " delta_ps.append(max_score_change)\n", "\n", " index_scores = softmax_saliency_scores * np.array(delta_ps)\n", "\n", " elif \"gradient\" in self.wir_method:\n", " print(\"calculating gradient\") \n", " victim_model = self.get_victim_model()\n", "\n", " index_scores = np.zeros(len(indices_to_order))\n", " grad_output = victim_model.get_grad(initial_text.tokenizer_input)\n", " gradient = grad_output[\"gradient\"]\n", " word2token_mapping = initial_text.align_with_model_tokens(victim_model)\n", " for i, index in enumerate(indices_to_order):\n", " matched_tokens = word2token_mapping[index]\n", " if not matched_tokens:\n", " index_scores[i] = 0.0\n", " else:\n", " agg_grad = np.mean(gradient[matched_tokens], axis=0)\n", " index_scores[i] = np.linalg.norm(agg_grad, ord=1)\n", "\n", " search_over = False\n", "\n", " index_order = np.array(indices_to_order)[(-index_scores).argsort()]\n", " index_scores = sorted(index_scores, reverse=True)\n", " return index_order, search_over, index_scores\n", "\n", " # This present a rollback for reducing perturbation only\n", " def swap_to_origin(self, cur_result, initial_result, index):\n", " \"\"\"Replace the chosen word with it origin a return a result instance\"\"\"\n", " \n", " new_attacked_text = cur_result.attacked_text.replace_word_at_index(\n", " index, initial_result.attacked_text.words[index]\n", " )\n", " result, _ = self.get_goal_results([new_attacked_text])\n", " return result[0]\n", "\n", " def check_synonym_validity(\n", " ind, ind_synonym, Synonym_indices, Current_attacked_Results, j, synonym\n", " ):\n", " \"\"\"Checks if a synonym is valid for a given index in the attacked text.\n", "\n", " Args:\n", " ind: The index of the word in the attacked text.\n", " ind_synonym: The index of the synonym in the list of synonyms.\n", " Synonym_indices: A dictionary of synonym indices.\n", " Current_attacked_Results: A list of AttackedResult objects.\n", " j: The index of the current AttackedResult object in the list.\n", " synonym: The synonym to check.\n", "\n", " Returns:\n", " True if the synonym is valid, False otherwise.\"\"\"\n", "\n", " # Check if the synonym has already been chosen.\n", " if (ind, ind_synonym) in Synonym_indices:\n", " return False\n", "\n", " # Get the current attacked text and its words.\n", " current_attacked_text = Current_attacked_Results[j].attacked_text\n", " current_attacked_words = current_attacked_text.words\n", "\n", " # Check if the synonym is already present in the attacked text.\n", " if synonym in current_attacked_words[ind]:\n", " return False\n", "\n", " return True\n", "\n", " def generate_naive_attack(self, initial_result):\n", " curent_result = initial_result\n", " # dict of preturbed indexes with theire scores on on the original text\n", " perturbed_indexes = {}\n", " # possible synonyms of each index with theire scores on the original text to reduce avg num queries\n", " synonyms = {}\n", " # to track indexes with no transformation so we avoid recalculate them to reduce avg num queries\n", " non_usefull_indexes = []\n", " attacked_text = initial_result.attacked_text\n", " _, indices_to_order = self.get_indices_to_order(attacked_text)\n", "\n", " # Sort words by order of importance\n", "\n", " index_order, search_over, _ = self._get_index_order(\n", " attacked_text, indices_to_order\n", " )\n", "\n", " # iterate through words by theire importance\n", " for index in index_order:\n", " if search_over:\n", " break\n", " transformed_text_candidates = self.get_transformations(\n", " 
curent_result.attacked_text,\n", " original_text=initial_result.attacked_text,\n", " indices_to_modify=[index],\n", " )\n", "\n", " if len(transformed_text_candidates) == 0:\n", " # track unusefull words to optimize the code .\n", " non_usefull_indexes.append(index)\n", " continue\n", " else:\n", " results, search_over = self.get_goal_results(\n", " transformed_text_candidates\n", " )\n", "\n", " max_result = max(results, key=lambda x: x.score)\n", "\n", " if max_result.score > curent_result.score:\n", " if self.naive == False:\n", " # store perturbed indexes with theire score\n", " perturbed_indexes[index] = max_result.score - curent_result.score\n", " # add all synonyms except the one we ve been using\n", " synonyms[index] = [\n", " (results[i].score - curent_result.score, trans.words[index])\n", " for i, trans in enumerate(transformed_text_candidates)\n", " if trans.words[index] != max_result.attacked_text.words[index]\n", " ]\n", "\n", " curent_result = max_result\n", "\n", " if curent_result.goal_status == GoalFunctionResultStatus.SUCCEEDED:\n", " return (\n", " curent_result,\n", " perturbed_indexes,\n", " non_usefull_indexes,\n", " synonyms,\n", " curent_result.goal_status,\n", " )\n", "\n", " return (\n", " curent_result,\n", " perturbed_indexes,\n", " non_usefull_indexes,\n", " synonyms,\n", " curent_result.goal_status,\n", " )\n", "\n", " # TODO we can add depth to track how many words rolled back for more statistics\n", "\n", " def perturbed_index_swap(\n", " self,\n", " initial_result,\n", " curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " steps,\n", " depth,\n", " ):\n", "\n", " \n", " past_curent_result = curent_result\n", " # the index with minimum perturbation\n", " rollback_found = False\n", " index_swap_found = False\n", " steps = min(steps, len(perturbed_indexes) - 1)\n", " sucsefull_attacks = []\n", " \"\"\"current_index_order, _ ,current_score = self._get_index_order(\n", " curent_result.attacked_text, perturbed_indexes\n", " )\"\"\"\n", " \n", " rollback_indices = sorted(perturbed_indexes , key=perturbed_indexes.get, reverse=False)[:depth]\n", " #rollback_index = min(perturbed_indexes, key=perturbed_indexes.get) # rollback index of the rolback action\n", " # TODO remove from perturbed_indexes list and add it to non_perturbed_indexes but with punalitié\n", " # how punalité should look like ? 
it could be at the end of the quee with visited flag\n", " # or we can just eliminate it .\n", " #perturbed_indexes.pop(rollback_index, None)\n", " \n", " for rollback_index in rollback_indices :\n", " \n", " result_rollback = self.swap_to_origin(\n", " curent_result, initial_result, rollback_index\n", " )\n", "\n", " if result_rollback.goal_status == GoalFunctionResultStatus.SUCCEEDED:\n", " # remove synonyms of the rolledback index\n", " rollback_found = True\n", " synonyms = self.update_synonyms(\n", " synonyms,\n", " None,\n", " rollback_index,\n", " result_rollback,\n", " None,\n", " None,\n", " )\n", " # stock this sucssefull attack\n", " sucsefull_attacks.append(result_rollback) # attention k+1\n", " break\n", "\n", " current_index_order, _ ,current_score = self._get_index_order(\n", " curent_result.attacked_text, non_perturbed_indexes\n", " )\n", " \n", " non_perturbed_indexes = current_index_order.tolist()\n", " for _ in range(steps):\n", " # TODO getting the least important perturbated word in the new attacked sample costs a lot\n", " # a faire : choisir trois index a retirer\n", "\n", " for index in non_perturbed_indexes:\n", " if index_swap_found == True: \n", " break\n", " # early returning\n", " \"\"\" if len(perturbed_indexes) == 0:\n", " return (\n", " curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " sucsefull_attacks,\n", " rollback_found,\n", " )\"\"\"\n", "\n", " # get candidates for non perturbed word\n", " transformed_text_candidates = self.get_transformations(\n", " curent_result.attacked_text,\n", " original_text=initial_result.attacked_text,\n", " indices_to_modify=[index],\n", " )\n", "\n", " if len(transformed_text_candidates) == 0:\n", " non_perturbed_indexes.remove(index)\n", " continue # wa7ed ma chaf wa7ed\n", "\n", " results, _ = self.get_goal_results(transformed_text_candidates)\n", "\n", " # we add one perturbed word\n", " #max_result = max(results, key=lambda x: x.score)\n", "\n", " \n", " \"\"\"for res in results:\n", " if res.score > curent_result.score:\n", " if res.goal_status == GoalFunctionResultStatus.SUCCEEDED:\n", " # add synonyms of the added perturbed index\n", " synonyms = self.update_synonyms(\n", " synonyms=synonyms,\n", " index_to_add=index,\n", " index_to_remove=None,\n", " curent_result=res,\n", " results=results, # k+1 indice !\n", " transformed_text_candidates=transformed_text_candidates,\n", " )\"\"\"\n", " # If the tuple is not found, do nothing \n", " # stock this sucssefull attack\n", " #sucsefull_attacks.append(res)\n", " # we get better score (k+1 perturbation)\n", " max_number = min(depth, len(results))\n", " max_results = sorted(results, key=lambda x: x.score, reverse=True)[:max_number]\n", " for max_result in max_results:\n", " #if max_result.score > curent_result.score:\n", " if max_result.goal_status == GoalFunctionResultStatus.SUCCEEDED: \n", " # eplore minimum perturbation on the original text\n", " inferior = min(perturbed_indexes, key=perturbed_indexes.get) # rollback index of the index swap action\n", " try:\n", " non_perturbed_indexes.remove(index) # remove perturbed index\n", " except ValueError:\n", " continue \n", " perturbed_indexes[index] = max_result.score - curent_result.score\n", " # restore one perturbed k\n", "\n", " perturbed_indexes.pop(inferior, None)\n", "\n", " new_attacked_text = (\n", " max_result.attacked_text.replace_word_at_index(\n", " inferior,\n", " initial_result.attacked_text.words[inferior],\n", " )\n", " )\n", "\n", " result, _ = 
self.get_goal_results([new_attacked_text]) # index swap =k\n", "\n", " result_index_swap = max(result, key=lambda x: x.score) # rollback\n", " #for res in result: # une seule !!!\n", "\n", " if result_index_swap.goal_status == GoalFunctionResultStatus.SUCCEEDED:\n", " index_swap_found = True\n", " \n", " synonyms = self.update_synonyms(\n", " synonyms,\n", " index,\n", " inferior,\n", " result_index_swap,\n", " results,\n", " transformed_text_candidates,\n", " )\n", " \n", " # stock this sucssefull attack\n", " sucsefull_attacks.append(result_index_swap) # k \n", " curent_result = result_index_swap\n", " break\n", " \n", " \"\"\"if (\n", " result_index_swap.goal_status\n", " == GoalFunctionResultStatus.SUCCEEDED\n", " ):\n", " index_swap_found = True\n", " synonyms = self.update_synonyms(\n", " synonyms,\n", " index,\n", " inferior,\n", " result_index_swap,\n", " results,\n", " transformed_text_candidates,\n", " )\"\"\"\n", " \n", " if rollback_found:\n", " perturbed_indexes.pop(rollback_index, None)\n", "\n", " \"\"\"if index_swap_found or rollback_found:\n", "\n", " return (\n", " curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " sucsefull_attacks,\n", " True,\n", "\n", " ) \"\"\"\n", "\n", " if rollback_found:\n", " return (\n", " curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " sucsefull_attacks,\n", " True,\n", " )\n", "\n", " if index_swap_found :\n", "\n", " return (\n", " curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " sucsefull_attacks,\n", " False,\n", " )\n", " \n", " return (\n", " past_curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " sucsefull_attacks,\n", " False,\n", " )\n", "\n", " def update_synonyms(\n", " self,\n", " synonyms,\n", " index_to_add=None,\n", " index_to_remove=None,\n", " curent_result=None,\n", " results=None,\n", " transformed_text_candidates=None,\n", " ):\n", " \"\"\"Return an updated list of synonyms\"\"\"\n", " if index_to_remove in synonyms and len(synonyms[index_to_remove]) != 0:\n", " # remove the used synonym of certain index\n", " synonyms[index_to_remove] = [\n", " syn for syn in synonyms[index_to_remove]\n", " if syn[1] != curent_result.attacked_text.words[index_to_remove]\n", " ]\n", "\n", " # add synonyms of new perturbated word with their score\n", " if index_to_add is not None and transformed_text_candidates is not None:\n", " try:\n", " synonyms[index_to_add] = [\n", " (results[i].score - curent_result.score , trans.words[index_to_add])\n", " for i, trans in enumerate(transformed_text_candidates)\n", " if trans.words[index_to_add] != curent_result.attacked_text.words[index_to_add] \n", " ]\n", " \n", " except ValueError:\n", " pass\n", " return synonyms\n", "\n", " def get_non_perturbed_indexes(\n", " self, initial_result, perturbed_indexes, non_usefull_indexes\n", " ):\n", " \"\"\"Return a list of non perturbed indexes\"\"\"\n", " all_indexes = set(range(len(initial_result.attacked_text.words)))\n", " perturbed_indexes_set = set(perturbed_indexes.keys())\n", " non_usefull_indexes_set = set(non_usefull_indexes)\n", " non_perturbed_indexes = list(\n", " all_indexes - perturbed_indexes_set - non_usefull_indexes_set\n", " )\n", " return non_perturbed_indexes\n", "\n", " def perform_search(self, initial_result):\n", " \n", " start_time = time.time() \n", " (\n", " curent_result,\n", " perturbed_indexes,\n", " non_usefull_indexes,\n", " synonyms,\n", " goal_statut,\n", " ) = 
self.generate_naive_attack(initial_result)\n", " sucsefull_attacks = [curent_result]\n", " naive_result = curent_result \n", " naive_time = time.time() - start_time\n", " new_curent_sucsefull_attacks = [curent_result]\n", " \n", " #k = floor((len(initial_result.attacked_text.words)/2)) + 1\n", " k = len(perturbed_indexes)\n", " if not self.naive:\n", " # perturbed_index_swap is our 1s priority (in case of attack succeed goal_statut = 0 )\n", " #for i in range(len(perturbed_indexes)):\n", " for i in range(k):\n", " print(\"i \" +str(i))\n", " non_perturbed_indexes = self.get_non_perturbed_indexes( initial_result, perturbed_indexes, non_usefull_indexes )\n", " \n", " if len(new_curent_sucsefull_attacks) != 0:\n", " # how to decide on the next text to be treated here we work on the the one with max score\n", " curent_result = random.choice(new_curent_sucsefull_attacks)\n", " \n", " \"\"\"curent_result = max(\n", " new_curent_sucsefull_attacks, key=lambda x: x.score\n", " )\"\"\"\n", " \n", " new_curent_sucsefull_attacks.remove(curent_result)\n", " else:\n", " break \n", " \"\"\"print(\"type 00 \" +str(type(curent_result)))\n", " curent_result, synonyms, synonym_found = self.swap_to_synonym(\n", " curent_result, synonyms, perturbed_indexes\n", " )\n", " if synonym_found == True:\n", " sucsefull_attacks.append(curent_result)\n", " new_curent_sucsefull_attacks.append(curent_result)\n", " continue\n", " print(\"type 01 \" +str(type(curent_result)))\"\"\"\n", "\n", " if len(perturbed_indexes) > 1 and not goal_statut:\n", " \n", " \"\"\"non_perturbed_indexes = self.get_non_perturbed_indexes( initial_result, perturbed_indexes, non_usefull_indexes )\"\"\"\n", " \n", " (\n", " curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " sucsefull_attacks_partial,\n", " rollback_found,\n", " ) = self.perturbed_index_swap(\n", " initial_result,\n", " curent_result,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " steps=self.rollback_level,\n", " depth =self.depth,\n", " )\n", " if len(sucsefull_attacks_partial) != 0:\n", " sucsefull_attacks.extend(sucsefull_attacks_partial)\n", " new_curent_sucsefull_attacks.extend(sucsefull_attacks_partial)\n", " \n", " \n", " # Action 2: the case where no rollback found we try to swap synonym and we aim to get better result\n", " #if rollback_found == False:\n", " curent_results, synonyms, synonym_found = self.swap_to_synonym(\n", " curent_result, synonyms, perturbed_indexes, depth = self.depth\n", " )\n", " if synonym_found == True:\n", " sucsefull_attacks.extend(curent_results) \n", " new_curent_sucsefull_attacks.extend(curent_results) \n", "\n", " # if it's a failed attack we give chance for an other synonym\n", " # we will pass it for now because no improvment were found\n", " \"\"\"elif goal_statut == 1:\n", " curent_result, synonyms, goal_statut = self.swap_to_synonym(\n", " curent_result, synonyms, perturbed_indexes\n", " )\"\"\"\n", "\n", " if goal_statut == 0:\n", " sucsefull_attacks_text_scores = []\n", " sucsefull_attacks_text_scores = [\n", " (atk.attacked_text, atk.score)\n", " for atk in sucsefull_attacks\n", " if atk.score > 0.5\n", " ]\n", "\n", " sucsefull_attacks_text_scores = list(set(sucsefull_attacks_text_scores))\n", "\n", " self.successful_attacks[initial_result.attacked_text] = (\n", " sucsefull_attacks_text_scores\n", " )\n", " ground_truth_output = sucsefull_attacks[0].ground_truth_output\n", "\n", " self.save_to_train(\n", " initial_result,\n", " sucsefull_attacks_text_scores,\n", " 
ground_truth_output,\n", " )\n", "\n", " our_time = time.time() - start_time\n", " if self.file != None : \n", " with open(self.file, mode='a', newline='') as file:\n", " writer = csv.writer(file)\n", " row = [str(naive_time),str(our_time),str(naive_result.num_queries),naive_result.attacked_text.words_diff_ratio(initial_result.attacked_text)] \n", " writer.writerow(row)\n", " \n", " file.close() \n", " \n", " return naive_result\n", " #try:\n", " #best_result = self.min_perturbation(\n", " #sucsefull_attacks, initial_result.attacked_text\n", " #)\n", " #return best_result\n", " #except:\n", " #return curent_result\n", "\n", " def save_to_train(\n", " self,\n", " original_text,\n", " sucsefull_attacks_text_scores,\n", " ground_truth_output,\n", "\n", " ):\n", " successful_attacks = {\n", " original_text.attacked_text: sucsefull_attacks_text_scores\n", " }\n", " self.save_to_JSON(filename=\"temp.json\", successful_attacks=successful_attacks)\n", "\n", " #self.pipeline(ground_truth_output, self.train_file)\n", " with open(self.train_file, mode='a', newline='') as file:\n", " writer = csv.writer(file)\n", " for col1 in sucsefull_attacks_text_scores :\n", " writer.writerow([col1, ground_truth_output])\n", "\n", "\n", " def pipeline(self, ground_truth_output, train_file):\n", " \n", " clust = Clustering(\n", " file_=\"temp.json\",\n", " victim_model_wrapper=model_wrapper,\n", " victim_model=model,\n", " attack=attack,\n", " )\n", "\n", " clust.file_ = \"temp.json\"\n", " sentence_embedding_vectors, masks, scores = clust.prepare_sentences()\n", "\n", " unified_mask = clust.get_global_unified_masks(masks=masks)\n", "\n", " sentences = clust.apply_mask_on_global_vectors(\n", " global_sentences=sentence_embedding_vectors, unified_masks=unified_mask\n", " )\n", "\n", " sentences = clust.global_matrix_to_global_sentences(\n", " global_matrix_sentences=sentences\n", " )\n", "\n", " global_clustering = clust.find_global_best_clustering(\n", " sentences, 10, \"thumb-rule\"\n", " )\n", "\n", " selected_samples = clust.global_select_diverce_sample(\n", " scores, sentences, global_clustering\n", " )\n", " \n", " \n", " clust.save_csv(selected_samples, ground_truth_output, train_file)\n", "\n", " def save_to_JSON(self, filename, successful_attacks):\n", " data_list = []\n", " input_dict = {}\n", " for atk in successful_attacks:\n", " successful_attacks_with_scores = [ (atk, score) for atk, score in successful_attacks[atk] ]\n", " input_dict[\" \".join(atk.words)] = successful_attacks_with_scores\n", " for original, samples in input_dict.items():\n", " samples_list = [ {\"attacked_text\": \" \".join(text.words), \"score\": score} for text, score in samples ]\n", " data_list.append({\"original\": original, \"samples\": samples_list})\n", "\n", " # Save the formatted data to a JSON file\n", " with open(filename, \"w\") as json_file:\n", " json.dump({\"data\": data_list}, json_file, indent=4)\n", "\n", " def swap_to_synonym(self, curent_result, synonyms, perturbed_indexes, depth=5):\n", " # giving chance to the second synonym of the most perturbated word if exists !\n", " found = False\n", " attacked_results = []\n", " for index in perturbed_indexes:\n", " if index in synonyms and len(synonyms[index]) != 0:\n", " # what about other indexes we may give them chance too !\n", " # response : experiments shows that there is no much improvment taking in consideration the high increase of avg Q-num\n", " #synonym = max(synonyms[index], key=lambda x: x[0])\n", " min_syn = min(depth,len(synonyms[index]))\n", " 
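# Editorial note (hedged): the block below picks up to `depth` of the synonyms\n", "                # recorded for this index during the naive pass (sorted by their recorded\n", "                # score delta), re-applies each one at this position, and keeps every\n", "                # substitution that still fools the model as an extra adversarial sample;\n", "                # the index's remaining synonyms are then discarded.\n", "                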
#best_synonyms = collections.OrderedDict(sorted(synonyms.items()), key=lambda x: x[0])\n", " best_synonyms = sorted(synonyms[index], key=lambda x: x[0], reverse=False)[:min_syn]\n", " for synonym in best_synonyms :\n", " new_attacked_text = ( curent_result.attacked_text.replace_word_at_index(index,synonym[1],))\n", " new_result, _ = self.get_goal_results([new_attacked_text])\n", " if new_result[0].goal_status == GoalFunctionResultStatus.SUCCEEDED:\n", " found = True\n", " attacked_results.append(new_result[0])\n", " #return new_attacked_text, synonyms, found\n", "\n", " # Here we remove the synonym no matter what \n", " # synonyms = self.update_synonyms(synonyms=synonyms,index_to_remove=index,curent_result=new_result[0],)\n", "\n", " # after checking the selected synonyms we remove the index from the synonm list \n", " synonyms.pop(index, None) \n", " \n", " if found == True :\n", " return attacked_results, synonyms, found\n", " \n", " return [curent_result], synonyms, found\n", "\n", " def min_perturbation(self, results, original_text):\n", " # Initialize minimum score and result\n", " min_score = float(\"inf\")\n", " min_result = None\n", " original_text_splited = original_text.words\n", " for result in results:\n", " # Calculate perturbation as the number of words changed\n", " attacked_text = result.attacked_text\n", " perturbation = sum( i != j for i, j in zip(original_text_splited, attacked_text.words) )\n", "\n", " # Update minimum score and result if necessary\n", " if perturbation < min_score:\n", " min_score = perturbation\n", " min_result = result\n", "\n", " return min_result\n", "\n", " def check_transformation_compatibility(self, transformation):\n", " \"\"\"Since it ranks words by their importance, the algorithm is\n", " limited to word swap and deletion transformations.\"\"\"\n", " return transformation_consists_of_word_swaps_and_deletions(transformation)\n", "\n", " def random_selection(\n", " self,\n", " non_perturbed_indexes,\n", " perturbed_indexes,\n", " synonyms,\n", " curent_result,\n", " initial_result,\n", " ):\n", " max_iterations = len(non_perturbed_indexes)\n", " sample_found = False\n", " for _ in range(max_iterations):\n", " random_index = random.choice(non_perturbed_indexes)\n", " transformed_text_candidates = self.get_transformations(\n", " curent_result.attacked_text,\n", " original_text=initial_result.attacked_text,\n", " indices_to_modify=[random_index], )\n", " if len(transformed_text_candidates) == 0:\n", " non_perturbed_indexes.remove(random_index)\n", " continue\n", "\n", " results, _ = self.get_goal_results([transformed_text_candidates[0]])\n", "\n", " # we add one perturbed word\n", " max_result = max(results, key=lambda x: x.score)\n", " sample_found = True\n", " # update synonym\n", " synonyms = self.update_synonyms(\n", " synonyms=synonyms,\n", " index_to_add=random_index,\n", " curent_result=curent_result,\n", " results=results,\n", " transformed_text_candidates=[transformed_text_candidates[0]], )\n", "\n", " # penalty on existing indexes\n", " for index in perturbed_indexes:\n", " perturbed_indexes[index] = perturbed_indexes[index] * 0.9\n", "\n", " perturbed_indexes[random_index] = max_result.score - curent_result.score\n", " non_perturbed_indexes.remove(random_index)\n", "\n", " return ( non_perturbed_indexes, perturbed_indexes, synonyms, max_result, sample_found,)\n", "\n", " return ( non_perturbed_indexes, perturbed_indexes, synonyms, curent_result, sample_found, )\n", "\n", " @property\n", " def is_black_box(self):\n", " if \"gradient\" in 
self.wir_method:\n", " return False\n", " else:\n", " successful_attacks = { original_text.attacked_text: sucsefull_attacks_text_scores }\n", " return True\n", "\n", " def extra_repr_keys(self):\n", " return [\"wir_method\"]\n", "\n", "\n" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import torch\n", "from torch.nn.functional import softmax\n", "import time\n", "from textattack.goal_function_results import GoalFunctionResultStatus\n", "from textattack.search_methods import SearchMethod\n", "from textattack.shared.validators import (\n", " transformation_consists_of_word_swaps_and_deletions,\n", ")\n", "from sklearn.metrics.pairwise import cosine_similarity\n", "import numpy as np\n", "import torch\n", "from torch.nn.functional import softmax\n", "import json\n", "\n", "from textattack.goal_function_results import GoalFunctionResultStatus\n", "from textattack.search_methods import SearchMethod\n", "from textattack.shared.validators import (\n", " transformation_consists_of_word_swaps_and_deletions,\n", ")\n", "from textattack.shared.validators import (\n", " transformation_consists_of_word_swaps_and_deletions,\n", ")\n", "import csv\n" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "\n", "\n", "import sys\n", "\n", "sys.setrecursionlimit(3000)\n", "lines = 1\n", "\n", "from textattack.search_methods import *\n", "from textattack import AttackArgs\n", "\n", "attack_name = \"GreedyMultipleGeneration\"\n", "\n", "checkpoint_path = \"\" \n", "checkpoint_file = checkpoint_path+\"/1681827402201.ta.chkpt\" \n", "train_file = checkpoint_path+\"train_file.csv\"\n", "naive_file = checkpoint_path+\"naive_file.csv\"\n", "clust_file = checkpoint_path+attack_name+\".json\"\n", "\n", "search_method = GreedyMultipleGeneration(\n", " wir_method=\"gradient\", embed=None, naive=True, depth =3, rollback_level=1, train_file= train_file, naive_file = naive_file\n", ")\n", "\n", "\n", "attack_args = AttackArgs( num_examples=lines,checkpoint_interval = 200,checkpoint_dir =checkpoint_path) \n", "attack = Attack(goal_function, constraints, transformation, search_method)\n" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Attack(\n", " (search_method): GreedyMultipleGeneration(\n", " (wir_method): gradient\n", " )\n", " (goal_function): UntargetedClassification\n", " (transformation): WordSwapEmbedding(\n", " (max_candidates): 50\n", " (embedding): WordEmbedding\n", " )\n", " (constraints): \n", " (0): WordEmbeddingDistance(\n", " (embedding): WordEmbedding\n", " (min_cos_sim): 0.5\n", " (cased): False\n", " (include_unknown_words): True\n", " (compare_against_original): True\n", " )\n", " (1): PartOfSpeech(\n", " (tagger_type): nltk\n", " (tagset): universal\n", " (allow_verb_noun_swap): True\n", " (compare_against_original): True\n", " )\n", " (2): UniversalSentenceEncoder(\n", " (metric): angular\n", " (threshold): 0.840845057\n", " (window_size): 15\n", " (skip_text_shorter_than_window): True\n", " (compare_against_original): False\n", " )\n", " (3): RepeatModification\n", " (4): StopwordModification\n", " (5): InputColumnModification(\n", " (matching_column_labels): ['premise', 'hypothesis']\n", " (columns_to_ignore): {'premise'}\n", " )\n", " (is_black_box): False\n", ") \n", "\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ " 0%| | 0/1 [00:00 7\u001b[0m 
[... ANSI-colored traceback elided for readability; the call chain is: Attacker.attack_dataset -> Attacker._attack -> Attack.attack -> Attack._attack -> GreedyMultipleGeneration.perform_search -> generate_naive_attack -> Attack.get_transformations -> Attack.filter_transformations -> Constraint.call_many -> SentenceEncoder._check_constraint_many -> SentenceEncoder._score_list -> UniversalSentenceEncoder.encode -> TensorFlow quick_execute ...]\n", "
56\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m core\u001b[38;5;241m.\u001b[39m_NotOkStatusException \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m 57\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m name \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n", "\u001b[1;31mUnknownError\u001b[0m: Graph execution error:\n\nJIT compilation failed.\n\t [[{{node EncoderDNN/EmbeddingLookup/EmbeddingLookupUnique/embedding_lookup/mod}}]] [Op:__inference_restored_function_body_4204]" ] } ], "source": [ "\n", "checkpt = False\n", "if checkpt :\n", " attacker = Attacker.from_checkpoint(attack, dataset, checkpoint_file)\n", "else :\n", " attacker = Attacker(attack, dataset, attack_args)\n", "\n", "attacker.attack_dataset()\n", " " ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[(('lovingly photographed in the manner of a golden book sprung to life , stuart little 2 administration sweetness largely without stickiness .', 'adversarial_example'), 1), (('ceaselessly cleverer and suspenseful .', 'adversarial_example'), 1), (('ceaselessly clever and melodramatic .', 'adversarial_example'), 1), (('the story gives exhaustive prospects for large-scale action and suspense , which director shekhar kapur supplies with sizable skill .', 'adversarial_example'), 1), (('the story lets exhaustive prospects for large-scale efforts and suspense , which director shekhar kapur supplies with tremendous skill .', 'adversarial_example'), 1), (('the story lets exhaustive prospecting for large-scale action and suspense , which director shekhar kapur supplies with tremendous skill .', 'adversarial_example'), 1), (('the story lets exhaustive prospects for large-scale deeds and suspense , which director shekhar kapur supplies with tremendous skill .', 'adversarial_example'), 1), (('the story lets exhaustive prospects for large-scale measuring and suspense , which director shekhar kapur supplies with tremendous skill .', 'adversarial_example'), 1), (('the story lets exhaustive prospecting for large-scale action and hoping , which director shekhar kapur supplies with tremendous skill .', 'adversarial_example'), 1), (('the story lets exhaustive prospects for large-scale action and suspense , which director shekhar kapur supplies with tremendous skill .', 'adversarial_example'), 1)]\n" ] } ], "source": [ "import csv\n", "\n", "# Append the header line to the file\n", "with open('train.csv', 'r+') as file:\n", " content = file.readlines()\n", " file.seek(0)\n", " file.write(\"perturbed_texts,ground_truth_output\\n\")\n", " file.writelines(content)\n", "\n", "# Read the file and populate the list\n", "adversarial_examples = []\n", "with open('train.csv', 'r') as file:\n", " reader = csv.reader(file)\n", " next(reader) # Skip the header line\n", " for perturbed_text, ground_truth_output in reader:\n", " adversarial_examples.append(((perturbed_text,) + (\"adversarial_example\",), int(ground_truth_output)))\n", "\n", "print(adversarial_examples)" ] } ], "metadata": { "kernelspec": { "display_name": "textattackenv", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.18" } }, "nbformat": 4, "nbformat_minor": 2 }
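Editor's appendix (hedged sketch, not part of the original notebook): one plausible way to reuse the adversarial examples collected by the last cell is to wrap them in a `textattack.datasets.Dataset` for augmented fine-tuning. The tuple layout and the label names below are assumptions inferred from the printed output above.

from textattack.datasets import Dataset

# `adversarial_examples` (built in the last cell) holds tuples shaped like
# ((perturbed_text, "adversarial_example"), ground_truth_output)
aug_pairs = [(text, label) for (text, _tag), label in adversarial_examples]

# label_names assumes the binary rotten_tomatoes sentiment labels
aug_dataset = Dataset(aug_pairs, input_columns=["text"], label_names=["neg", "pos"])
print(len(aug_dataset), "augmented training examples")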