PFEemp2024 committed on
Commit
752c107
1 Parent(s): b264e47

Upload Clustering.py

Files changed (1)
  1. Clustering.py +255 -0
Clustering.py ADDED
@@ -0,0 +1,255 @@
import csv
import json
from math import floor, sqrt

import numpy as np
import textattack
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from textattack.shared import AttackedText


class Clustering:
    def __init__(self, file_, victim_model_wrapper, victim_model, attack):
        self.file = file_
        self.victim_model_wrapper = victim_model_wrapper
        self.victim_model = victim_model
        self.attack = attack

    def get_embedding_layer(self, model, text_input):
        """Return the input-embedding vectors of `text_input` under `model`."""
        if isinstance(model, textattack.models.helpers.T5ForTextToText):
            raise NotImplementedError(
                "`get_embedding_layer` for T5ForTextToText has not been implemented yet."
            )

        model.train()
        embedding_layer = model.get_input_embeddings()
        embedding_layer.weight.requires_grad = True

        model.zero_grad()
        model_device = next(model.parameters()).device
        # The original referenced an undefined global `tokenizer`; we assume the
        # tokenizer attached to the victim model wrapper is the intended one.
        tokenizer = self.victim_model_wrapper.tokenizer
        input_dict = tokenizer(
            [text_input],
            add_special_tokens=True,
            return_tensors="pt",
            padding="max_length",
            truncation=True,
        )
        input_dict = input_dict.to(model_device)
        embedding = embedding_layer(input_dict["input_ids"])
        return embedding

    def prepare_sentences(self):
        """Build per-word embeddings, perturbation masks, and scores for every sample."""
        file = self.file
        victim_model = self.victim_model
        victim_model_wrapper = self.victim_model_wrapper
        attack = self.attack
        with open(file, "r") as f:
            data = json.load(f)
        global_sentences = []
        global_masks = []
        global_scores = []
        for item in data["data"]:
            original_words = item["original"].split()

            # Iterate over each sample generated for this original sentence.
            sentences = []
            masks = []
            scores = []
            _, indices_to_order = attack.get_indices_to_order(
                AttackedText(item["original"])
            )
            for sample in item["samples"]:
                scores.append(sample["score"])
                attacked_text = AttackedText(sample["attacked_text"])
                word2token_mapping_0 = attacked_text.align_with_model_tokens(
                    victim_model_wrapper
                )
                embedding_0 = self.get_embedding_layer(
                    model=victim_model, text_input=sample["attacked_text"]
                )
                embedding_vectors_0 = embedding_0[0].detach().cpu().numpy()

                sentence_embedding = []
                mask = []
                for idx in indices_to_order:
                    # Model tokens that correspond to this word index.
                    matched_tokens_0 = word2token_mapping_0[idx]
                    embedding_from_layer = np.mean(
                        embedding_vectors_0[matched_tokens_0], axis=0
                    )
                    sentence_embedding.append(embedding_from_layer)
                    # Mark the word as perturbed (1) if it differs from the original.
                    if original_words[idx] != attacked_text.words[idx]:
                        mask.append(1)
                    else:
                        mask.append(0)
                sentences.append(sentence_embedding)
                masks.append(mask)
            global_sentences.append(sentences)
            global_masks.append(masks)
            global_scores.append(scores)
        return global_sentences, global_masks, global_scores

    def get_unified_mask(self, masks):
        """OR all per-sample masks of one sentence into a single mask."""
        unified_mask = np.zeros_like(masks[0])
        for mask in masks:
            unified_mask = np.logical_or(unified_mask, mask)
        return unified_mask.astype(int)

    def get_global_unified_masks(self, masks):
        return [self.get_unified_mask(masks=mask) for mask in masks]

    def apply_mask_on_vectors(self, sentences, mask):
        """Zero out the embedding of every word position that is never perturbed."""
        for i in range(len(sentences)):
            sentence = sentences[i]
            sentences[i] = [
                sentence[j] if mask[j] == 1 else np.zeros_like(sentence[j])
                for j in range(len(sentence))
            ]
        return sentences

    def apply_mask_on_global_vectors(self, global_sentences, unified_masks):
        return [
            self.apply_mask_on_vectors(sentences, mask)
            for sentences, mask in zip(global_sentences, unified_masks)
        ]

    def matrix_to_sentences(self, matrix_sentences):
        """Concatenate the word vectors of each sample into one row per sample."""
        return np.vstack([np.concatenate(sentence) for sentence in matrix_sentences])

    def global_matrix_to_global_sentences(self, global_matrix_sentences):
        # TODO: check the compatibility of the text first.
        return [
            self.matrix_to_sentences(sentences) for sentences in global_matrix_sentences
        ]

    def find_best_clustering(self, sentences, max_clusters, method="silhouette"):
        """Cluster the samples with k-means and return (centroids, labels)."""
        if method == "silhouette":
            # Pick the k (2..max_clusters) with the highest silhouette score;
            # the loop starts at 2 because the silhouette score is undefined
            # for a single cluster.
            max_silhouette_avg = -1
            final_cluster_labels = None
            best_centroids = None

            for num_clusters in range(2, max_clusters + 1):
                kmeans = KMeans(n_clusters=num_clusters).fit(sentences)
                cluster_labels = kmeans.labels_
                silhouette_avg = silhouette_score(sentences, cluster_labels)

                if silhouette_avg > max_silhouette_avg:
                    max_silhouette_avg = silhouette_avg
                    final_cluster_labels = cluster_labels
                    best_centroids = kmeans.cluster_centers_

            return best_centroids, final_cluster_labels
        elif method == "thumb-rule":
            # Rule of thumb: k is approximately sqrt(n / 2).
            best_k = floor(sqrt(len(sentences) / 2)) + 1
            kmeans = KMeans(n_clusters=best_k).fit(sentences)
            return kmeans.cluster_centers_, kmeans.labels_
        elif method == "custom":
            # Fixed number of clusters.
            best_k = 5
            kmeans = KMeans(n_clusters=best_k).fit(sentences)
            return kmeans.cluster_centers_, kmeans.labels_

    def find_global_best_clustering(
        self, global_sentences, max_clusters_per_group, method
    ):
        return [
            self.find_best_clustering(
                sentences,
                min(len(sentences) - 1, max_clusters_per_group),
                method=method,
            )
            for sentences in global_sentences
        ]

    def get_global_distances(self, sentences, global_clustering):
        """Euclidean distance of every sample to the centroid of its cluster."""
        global_distances = []
        for X, clustering in zip(sentences, global_clustering):
            centroids = clustering[0]
            labels = clustering[1]
            global_distances.append(
                [
                    np.sqrt(np.sum((X[i] - centroids[labels[i]]) ** 2))
                    for i in range(len(X))
                ]
            )
        return global_distances

    def select_diverce_samples(self, scores, distances, clustering):
        """Pick, per cluster, the sample with the lowest distance-normalized score."""
        scores_ = np.array(scores)
        distances_ = np.array(distances)
        labels_ = np.array(clustering)
        selected_samples = []

        normalized_distances = distances_ / np.sum(distances_)
        finalscores = scores_ / normalized_distances

        clusters = np.unique(labels_)
        for cluster in clusters:
            indices = np.where(labels_ == cluster)[0]
            cluster_finalscores = finalscores[indices]
            best_sample_index = indices[np.argmin(cluster_finalscores)]
            selected_samples.append(best_sample_index)
        return selected_samples

    def global_select_diverce_sample(self, global_scores, sentences, global_clustering):
        global_distances = self.get_global_distances(sentences, global_clustering)
        labels_ = [X[1] for X in global_clustering]

        return [
            self.select_diverce_samples(scores, distances, clustering)
            for scores, distances, clustering in zip(
                global_scores, global_distances, labels_
            )
        ]

    def save_json(self, selected_samples, output):
        """Write a copy of the input JSON keeping only the selected samples."""
        with open(self.file) as f:
            data = json.load(f)

        selected_data = []
        for item, indices in zip(data["data"], selected_samples):
            new_item = item.copy()
            new_item["samples"] = [item["samples"][i] for i in indices]
            selected_data.append(new_item)

        with open(output, "w") as f:
            json.dump({"data": selected_data}, f)

    def save_csv(self, selected_samples, ground_truth_output, train_file):
        """Append the selected attacked texts and their label to a training CSV."""
        with open(self.file) as f:
            data = json.load(f)["data"]

        with open(train_file, "a", newline="") as f:
            writer = csv.writer(f)
            for item, indices in zip(data, selected_samples):
                samples = [item["samples"][i] for i in indices]
                for sample in samples:
                    # The original wrote the whole sample dict; we assume the
                    # attacked text string is the intended CSV column.
                    row = [sample["attacked_text"], ground_truth_output]
                    writer.writerow(row)
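
For reference, a minimal usage sketch of the class (not part of the commit): it assumes a TextAttack HuggingFace model wrapper, the TextFooler attack recipe, placeholder file paths and label value, and a JSON file in the format `prepare_sentences` expects.

# Hypothetical driver, a sketch only: the model name, file paths, attack
# recipe, and label value below are assumptions, not part of the commit.
import transformers
import textattack

model = transformers.AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-uncased")
wrapper = textattack.models.wrappers.HuggingFaceModelWrapper(model, tokenizer)
attack = textattack.attack_recipes.TextFoolerJin2019.build(wrapper)

clustering = Clustering("samples.json", wrapper, model, attack)

# Embed the perturbed samples, mask unperturbed words, and flatten per sample.
sentences, masks, scores = clustering.prepare_sentences()
unified_masks = clustering.get_global_unified_masks(masks)
masked = clustering.apply_mask_on_global_vectors(sentences, unified_masks)
flat = clustering.global_matrix_to_global_sentences(masked)

# Cluster each group, pick one diverse sample per cluster, and export.
clusterings = clustering.find_global_best_clustering(flat, 10, method="silhouette")
selected = clustering.global_select_diverce_sample(scores, flat, clusterings)
clustering.save_json(selected, "selected.json")
clustering.save_csv(selected, 1, "train.csv")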