187Matt committed on
Commit
a61a8c1
1 Parent(s): dbb1bae

Upload darkbert.py

Files changed (1)
  1. darkbert.py +151 -0
darkbert.py ADDED
@@ -0,0 +1,151 @@
+ # Copyright 2022 Christopher K. Schmitt
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from sentence_transformers import SentenceTransformer
+ from sklearn.manifold import TSNE
+ from sklearn.cluster import DBSCAN
+ from sklearn.metrics import silhouette_score, calinski_harabasz_score
+ from pathlib import Path
+ from bs4 import BeautifulSoup
+ from argparse import ArgumentParser
+
+ import matplotlib.pyplot as plt
+ import numpy as np
+ import nltk
+
+ # The list of huggingface transformers with tensorflow
+ # support and compatible tokenizers.
+ available_models = {
+     "bert": "sentence-transformers/multi-qa-distilbert-cos-v1",
+     "albert": "sentence-transformers/paraphrase-albert-small-v2",
+     "roberta": "sentence-transformers/all-distilroberta-v1",
+ }
+
+ display_titles = {
+     "bert": "BERT",
+     "albert": "ALBERT",
+     "roberta": "RoBERTa",
+ }
+
+ # Define the CLI interface for modeling our data with
+ # different transformer models. We want to control the
+ # type of the tokenizer and the transformer we use, as well
+ # as the input and output directories
+ parser = ArgumentParser()
+ parser.add_argument("-m", "--model", choices=available_models.keys(), required=True)
+ parser.add_argument("-i", "--input", required=True)
+ parser.add_argument("-o", "--output", required=True)
+
+ args = parser.parse_args()
+ input_dir = args.input
+ output_dir = args.output
+ model_name = available_models[args.model]
+ display_name = display_titles[args.model]
+
+ # To remove random glyphs and other noise, we only
+ # keep words that appear in the nltk words corpus
+ nltk.download("words")
+ words = set(nltk.corpus.words.words())
+
+ def extract_words(document):
+     cleaned = ""
+
+     for word in nltk.wordpunct_tokenize(document):
+         if word.lower() in words:
+             cleaned += word.lower() + " "
+
+     return cleaned
+
+ # Iterate over all of the files in the provided data
+ # directory. Parse each file with Beautiful Soup to pull
+ # the relevant text out of the markup.
+ data = Path(input_dir).iterdir()
+ data = map(lambda doc: doc.read_bytes(), data)
+ data = map(lambda doc: BeautifulSoup(doc, "html.parser"), data)
+ data = map(lambda doc: doc.get_text(), data)
+ data = filter(lambda doc: len(doc) > 0, data)
+ data = map(extract_words, data)
+ data = filter(lambda doc: len(doc) > 10, data)
+ data = list(data)
+
+ # Initialize transformer models and predict all of the
+ # document embeddings as computed by BERT and friends
+ model = SentenceTransformer(model_name)
+ embeddings = model.encode(data, show_progress_bar=True)
+
+ # Fit TSNE model for embedding space. Squash down to 2
+ # dimensions for visualization purposes.
+ tsne = TSNE(n_components=2, random_state=2, init="pca", learning_rate="auto", perplexity=40)
+ tsne = tsne.fit_transform(embeddings)
+
+ # Hyperparameter optimizations
+ silhouettes = []
+ outliers = []
+ ch = []
+
+ for eps in np.arange(0.001, 1, 0.001):
+     dbscan = DBSCAN(eps, metric="cosine", n_jobs=-1)
+     dbscan = dbscan.fit_predict(embeddings)
+
+     if len(np.unique(dbscan)) > 1:
+         silhouettes.append(silhouette_score(embeddings, dbscan, metric="cosine"))
+         ch.append(calinski_harabasz_score(embeddings, dbscan))
+     else:
+         silhouettes.append(0)
+         ch.append(0)
+
+     outliers.append(len(dbscan[dbscan == -1]))
+
+ for p in range(15, 51):
+     best = np.argmax(silhouettes)
+
+     dbscan = DBSCAN(0.001 + 0.001 * best, metric="cosine", n_jobs=-1)
+     dbscan = dbscan.fit_predict(embeddings)
+
+     tsne = TSNE(n_components=2, perplexity=p, learning_rate="auto", init="pca", metric="cosine")
+     tsne = tsne.fit_transform(embeddings)
+
+     plt.figure()
+     plt.scatter(tsne[dbscan != -1][:, 0], tsne[dbscan != -1][:, 1], s=0.5, c=dbscan[dbscan != -1], cmap="hsv")
+     plt.scatter(tsne[dbscan == -1][:, 0], tsne[dbscan == -1][:, 1], s=0.5, c="#abb8c3")
+     plt.title(f"{display_name} Embeddings Visualized with T-SNE (p = {p})")
+     plt.savefig(f"{output_dir}/tsne_{p:02}.png", format="png", dpi=600)
+     plt.close()
+
+ plt.figure()
+ plt.plot(np.arange(0.001, 1, 0.001), silhouettes, lw=0.5, color="#dc322f")
+ plt.legend()
+ plt.xlabel("Epsilon")
+ plt.ylabel("silhouette score")
+ plt.title("Optimizing Epsilon by Silhouette Score")
+ plt.savefig(f"silhouettes.png", format="png", dpi=600)
+ plt.close()
+
+ plt.figure()
+ plt.plot(np.arange(0.001, 1, 0.001), outliers, lw=0.5, color="#dc322f")
+ plt.legend()
+ plt.xlabel("Epsilon")
+ plt.ylabel("outliers")
+ plt.title("Optimizing Epsilon by Number of Outliers")
+ plt.savefig(f"outliers.png", format="png", dpi=600)
+ plt.close()
+
+ plt.figure()
+ plt.plot(np.arange(0.001, 1, 0.001), ch, lw=0.5, color="#dc322f")
+ plt.legend()
+ plt.xlabel("Epsilon")
+ plt.ylabel("Calinski-Harabasz score")
+ plt.title("Optimizing Epsilon by Calinski-Harabasz Score")
+ plt.savefig(f"calinski-harabasz.png", format="png", dpi=600)
+ plt.close()
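
Usage sketch: based on the argparse block above, the script takes a model key (bert, albert, or roberta), an input directory of HTML documents, and an output directory for the T-SNE figures, e.g. python darkbert.py -m roberta -i pages -o figures, where the directory names are placeholders. The snippet below is a minimal, hedged illustration of the core clustering step in isolation: embed a few toy strings with one of the same sentence-transformers checkpoints, sweep DBSCAN's eps, and keep the value with the best cosine silhouette score. The toy documents, the coarser eps grid, and min_samples=2 (so a tiny sample can form clusters; the script keeps DBSCAN's default) are illustrative assumptions, not part of darkbert.py.

    from sentence_transformers import SentenceTransformer
    from sklearn.cluster import DBSCAN
    from sklearn.metrics import silhouette_score
    import numpy as np

    # Toy stand-ins for the cleaned page texts produced by extract_words().
    docs = [
        "fresh accounts and credentials for sale",
        "buy accounts and credentials here",
        "read the forum rules before posting",
        "forum rules and moderation policy",
    ]

    # Same checkpoint the script maps to the "roberta" choice.
    model = SentenceTransformer("sentence-transformers/all-distilroberta-v1")
    embeddings = model.encode(docs)

    best_eps, best_score = None, -1.0
    for eps in np.arange(0.05, 1.0, 0.05):
        # min_samples lowered only because the toy set is tiny.
        labels = DBSCAN(eps=eps, min_samples=2, metric="cosine").fit_predict(embeddings)
        if len(np.unique(labels)) > 1:  # silhouette needs at least two distinct labels
            score = silhouette_score(embeddings, labels, metric="cosine")
            if score > best_score:
                best_eps, best_score = eps, score

    print(best_eps, best_score)

The full script applies the same idea over a fine grid (eps from 0.001 to 1 in steps of 0.001), records silhouette, Calinski-Harabasz, and outlier counts for each eps, and then visualizes the clusters found at the best silhouette score with T-SNE across a range of perplexities.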