haneulpark committed
Commit c52364f · verified · 1 Parent(s): c5ca712

Update Preprocessing Script.py

Files changed (1)
  1. Preprocessing Script.py  +214 -1

Preprocessing Script.py CHANGED
@@ -15,6 +15,13 @@ import molvs
standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()

+ # 2. Load the original dataset into a pandas DataFrame
+
+ # PLEASE download 'raw_data.csv' first, then run this script:
+ # https://huggingface.co/datasets/maomlab/AggregatorAdvisor/blob/main/raw_data.csv
+
+ AA = pd.read_csv('raw_data.csv')  # AA is an abbreviation of Aggregator Advisor
+
#3. Resolve SMILES parse error

# Smiles is 'None', found the compound on ChemSpider
@@ -48,7 +55,213 @@ for index, row in tqdm.tqdm(AA.iterrows()):
for substance_id, alert in problems:
    print(f"substance_id: {substance_id}, problem: {alert[0]}")

+ # Result interpretation
+ # - "Can't kekulize mol": kekulization would break the molecule apart, so the
+ #   standardizer could not proceed. It does not mean the molecule is bad, only
+ #   that normalization failed.
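+ #   For instance (illustrative only, not a compound from this dataset), pyrrole
+ #   written without its explicit N-H triggers exactly this error:
+ #       from rdkit import Chem
+ #       Chem.MolFromSmiles('c1ccnc1')     # returns None: "Can't kekulize mol"
+ #       Chem.MolFromSmiles('c1cc[nH]c1')  # parses fine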
+
#5. Select columns and rename the dataset

AA.rename(columns={'X': 'new SMILES'}, inplace=True)
- AA[['new SMILES', 'substance_id', 'aggref_index', 'logP']].to_csv('AggregatorAdvisor.csv', index=False)
+ AA[['new SMILES', 'substance_id', 'aggref_index', 'logP']].to_csv('AggregatorAdvisor.csv', index=False)
+
+ #6. Import modules to split the dataset
+
+ import sys
+ import numpy as np  # np is used by compute_fingerprint below
+ from rdkit import DataStructs
+ from rdkit.Chem import AllChem as Chem
+ from rdkit.Chem import PandasTools
+ import scipy.spatial.distance as ssd
+ from scipy.cluster import hierarchy
+
+ #7. Split the dataset into test and train
+
+ class MolecularFingerprint:
+     def __init__(self, fingerprint):
+         self.fingerprint = fingerprint
+
+     def __str__(self):
+         return self.fingerprint.__str__()
+
+ def compute_fingerprint(molecule):
+     try:
+         fingerprint = Chem.GetMorganFingerprintAsBitVect(molecule, 2, nBits=1024)
+         result = np.zeros(len(fingerprint), np.int32)
+         DataStructs.ConvertToNumpyArray(fingerprint, result)
+         return MolecularFingerprint(result)
+     except Exception:
+         print("Fingerprints for a structure cannot be calculated")
+         return None
+
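+ # Illustrative check (example only; ethanol 'CCO' is an arbitrary molecule):
+ #     fp = compute_fingerprint(Chem.MolFromSmiles('CCO'))
+ #     fp.fingerprint is then a length-1024 numpy int32 array of 0/1 bits
+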
+ def tanimoto_distances_yield(fingerprints, num_fingerprints):
+     for i in range(1, num_fingerprints):
+         yield [1 - x for x in DataStructs.BulkTanimotoSimilarity(fingerprints[i], fingerprints[:i])]
+
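+ # The generator above yields row i of the lower triangle of the pairwise
+ # Tanimoto distance matrix on each next() call, so the full N x N matrix is
+ # never held in memory at once.
+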
+ def butina_cluster(fingerprints, num_points, distance_threshold, reordering=False):
+     nbr_lists = [None] * num_points
+     for i in range(num_points):
+         nbr_lists[i] = []
+
+     dist_fun = tanimoto_distances_yield(fingerprints, num_points)
+     for i in range(1, num_points):
+         dists = next(dist_fun)
+
+         for j in range(i):
+             dij = dists[j]
+             if dij <= distance_threshold:
+                 nbr_lists[i].append(j)
+                 nbr_lists[j].append(i)
+
+     t_lists = [(len(y), x) for x, y in enumerate(nbr_lists)]
+     t_lists.sort(reverse=True)
+
+     res = []
+     seen = [0] * num_points
+     while t_lists:
+         _, idx = t_lists.pop(0)
+         if seen[idx]:
+             continue
+         t_res = [idx]
+         for nbr in nbr_lists[idx]:
+             if not seen[nbr]:
+                 t_res.append(nbr)
+                 seen[nbr] = 1
+         if reordering:
+             nbr_nbr = [nbr_lists[t] for t in t_res]
+             nbr_nbr = frozenset().union(*nbr_nbr)
+             for x, y in enumerate(t_lists):
+                 y1 = y[1]
+                 if seen[y1] or (y1 not in nbr_nbr):
+                     continue
+                 nbr_lists[y1] = set(nbr_lists[y1]).difference(t_res)
+                 t_lists[x] = (len(nbr_lists[y1]), y1)
+             t_lists.sort(reverse=True)
+         res.append(tuple(t_res))
+     return tuple(res)
+
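+ # butina_cluster implements Butina leader clustering: each point collects its
+ # neighbors within distance_threshold, the point with the most neighbors seeds
+ # a cluster together with its not-yet-assigned neighbors, and the process
+ # repeats. The result is a tuple of clusters, each a tuple of row indices whose
+ # first element is the cluster centroid.
+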
+ def hierarchical_cluster(fingerprints):
+     num_fingerprints = len(fingerprints)
+     av_cluster_size = 8
+     dists = []
+
+     for i in range(0, num_fingerprints):
+         sims = DataStructs.BulkTanimotoSimilarity(fingerprints[i], fingerprints)
+         dists.append([1 - x for x in sims])
+
+     dis_array = ssd.squareform(dists)
+     Z = hierarchy.linkage(dis_array)
+     cluster_amount = int(num_fingerprints / av_cluster_size)
+     clusters = hierarchy.cut_tree(Z, n_clusters=cluster_amount)
+
+     clusters = list(clusters.transpose()[0])
+     cs = []
+     for i in range(max(clusters) + 1):
+         cs.append([])
+
+     for i in range(len(clusters)):
+         cs[clusters[i]].append(i)
+     return cs
+
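+ # hierarchical_cluster builds the full pairwise Tanimoto distance matrix, runs
+ # SciPy single-linkage clustering, and cuts the tree into roughly
+ # num_fingerprints / 8 clusters; cs maps cluster number -> list of row indices.
+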
+ def cluster_fingerprints(fingerprints, method="Auto"):
+     num_fingerprints = len(fingerprints)
+
+     if method == "Auto":
+         method = "TB" if num_fingerprints >= 10000 else "Hierarchy"
+
+     if method == "TB":
+         cutoff = 0.56
+         print("Butina clustering is selected. Dataset size is:", num_fingerprints)
+         clusters = butina_cluster(fingerprints, num_fingerprints, cutoff)
+         return clusters
+
+     elif method == "Hierarchy":
+         print("Hierarchical clustering is selected. Dataset size is:", num_fingerprints)
+         clusters = hierarchical_cluster(fingerprints)
+         return clusters
+
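+ # Illustrative call (example only):
+ #     clusters = cluster_fingerprints(fingerprints)
+ # "Auto" picks Butina ("TB") at 10,000+ fingerprints, hierarchical otherwise.
+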
+ def split_dataframe(dataframe, smiles_col_index, fraction_to_train, split_for_exact_fraction=True, cluster_method="Auto"):
+     try:
+         import math
+         smiles_column_name = dataframe.columns[smiles_col_index]
+         molecule = 'molecule'
+         fingerprint = 'fingerprint'
+         group = 'group'
+         testing = 'testing'
+
+         try:
+             PandasTools.AddMoleculeColumnToFrame(dataframe, smiles_column_name, molecule)
+         except Exception:
+             print("Exception occurred during molecule generation...")
+
+         dataframe = dataframe.loc[dataframe[molecule].notnull()]
+         dataframe[fingerprint] = [compute_fingerprint(m) for m in dataframe[molecule]]
+         dataframe = dataframe.loc[dataframe[fingerprint].notnull()]
+
+         fingerprints = [Chem.GetMorganFingerprintAsBitVect(m, 2, nBits=2048) for m in dataframe[molecule]]
+         clusters = cluster_fingerprints(fingerprints, method=cluster_method)
+
+         dataframe.drop([molecule, fingerprint], axis=1, inplace=True)
+
+         last_training_index = int(math.ceil(len(dataframe) * fraction_to_train))
+         clustered = None
+         cluster_no = 0
+         mol_count = 0
+
+         for cluster in clusters:
+             cluster_no = cluster_no + 1
+             try:
+                 one_cluster = dataframe.iloc[list(cluster)].copy()
+             except Exception:
+                 print("Wrong indexes in Cluster: %i, Molecules: %i" % (cluster_no, len(cluster)))
+                 continue
+
+             one_cluster.loc[:, 'ClusterNo'] = cluster_no
+             one_cluster.loc[:, 'MolCount'] = len(cluster)
+
+             if (mol_count < last_training_index) or (cluster_no < 2):
+                 one_cluster.loc[:, group] = 'training'
+             else:
+                 one_cluster.loc[:, group] = testing
+
+             mol_count += len(cluster)
+             clustered = pd.concat([clustered, one_cluster], ignore_index=True)
+
+         if split_for_exact_fraction:
+             print("Adjusting test to train ratio. It may split one cluster")
+             clustered.loc[last_training_index + 1:, group] = testing
+
+         print("Clustering finished. Training set size is %i, Test set size is %i, Fraction %.2f" %
+               (len(clustered.loc[clustered[group] != testing]),
+                len(clustered.loc[clustered[group] == testing]),
+                len(clustered.loc[clustered[group] == testing]) / len(clustered)))
+
+     except KeyboardInterrupt:
+         print("Clustering interrupted.")
+
+     return clustered
+
+
+ def realistic_split(df, smile_col_index, frac_train, split_for_exact_frac=True, cluster_method="Auto"):
+     return split_dataframe(df.copy(), smile_col_index, frac_train, split_for_exact_frac, cluster_method=cluster_method)
+
+ def split_df_into_train_and_test_sets(df):
+     df['group'] = df['group'].str.replace(' ', '_')
+     df['group'] = df['group'].str.lower()
+     train = df[df['group'] == 'training']
+     test = df[df['group'] == 'testing']
+     return train, test
+
+ smiles_index = 0  # because the SMILES column is the first column
+ realistic = realistic_split(AA.copy(), smiles_index, 0.8, split_for_exact_frac=True, cluster_method="Auto")
+ realistic_train, realistic_test = split_df_into_train_and_test_sets(realistic)
+
+ #8. Test and train datasets have been made
+
+ selected_columns = realistic_train[['new SMILES', 'substance_id', 'aggref_index', 'logP', 'reference']]
+ selected_columns.to_csv("AggregatorAdvisor_train.csv", index=False)
+ selected_columns = realistic_test[['new SMILES', 'substance_id', 'aggref_index', 'logP', 'reference']]
+ selected_columns.to_csv("AggregatorAdvisor_test.csv", index=False)