import pandas as pd
import numpy as np
import urllib.request
import rdkit
from rdkit import Chem
import os
import gzip
import molvs
import json
import tqdm
import io
import tarfile
import multiprocessing
from pathlib import Path
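
# This script curates the FS-Mol dataset: it extracts fsmol.tar, converts the
# per-task *.jsonl.gz files of each split to CSV, keeps the columns needed
# downstream, sanitizes the SMILES with MolVS/RDKit, and writes final
# CSV/Parquet files for the train, valid, and test splits.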

# MolVS standardizer and fragment remover, used to normalize molecules and
# strip salt/solvent fragments from multi-fragment SMILES.
standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()
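
# Note: urllib.request is imported above but no download step appears in this
# script, so fsmol.tar is assumed to already be on disk. A hypothetical fetch
# (placeholder URL, not a verified location) could look like:
#
#     fsmol_url = "https://example.com/fsmol.tar"  # placeholder
#     if not (Path(__file__).parent / "fsmol.tar").exists():
#         urllib.request.urlretrieve(fsmol_url, Path(__file__).parent / "fsmol.tar")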
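
# The archive is assumed to contain a top-level "fs-mol/" directory holding
# train/, valid/, and test/ subfolders of *.jsonl.gz task files.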
base_dir = Path(__file__).parent
tar_path = base_dir / "fsmol.tar"
extract_path = base_dir / "extracted_files" / "fs-mol"

# Extract the archive once; unpacking into "extracted_files" yields the
# "fs-mol" directory that extract_path points at.
if not extract_path.exists():
    with tarfile.open(tar_path, "r") as tar:
        tar.extractall(path=base_dir / "extracted_files")
    print("Extraction complete.")

splits = ["train", "test", "valid"]
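
# Step 1: stream every *.jsonl.gz task file of a split into a single
# FSMol_<split>_initial.csv, flushing to disk every 100k records so the whole
# split never has to fit in memory at once.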
for split in splits:
    folder_path = extract_path / split
    csv_output_path = base_dir / f"FSMol_{split}_initial.csv"

    print(f"Processing {split} data from: {folder_path}")

    jsonl_gz_files = list(folder_path.glob("*.jsonl.gz"))

    with open(csv_output_path, "w", encoding="utf-8") as csv_file:
        # Only the very first chunk written to the CSV gets a header row.
        first_file = True

        for jsonl_gz_file in jsonl_gz_files:
            print(f"Processing: {jsonl_gz_file}")

            with gzip.open(jsonl_gz_file, "rt", encoding="utf-8") as f:
                chunk = []

                for line in f:
                    chunk.append(json.loads(line))

                    # Flush every 100k records to keep memory use bounded.
                    if len(chunk) >= 10**5:
                        df = pd.DataFrame(chunk)
                        df.to_csv(csv_file, index=False, header=first_file)
                        first_file = False
                        chunk = []

                # Write whatever is left over from this file.
                if chunk:
                    df = pd.DataFrame(chunk)
                    df.to_csv(csv_file, index=False, header=first_file)
                    first_file = False

    print(f"CSV file saved: {csv_output_path}")
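
# Step 2: keep only the columns needed downstream and cast the 'Property'
# activity label to int. The train split is rewritten in chunks; valid and test
# fit in memory. From here on, files are read and written via relative paths,
# so the script is assumed to run from the directory it lives in.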
selected_columns = ['SMILES', 'Property', 'Assay_ID', 'RegressionProperty', 'LogRegressionProperty', 'Relation']
chunksize = 10**5

for split in splits:
    if split == "train":
        first_chunk = True

        for chunk in pd.read_csv('FSMol_train_initial.csv', chunksize=chunksize):
            chunk = chunk[selected_columns].copy()
            chunk['Property'] = chunk['Property'].astype(int)

            # Write the first chunk with a header, then append.
            mode = 'w' if first_chunk else 'a'
            header = first_chunk
            chunk.to_csv('FSMol_train_formatted.csv', mode=mode, index=False, header=header)

            first_chunk = False

    else:
        df = pd.read_csv(f'FSMol_{split}_initial.csv')
        df = df[selected_columns].copy()
        df['Property'] = df['Property'].astype(int)
        df.to_csv(f'FSMol_{split}_formatted.csv', index=False)
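
# Step 3: sanitize the valid and test splits in memory. Each SMILES is parsed
# with RDKit, standardized, stripped of extra fragments, re-canonicalized into
# a new 'X' column, and checked with MolVS validation (problems are printed).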
test_formatted = pd.read_csv("FSMol_test_formatted.csv")
valid_formatted = pd.read_csv("FSMol_valid_formatted.csv")


def test_valid_sanitization(formatted_df, output_filename):
    # Standardize every SMILES, de-salt it, and store the canonical result in 'X'.
    formatted_df['X'] = [
        rdkit.Chem.MolToSmiles(
            fragment_remover.remove(
                standardizer.standardize(
                    rdkit.Chem.MolFromSmiles(smiles))))
        for smiles in formatted_df['SMILES']]

    # Collect MolVS validation warnings for the sanitized SMILES.
    problems = []
    for index, row in tqdm.tqdm(formatted_df.iterrows(), total=len(formatted_df)):
        result = molvs.validate_smiles(row['X'])
        if len(result) == 0:
            continue
        problems.append((row['X'], result))

    for smiles, alert in problems:
        print(f"SMILES: {smiles}, problem: {alert[0]}")

    formatted_df.to_csv(f"{output_filename}.csv", index=False)


test_valid_sanitization(test_formatted, "FSMol_test_sanitized")
test_valid_sanitization(valid_formatted, "FSMol_valid_sanitized")
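
# Step 4: sanitize the much larger train split chunk by chunk with a
# multiprocessing pool. Molecules that fail to parse or standardize yield None
# in 'X' rather than aborting the run.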
def process_smiles(smiles):
    # Parse, standardize, and de-salt a single SMILES string; return the
    # canonical SMILES, or None if parsing or standardization fails.
    try:
        mol = Chem.MolFromSmiles(smiles)
        if mol:
            mol = standardizer.standardize(mol)
            mol = fragment_remover.remove(mol)
            return Chem.MolToSmiles(mol)
    except Exception:
        return None
    return None


def process_chunk(df):
    # Single-process helper: map process_smiles over a whole DataFrame chunk.
    df['X'] = df['SMILES'].map(process_smiles)
    return df


def validate_chunk(df):
    # Print MolVS validation messages for every sanitized SMILES in the chunk.
    for smiles in df['X']:
        if smiles:
            result = molvs.validate_smiles(smiles)
            if result:
                print(f"SMILES: {smiles}, problem: {result[0]}")
    return df


# Note: on platforms where multiprocessing spawns rather than forks workers
# (e.g. Windows), this pool usage would need an `if __name__ == "__main__":`
# guard around the call below.
def process_large_csv(input_file, output_prefix, chunksize=10**5, num_processes=8, start_chunk=0):
    with multiprocessing.Pool(processes=num_processes) as pool:
        # skiprows lets an interrupted run resume from start_chunk.
        skiprows = range(1, start_chunk * chunksize + 1)
        reader = pd.read_csv(input_file, chunksize=chunksize, header=0, skiprows=skiprows)

        for i, chunk in enumerate(reader, start=start_chunk):
            print(f"Processing chunk {i}...")

            # pool.map spreads the SMILES of this chunk across the worker
            # processes (a single pool.apply call would run the whole chunk in
            # one worker).
            chunk['X'] = pool.map(process_smiles, chunk['SMILES'])
            validated_chunk = validate_chunk(chunk)

            output_file = f"{output_prefix}_{i}.csv"
            validated_chunk.to_csv(output_file, index=False)


process_large_csv('FSMol_train_formatted.csv', 'FSMol_train_sanitized', start_chunk=0)
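
# Step 5: finalize the valid/test splits: report rows whose sanitized SMILES is
# missing, rename 'X' -> 'SMILES' and 'Property' -> 'Y', put SMILES first, and
# write CSV plus Parquet outputs (Parquet writing requires pyarrow or
# fastparquet to be installed).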
FSMol_test_sanitized = pd.read_csv("FSMol_test_sanitized.csv")
FSMol_valid_sanitized = pd.read_csv("FSMol_valid_sanitized.csv")


def drop_column(sanitized_df, final_output):
    # Drop the original SMILES column; the sanitized version lives in 'X'.
    sanitized_df = sanitized_df.drop('SMILES', axis=1)

    # Report any rows where sanitization produced no SMILES.
    non_smiles = pd.isnull(sanitized_df['X'])
    print(sanitized_df.loc[non_smiles, 'X'])

    sanitized_df.rename(columns={'X': 'SMILES', 'Property': 'Y'}, inplace=True)
    cols = ['SMILES'] + [col for col in sanitized_df.columns if col != 'SMILES']
    sanitized_df = sanitized_df[cols]

    sanitized_df.to_csv(f'{final_output}.csv', index=False)
    sanitized_df.to_parquet(f'{final_output}.parquet', index=False)


drop_column(FSMol_test_sanitized, 'FSMol_test')
drop_column(FSMol_valid_sanitized, 'FSMol_valid')
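
# Step 6: apply the same finalization to the train chunk files and concatenate
# them into one train CSV/Parquet. The hard-coded range assumes the chunked
# sanitization produced FSMol_train_sanitized_0.csv through
# FSMol_train_sanitized_50.csv.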
train_final = []

for i in range(51):
    train_sanitized_df = pd.read_csv(f'FSMol_train_sanitized_{i}.csv')
    train_sanitized_df = train_sanitized_df.drop('SMILES', axis=1)

    # Report any rows where sanitization produced no SMILES.
    non_smiles = pd.isnull(train_sanitized_df['X'])
    print(train_sanitized_df.loc[non_smiles, 'X'])

    train_sanitized_df.rename(columns={'X': 'SMILES', 'Property': 'Y'}, inplace=True)
    cols = ['SMILES'] + [col for col in train_sanitized_df.columns if col != 'SMILES']
    train_sanitized_df = train_sanitized_df[cols]

    train_final.append(train_sanitized_df)


train_final_df = pd.concat(train_final, ignore_index=True)

train_final_df.to_csv('FSMol_train.csv', index=False)
train_final_df.to_parquet('FSMol_train.parquet', index=False)