# FSMol_preprocessing.py
# 1. Load modules
import pandas as pd
import numpy as np
import urllib.request
import rdkit
from rdkit import Chem
import os
import gzip
import molvs
import json
import tqdm
import io
import tarfile
import multiprocessing
from pathlib import Path
standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()
# 2. Download dataset
# https://github.com/microsoft/FS-Mol/?tab=readme-ov-file
# We used the command below:
# wget --content-disposition https://figshare.com/ndownloader/files/31345321
# 3. Convert .tar file to pandas datasets and csv files
base_dir = Path(__file__).parent
tar_path = base_dir / "fsmol.tar"
extract_path = base_dir / "extracted_files" / "fs-mol"
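# A programmatic alternative to the wget command above (a sketch using
# urllib.request, which is imported but otherwise unused); it assumes the
# figshare link from step 2 still serves fsmol.tar.
if not tar_path.exists():
    urllib.request.urlretrieve(
        "https://figshare.com/ndownloader/files/31345321", tar_path)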
# Extract the tar file if not already extracted; the archive unpacks into a
# top-level fs-mol/ directory, so extracting under extracted_files/ yields
# the extract_path defined above
if not extract_path.exists():
    with tarfile.open(tar_path, "r") as tar:
        tar.extractall(path=base_dir / "extracted_files")
    print("Extraction complete.")
# Define dataset splits
splits = ["train", "test", "valid"]
# Process each split and save as CSV
for split in splits:
    folder_path = extract_path / split
    csv_output_path = base_dir / f"FSMol_{split}_initial.csv"
    print(f"Processing {split} data from: {folder_path}")
    # Collect all .jsonl.gz files in the folder
    jsonl_gz_files = list(folder_path.glob("*.jsonl.gz"))
    # Open CSV file for writing
    with open(csv_output_path, "w", encoding="utf-8") as csv_file:
        first_file = True  # Track if it's the first write, to include headers
        for jsonl_gz_file in jsonl_gz_files:
            print(f"Processing: {jsonl_gz_file}")  # Debugging
            with gzip.open(jsonl_gz_file, "rt", encoding="utf-8") as f:
                chunk = []
                for line in f:
                    chunk.append(json.loads(line))
                    # Write in chunks to avoid memory overflow
                    if len(chunk) >= 10**5:
                        df = pd.DataFrame(chunk)
                        df.to_csv(csv_file, index=False, header=first_file)
                        first_file = False  # Exclude headers from subsequent writes
                        chunk = []  # Reset chunk
                # Write any remaining data
                if chunk:
                    df = pd.DataFrame(chunk)
                    df.to_csv(csv_file, index=False, header=first_file)
                    first_file = False
    print(f"CSV file saved: {csv_output_path}")
# 4. Formatting
selected_columns = ['SMILES', 'Property', 'Assay_ID', 'RegressionProperty', 'LogRegressionProperty', 'Relation']
chunksize = 10**5
for split in splits:
    if split == "train":  # The train split is huge, so process it in chunks
        first_chunk = True
        for chunk in pd.read_csv('FSMol_train_initial.csv', chunksize=chunksize):
            chunk = chunk[selected_columns].copy()
            chunk['Property'] = chunk['Property'].astype(int)
            mode = 'w' if first_chunk else 'a'
            header = first_chunk
            chunk.to_csv('FSMol_train_formatted.csv', mode=mode, index=False, header=header)
            first_chunk = False
    else:
        df = pd.read_csv(f'FSMol_{split}_initial.csv')
        df = df[selected_columns].copy()
        df['Property'] = df['Property'].astype(int)
        df.to_csv(f'FSMol_{split}_formatted.csv', index=False)
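# Optional check (a sketch): after the cast above, 'Property' should be a
# binary activity label (0/1) in every split.
print(pd.read_csv('FSMol_test_formatted.csv', usecols=['Property'])['Property'].value_counts())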
# 5. SMILES sanitization
# SMILES in the test and validation splits are sanitized directly; the train split is too large for that, so it is processed in chunks below
test_formatted = pd.read_csv("FSMol_test_formatted.csv")
valid_formatted = pd.read_csv("FSMol_valid_formatted.csv")
def test_valid_sanitization(formatted_df, output_filename):
    formatted_df['X'] = [
        rdkit.Chem.MolToSmiles(
            fragment_remover.remove(
                standardizer.standardize(
                    rdkit.Chem.MolFromSmiles(smiles))))
        for smiles in formatted_df['SMILES']]
    problems = []
    for index, row in tqdm.tqdm(formatted_df.iterrows(), total=len(formatted_df)):
        result = molvs.validate_smiles(row['X'])
        if len(result) == 0:
            continue
        problems.append((row['X'], result))
    # Most alerts occur because the molecule is a salt form and/or is not neutralized
    for smiles, alerts in problems:
        print(f"SMILES: {smiles}, problem: {alerts[0]}")
    formatted_df.to_csv(f"{output_filename}.csv", index=False)
# Run for both the test and validation splits
test_valid_sanitization(test_formatted, "FSMol_test_sanitized")
test_valid_sanitization(valid_formatted, "FSMol_valid_sanitized")
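# Illustration (a sketch): what standardization plus fragment removal does to
# a typical alert case, e.g. a sodium salt losing its counter-ion. The
# expected output is an assumption based on MolVS defaults.
demo = rdkit.Chem.MolFromSmiles("CC(=O)[O-].[Na+]")  # sodium acetate
print(rdkit.Chem.MolToSmiles(fragment_remover.remove(standardizer.standardize(demo))))
# e.g. 'CC(=O)[O-]'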
# Train split SMILES sanitization (chunked, parallelized below)
def process_smiles(smiles):
    try:
        mol = Chem.MolFromSmiles(smiles)
        if mol is None:
            return None
        mol = standardizer.standardize(mol)
        mol = fragment_remover.remove(mol)
        return Chem.MolToSmiles(mol)
    except Exception:
        # Flag molecules that RDKit/MolVS cannot process
        return None
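# Quick check (a sketch): the helper returns a canonical SMILES for valid
# input and None for an unparseable string.
print(process_smiles("c1ccccc1"))      # -> 'c1ccccc1'
print(process_smiles("not_a_smiles"))  # -> None (RDKit parse failure)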
def process_chunk(df, pool):
    # Distribute the per-SMILES work across the worker pool; pool.map
    # preserves input order, so the results align with the rows
    df['X'] = pool.map(process_smiles, df['SMILES'])
    return df
def validate_chunk(df):
    for smiles in df['X']:
        if smiles:
            result = molvs.validate_smiles(smiles)
            if result:
                print(f"SMILES: {smiles}, problem: {result[0]}")
    return df
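# Example (a sketch): molvs.validate_smiles returns a list of alert strings,
# empty when nothing is flagged; a charged system is one case that typically
# raises an alert.
print(molvs.validate_smiles("c1ccccc1"))           # -> []
print(molvs.validate_smiles("O=C([O-])c1ccccc1"))  # -> e.g. a neutrality alert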
def process_large_csv(input_file, output_prefix, chunksize=10**5, num_processes=8, start_chunk=0):
    with multiprocessing.Pool(processes=num_processes) as pool:
        # Skip rows of already-processed chunks when resuming (row 0 is the header)
        skiprows = range(1, start_chunk * chunksize + 1)
        reader = pd.read_csv(input_file, chunksize=chunksize, header=0, skiprows=skiprows)
        for i, chunk in enumerate(reader, start=start_chunk):
            print(f"Processing chunk {i}...")
            processed_chunk = process_chunk(chunk, pool)
            validated_chunk = validate_chunk(processed_chunk)
            output_file = f"{output_prefix}_{i}.csv"
            validated_chunk.to_csv(output_file, index=False)
# Run on the train split; this can take more than 20 hours
process_large_csv('FSMol_train_formatted.csv', 'FSMol_train_sanitized', start_chunk=0)
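# If the run is interrupted, it can be resumed from a later chunk via
# start_chunk, e.g. (hypothetical chunk index):
# process_large_csv('FSMol_train_formatted.csv', 'FSMol_train_sanitized', start_chunk=12)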
# 6. Final formatting
# Remove the old, unsanitized 'SMILES' column
# Check for None entries in the sanitized 'X' column
# Combine the train chunks
# Save the final files
FSMol_test_sanitized = pd.read_csv("FSMol_test_sanitized.csv")
FSMol_valid_sanitized = pd.read_csv("FSMol_valid_sanitized.csv")
# Test and validation split final formatting
def drop_column(sanitized_df, final_output):
    sanitized_df = sanitized_df.drop('SMILES', axis=1)
    non_smiles = pd.isnull(sanitized_df['X'])
    print(sanitized_df.loc[non_smiles, 'X'])  # Expected: empty, i.e. no failed sanitizations in 'X'
    sanitized_df.rename(columns={'X': 'SMILES', 'Property': 'Y'}, inplace=True)
    cols = ['SMILES'] + [col for col in sanitized_df.columns if col != 'SMILES']  # Move 'SMILES' to the front
    sanitized_df = sanitized_df[cols]
    sanitized_df.to_csv(f'{final_output}.csv', index=False)
    sanitized_df.to_parquet(f'{final_output}.parquet', index=False)
drop_column(FSMol_test_sanitized, 'FSMol_test')
drop_column(FSMol_valid_sanitized, 'FSMol_valid')
# Train split final formatting
train_final = []
for i in range(51):  # 51 sanitized chunk files were produced in step 5
    train_sanitized_df = pd.read_csv(f'FSMol_train_sanitized_{i}.csv')
    train_sanitized_df = train_sanitized_df.drop('SMILES', axis=1)
    non_smiles = pd.isnull(train_sanitized_df['X'])
    print(train_sanitized_df.loc[non_smiles, 'X'])  # Expected: empty
    train_sanitized_df.rename(columns={'X': 'SMILES', 'Property': 'Y'}, inplace=True)
    cols = ['SMILES'] + [col for col in train_sanitized_df.columns if col != 'SMILES']  # Move 'SMILES' to the front
    train_sanitized_df = train_sanitized_df[cols]
    train_final.append(train_sanitized_df)  # Append the DataFrame to the list
# Combine all DataFrames into one
train_final_df = pd.concat(train_final, ignore_index=True)
train_final_df.to_csv('FSMol_train.csv', index=False)
train_final_df.to_parquet('FSMol_train.parquet', index=False)
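# Final sanity check (a sketch): the CSV and Parquet copies of each split
# should hold the same number of rows.
for name in ['FSMol_test', 'FSMol_valid', 'FSMol_train']:
    n_csv = len(pd.read_csv(f'{name}.csv'))
    n_parquet = len(pd.read_parquet(f'{name}.parquet'))
    print(name, n_csv, n_parquet)
    assert n_csv == n_parquet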