ervau committed on
Commit
7dc9603
·
verified ·
1 Parent(s): a022b41

Delete processing

Browse files
processing/dmgi_model.py DELETED
@@ -1,103 +0,0 @@
1
- import torch
2
- import torch.nn.functional as F
3
-
4
-
5
- import torch_geometric.transforms as T
6
- from torch_geometric.nn import GCNConv
7
-
8
-
9
def load_heterodata(path):
    """Load a PyG ``HeteroData`` graph and replace its edges with metapaths.

    Parameters
    ----------
    path : str
        Path to a ``.pt`` file containing a torch-saved HeteroData object.

    Returns
    -------
    HeteroData
        The loaded graph with the original edge types replaced by the
        metapath-induced edge types (``drop_orig_edge_types=True``).
    """
    # NOTE(review): torch.load unpickles arbitrary objects -- only use on
    # trusted checkpoint files.
    data = torch.load(path, map_location=torch.device('cpu'))

    print("Available edge types in the dataset:", data.edge_types)

    # Each metapath is a list of (src, relation, dst) hops; AddMetaPaths adds
    # one new edge type per metapath connecting its first src to its last dst.
    metapaths = [
        [('Compound', 'CTI', 'Protein'), ('Protein', 'rev_CTI', 'Compound')],
        [('Drug', 'DTI', 'Protein'), ('Protein', 'rev_DTI', 'Drug')],
        [('Protein', 'PPI', 'Protein'), ('Protein', 'rev_PPI', 'Protein')],
        [('Gene', 'Orthology', 'Gene'), ('Gene', 'rev_Orthology', 'Gene')],
    ]
    print(metapaths)

    data = T.AddMetaPaths(metapaths, drop_orig_edge_types=True)(data)
    print('Available edge types in the dataset after adding metapaths:', data.edge_types)

    return data
44
-
45
class DMGI(torch.nn.Module):
    """Deep Multiplexed Graph Infomax.

    One GCN encoder per relation; positive node views are contrasted against
    corrupted (row-shuffled) views through a bilinear discriminator ``M``,
    while a consensus embedding ``Z`` is regularized toward the per-relation
    positive embeddings and away from the negative ones.
    """

    def __init__(self, num_nodes, in_channels, out_channels, num_relations):
        super().__init__()
        # One GCNConv per relation / metapath-induced edge type.
        encoders = [GCNConv(in_channels, out_channels) for _ in range(num_relations)]
        self.convs = torch.nn.ModuleList(encoders)
        # Bilinear discriminator scoring (node embedding, graph summary) pairs.
        self.M = torch.nn.Bilinear(out_channels, out_channels, 1)
        # Consensus node embedding shared across all relations.
        self.Z = torch.nn.Parameter(torch.empty(num_nodes, out_channels))
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize every learnable parameter."""
        for conv in self.convs:
            conv.reset_parameters()
        torch.nn.init.xavier_uniform_(self.M.weight)
        self.M.bias.data.zero_()
        torch.nn.init.xavier_uniform_(self.Z)

    def forward(self, x, edge_indices):
        """Encode ``x`` once per relation.

        Returns three parallel lists (one entry per relation): positive
        embeddings, negative embeddings, and mean-pooled graph summaries.
        """
        pos_hs, neg_hs, summaries = [], [], []
        for conv, edge_index in zip(self.convs, edge_indices):
            # Positive view: feature dropout, then GCN + ReLU.
            h_pos = F.dropout(x, p=0.5, training=self.training)
            h_pos = conv(h_pos, edge_index).relu()
            pos_hs.append(h_pos)

            # Negative view: same pipeline on row-shuffled features.
            h_neg = F.dropout(x, p=0.5, training=self.training)
            perm = torch.randperm(h_neg.size(0), device=h_neg.device)
            h_neg = conv(h_neg[perm], edge_index).relu()
            neg_hs.append(h_neg)

            # Readout: mean over nodes of the positive embedding.
            summaries.append(h_pos.mean(dim=0, keepdim=True))

        return pos_hs, neg_hs, summaries

    def loss(self, pos_hs, neg_hs, summaries):
        """Infomax BCE over all relations plus consensus regularization."""
        total = 0.
        for h_pos, h_neg, summary in zip(pos_hs, neg_hs, summaries):
            summary = summary.expand_as(h_pos)
            # The discriminator should score positive pairs high ...
            total += -torch.log(self.M(h_pos, summary).sigmoid() + 1e-15).mean()
            # ... and negative (shuffled) pairs low.
            total += -torch.log(1 - self.M(h_neg, summary).sigmoid() + 1e-15).mean()

        pos_mean = torch.stack(pos_hs, dim=0).mean(dim=0)
        neg_mean = torch.stack(neg_hs, dim=0).mean(dim=0)

        # Pull Z toward the averaged positive embedding, push it away from
        # the averaged negative one.
        pos_reg = (self.Z - pos_mean).pow(2).sum()
        neg_reg = (self.Z - neg_mean).pow(2).sum()
        total += 0.001 * (pos_reg - neg_reg)

        return total
92
-
93
-
94
def load_dmgi_model(path, data, out_channels=64):
    """Instantiate a DMGI model matching ``data`` and load trained weights.

    Parameters
    ----------
    path : str
        Path to a state-dict checkpoint produced by ``torch.save``.
    data : HeteroData
        Graph the model was trained on; supplies node count, feature size
        and the number of relations.
    out_channels : int, optional
        Embedding size used at training time (default 64, the value used by
        the pretraining script).

    Returns
    -------
    DMGI
        Model with the checkpoint weights loaded.
    """
    model = DMGI(data['Compound'].num_nodes,
                 data['Compound'].x.size(-1),
                 out_channels,
                 len(data.edge_types))

    # map_location makes CUDA-trained checkpoints loadable on CPU-only hosts
    # (the companion load_heterodata already maps its load to CPU).
    model.load_state_dict(torch.load(path, map_location=torch.device('cpu')))

    return model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
processing/graph_embedding.py DELETED
@@ -1,71 +0,0 @@
1
- import os
2
- import pandas as pd
3
- import numpy as np
4
- import torch
5
- import time
6
- from datetime import datetime
7
- from unimol_tools import UniMolRepr
8
-
9
# Module-level UniMol encoder reused by every batch (keeps hydrogens, GPU on).
unimol_model = UniMolRepr(data_type='molecule', remove_hs=False, use_gpu=True)
10
-
11
def get_unimol_embeddings_batch(smiles_list, model):
    """Embed one batch of SMILES strings with UniMol.

    Returns an ndarray of per-molecule CLS representations, or ``None`` on
    any failure (best-effort: the caller skips failed batches).
    """
    try:
        representation = model.get_repr(smiles_list, return_atomic_reprs=True)
        return np.array(representation['cls_repr'])
    except Exception as err:
        print(f"Error embedding batch: {err}")
        return None
19
-
20
def process_folder_unimol(folder_path, batch_size=2000):
    """Embed every eligible CSV under ``folder_path`` with UniMol.

    Eligible files end in ``.csv`` but not ``filtered.csv`` / ``mock.csv``.
    For each file the SMILES column ('smiles', falling back to 'mol') is
    embedded in batches and the stacked CLS representations are saved next
    to the CSV as ``<name>_graph_embedding.npy``.

    Parameters
    ----------
    folder_path : str
        Top-level directory, walked recursively.
    batch_size : int, optional
        Number of SMILES per UniMol call (default 2000).
    """
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            # BUG FIX: the original only excluded 'filtered.csv'/'mock.csv'
            # and never required '.csv', so it attempted pd.read_csv on any
            # file -- including the .npy outputs this function itself writes.
            if not file.endswith(".csv"):
                continue
            if file.endswith("filtered.csv") or file.endswith("mock.csv"):
                continue

            file_path = os.path.join(root, file)
            print(f"Processing file: {file_path}")
            try:
                df = pd.read_csv(file_path)
                # 'smiles' is the usual column; some datasets use 'mol'.
                column_name = 'smiles'
                if column_name not in df.columns:
                    column_name = 'mol'

                if column_name not in df.columns:
                    raise ValueError("'smiles' column not found in the CSV file.")

                df = df.dropna(subset=[column_name])
                smiles_list = df[column_name].tolist()
                print(f"Found {len(smiles_list)} valid SMILES to process.")

                all_embeddings = []
                for i in range(0, len(smiles_list), batch_size):
                    batch = smiles_list[i:i + batch_size]
                    embeddings = get_unimol_embeddings_batch(batch, unimol_model)
                    if embeddings is not None:
                        all_embeddings.append(embeddings)
                    else:
                        # Failed batches are skipped, so the output may have
                        # fewer rows than the CSV.
                        print(f"Warning: Batch {i // batch_size} failed.")

                if all_embeddings:
                    final_embeddings = np.concatenate(all_embeddings)
                    output_file = os.path.join(root, f"{os.path.splitext(file)[0]}_graph_embedding.npy")
                    np.save(output_file, final_embeddings)
                    print(f"Saved embeddings with shape {final_embeddings.shape} to {output_file}\n")
                else:
                    print(f"No embeddings generated for {file_path}.")

            except Exception as e:
                # Best-effort: report and continue with the next file.
                print(f"Failed to process {file_path}: {e}\n")
62
-
63
# Top-level folder containing the finetuning CSV datasets.
folder_path = "/home/g3-bbm-project/main_folder/FineTune/finetune_data_multi/finetuning_datasets/classification"  # Set your top-level folder here

print(f"Starting UniMol embedding processing at {datetime.now().strftime('%H:%M:%S')}")
t0 = time.time()

process_folder_unimol(folder_path)

elapsed = time.time() - t0
print(f"\nTotal execution time: {elapsed:.2f} seconds")
print(f"Finished at {datetime.now().strftime('%H:%M:%S')}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
processing/npy_to_h5.py DELETED
@@ -1,28 +0,0 @@
1
- import numpy as np
2
- import h5py
3
- import argparse
4
- import os
5
-
6
- """
7
- Example usage:
8
- python convert_npy_to_h5.py /path/to/input_file.npy /path/to/output_file.h5
9
- """
10
-
11
def convert_npy_to_h5(npy_file_path, h5_output_path):
    """Convert a NumPy ``.npy`` array file into an HDF5 file.

    The array is stored under the dataset name ``'data'``. A missing input
    file is reported rather than raised (best-effort CLI behavior).

    Parameters
    ----------
    npy_file_path : str
        Path to the existing ``.npy`` file.
    h5_output_path : str
        Destination ``.h5`` file (overwritten if present).

    Returns
    -------
    bool
        True on success, False when the input file does not exist.
        (Previously the outcome was indistinguishable to callers.)
    """
    if not os.path.isfile(npy_file_path):
        print(f"Error: Input file '{npy_file_path}' does not exist.")
        return False

    # allow_pickle stays at its False default: input must be a plain array,
    # which also avoids unpickling untrusted data.
    data = np.load(npy_file_path)

    with h5py.File(h5_output_path, 'w') as h5_file:
        h5_file.create_dataset('data', data=data)
    print(f"Data from '{npy_file_path}' has been successfully saved to '{h5_output_path}'.")
    return True
21
-
22
if __name__ == "__main__":
    # CLI entry point: positional input .npy path and output .h5 path.
    cli = argparse.ArgumentParser(description="Convert a .npy file to a .h5 file.")
    cli.add_argument("npy_file", type=str, help="Path to the input .npy file.")
    cli.add_argument("h5_file", type=str, help="Path to the output .h5 file.")

    parsed = cli.parse_args()
    convert_npy_to_h5(parsed.npy_file, parsed.h5_file)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
processing/pretrain_dmgi.py DELETED
@@ -1,77 +0,0 @@
1
- import os
2
-
3
- import numpy as np
4
-
5
- import torch
6
- from torch.optim import Adam
7
- from torch_geometric import seed_everything
8
-
9
- from dmgi_model import load_heterodata, DMGI
10
-
11
- from datetime import datetime
12
-
13
# set random seeds (both PyG/torch and numpy) for reproducibility
seed_everything(42)
np.random.seed(42)

# Cap PyTorch intra-op CPU threads.
torch.set_num_threads(5)

import argparse

parser = argparse.ArgumentParser()
# Path to the torch-saved HeteroData knowledge graph to pretrain on.
parser.add_argument('--data', default='/home/g3bbmproject/main_folder/KG/kg.pt/selformerv2_kg_heterodata_1224.pt')

args = parser.parse_args()
25
-
26
-
27
def train(data, model, optimizer):
    """Run a single optimization step and return the scalar loss."""
    model.train()
    optimizer.zero_grad()

    features = data['Compound'].x
    relation_edges = data.edge_index_dict.values()

    pos_hs, neg_hs, summaries = model(features, relation_edges)
    step_loss = model.loss(pos_hs, neg_hs, summaries)

    step_loss.backward()
    optimizer.step()
    return float(step_loss)
37
-
38
-
39
def pretrain_dmgi(hps, data, device, epochs=100):
    """Pretrain a DMGI model on ``data``.

    Parameters
    ----------
    hps : sequence
        Hyperparameters ``[out_channels, learning_rate, weight_decay]``.
    data : HeteroData
        Graph with a 'Compound' node type carrying features ``x``.
    device : torch.device
        Device to train on.
    epochs : int, optional
        Number of training epochs (default 100, the previously hard-coded
        value), now parameterized for reuse.

    Returns
    -------
    (float, DMGI)
        Loss of the final epoch and the trained model.
    """
    model = DMGI(data['Compound'].num_nodes,
                 data['Compound'].x.size(-1),
                 hps[0],
                 len(data.edge_types))

    data, model = data.to(device), model.to(device)
    print(data.node_types)
    # Print available edge types in the dataset
    print("Available edge types in the dataset:", data.edge_types)

    optimizer = Adam(model.parameters(), lr=hps[1], weight_decay=hps[2])

    train_loss = None  # defined even if epochs == 0
    for epoch in range(1, epochs + 1):
        epoch_start = datetime.now()
        train_loss = train(data, model, optimizer)

        # Log the first epoch and every 25th to keep output short.
        if epoch == 1 or epoch % 25 == 0:
            print(f'\tEpoch: {epoch:03d}, Loss: {train_loss:.4f}, Time: {datetime.now() - epoch_start}')

    return train_loss, model
61
-
62
-
63
if __name__ == '__main__':
    data = load_heterodata(args.data)
    print(f'Loaded data: {args.data}')
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(f'\nUsing device: {device}\n')

    print('Starting training...\n')
    train_start = datetime.now()
    loss, model = pretrain_dmgi([32, 0.01, 0.001], data, device)
    print(f'\nDone. Total training time: {datetime.now() - train_start}')

    # Save the model. BUG FIX: the original created 'models/' but then saved
    # into 'data/pretrained_models/', which was never created -- torch.save
    # would fail on a fresh checkout.
    save_path = 'data/pretrained_models/kg_dmgi_model.pt'
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    torch.save(model.state_dict(), save_path)
    print(f'Model saved: {save_path}\n')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
processing/selfies_embedding.py DELETED
@@ -1,39 +0,0 @@
1
- import os
2
- import pandas as pd
3
- from pandarallel import pandarallel
4
- from transformers import RobertaTokenizer, RobertaModel, RobertaConfig
5
- import torch
6
-
7
- os.environ["TOKENIZERS_PARALLELISM"] = "false"
8
- os.environ["WANDB_DISABLED"] = "true"
9
- os.environ["CUDA_VISIBLE_DEVICES"] = "0"
10
-
11
-
12
- SELFIES_DATASET_PATH = "data/temp_selfies.csv" # path to the SELFIES dataset
13
- MODEL_FILE_PATH = "data/pretrained_models/SELFormer" # path to the pre-trained SELFormer model
14
- OUTPUT_EMBEDDINGS_PATH = "data/embeddings.csv" # path to save the generated embeddings
15
-
16
-
17
df = pd.read_csv(SELFIES_DATASET_PATH)  # load the dataset; must have a 'selfies' column
print(f"Loaded dataset with {len(df)} molecules.")


config = RobertaConfig.from_pretrained(MODEL_FILE_PATH)  # load the pre-trained model and tokenizer
# NOTE(review): hidden states are enabled here but only output[0] is used
# below -- confirm this flag is actually needed.
config.output_hidden_states = True
tokenizer = RobertaTokenizer.from_pretrained("data/RobertaFastTokenizer")
model = RobertaModel.from_pretrained(MODEL_FILE_PATH, config=config)
25
-
26
-
27
def get_sequence_embeddings(selfies):
    """Mean-pooled SELFormer embedding of one SELFIES string.

    Returns the mean over the sequence dimension of the model's last hidden
    state, as a plain Python list (so pandas can hold it in a column).
    """
    token = torch.tensor([tokenizer.encode(selfies, add_special_tokens=True, max_length=512, padding=True, truncation=True)])  # tokenize the SELFIES string
    # BUG FIX: inference only -- disable autograd so no computation graph or
    # activations are retained for every molecule (large memory waste before).
    with torch.no_grad():
        output = model(token)  # forward pass through the model
    sequence_out = output[0]  # last hidden state, then mean-pool over tokens
    return torch.mean(sequence_out[0], dim=0).tolist()
32
-
33
- print("Generating embeddings...")
34
- pandarallel.initialize(nb_workers=5, progress_bar=True)
35
- df["sequence_embeddings"] = df.selfies.parallel_apply(get_sequence_embeddings)
36
-
37
- df.drop(columns=["selfies"], inplace=True)
38
- df.to_csv(OUTPUT_EMBEDDINGS_PATH, index=False)
39
- print(f"Embeddings saved to {OUTPUT_EMBEDDINGS_PATH}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
processing/smiles_to_selfies.py DELETED
@@ -1,52 +0,0 @@
1
- import pandas as pd
2
- from pandarallel import pandarallel
3
- import selfies as sf
4
-
5
def to_selfies(smiles):
    """Encode a SMILES string as SELFIES.

    On encoder failure the input is logged and returned unchanged, so a
    caller can detect failures by comparing output to input.
    """
    try:
        encoded = sf.encoder(smiles)
    except sf.EncoderError:
        print(f"EncoderError for SMILES: {smiles}")
        return smiles
    return encoded
15
-
16
def prepare_data(path, save_to):
    """Convert a SMILES dataset to SELFIES and save it.

    Reads a TSV with a 'canonical_smiles' column, converts each SMILES in
    parallel, drops rows whose conversion failed, and writes the remaining
    rows (with a 'selfies' column instead) to ``save_to`` as CSV.
    """
    frame = pd.read_csv(path, sep="\t")
    frame["selfies"] = frame["canonical_smiles"]  # seed with the SMILES column

    pandarallel.initialize()
    frame["selfies"] = frame["selfies"].parallel_apply(to_selfies)

    # to_selfies returns its input unchanged on failure, so equality between
    # the two columns marks rows that failed to convert.
    failed = frame["canonical_smiles"] == frame["selfies"]
    frame.drop(frame[failed].index, inplace=True)

    frame.drop(columns=["canonical_smiles"], inplace=True)
    frame.to_csv(save_to, index=False)
28
-
29
input_csv_path = "/home/g3bbmproject/main_folder/KG/kg.pt/our_10k_matched_data_with_embeddings.csv"
output_csv_path = "data_with_selfies.csv"
temp_smiles_path = "temp_smiles.csv"
temp_selfies_path = "temp_selfies.csv"

data = pd.read_csv(input_csv_path)

# Save the SMILES column to a temporary file for conversion
data[['smiles']].rename(columns={"smiles": "canonical_smiles"}).to_csv(temp_smiles_path, index=False, sep="\t")

# Convert SMILES to SELFIES using the prepare_data function
prepare_data(path=temp_smiles_path, save_to=temp_selfies_path)

# Load the resulting SELFIES data
selfies_data = pd.read_csv(temp_selfies_path)

# BUG FIX: prepare_data drops rows whose conversion failed, so a positional
# column assignment would silently misalign every row after the first
# dropped one. Fail loudly instead of writing corrupted pairings.
if len(selfies_data) != len(data):
    raise ValueError(
        f"SELFIES conversion dropped {len(data) - len(selfies_data)} rows; "
        "cannot align 'selfies' back to the original data by position."
    )

# Add the SELFIES column back to the original data
data['selfies'] = selfies_data['selfies']  # Assumes the converted file has a 'selfies' column

# Save the updated data to a new CSV file
data.to_csv(output_csv_path, index=False)

print(f'Total length of data: {len(data)}')
print(f"Updated dataset with SELFIES saved to: {output_csv_path}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
processing/text_embedding.py DELETED
@@ -1,96 +0,0 @@
1
- import os
2
- import pandas as pd
3
- import torch
4
- import numpy as np
5
- from transformers import AutoTokenizer, AutoModel
6
- from tqdm import tqdm
7
-
8
# Check for GPU availability
# NOTE(review): hard-coded to GPU index 2 when CUDA is available -- confirm
# that device exists on the target machine.
device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Load SciBERT tokenizer and model
scibert_tokenizer = AutoTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
scibert_model = AutoModel.from_pretrained("allenai/scibert_scivocab_uncased").to(device)
scibert_model.eval()  # Set the model to evaluation mode (disables dropout)
16
-
17
# Embed a single description string with SciBERT.
def get_text_embeddings(text, tokenizer, model, device):
    """Mean-pooled SciBERT embedding of ``text`` as a numpy vector.

    Non-string or blank input yields a 768-dim zero vector (SciBERT's
    hidden size), so rows with missing descriptions embed to zeros.
    """
    # Guard clause: anything that isn't a non-blank string maps to zeros.
    if not isinstance(text, str) or text.strip() == "":
        return torch.zeros(768).to(device).cpu().numpy()

    tokens = tokenizer.encode(
        text,
        add_special_tokens=True,
        max_length=512,
        padding=True,
        truncation=True,
        return_tensors="pt"
    ).to(device)
    with torch.no_grad():
        pooled = model(tokens)[0][0].mean(dim=0)
    return pooled.cpu().numpy()
34
-
35
# Process all CSV files ending with 'filtered' in a folder and its subfolders.
def process_folder(folder_path):
    """
    Walk through the folder and process each CSV file ending with 'filtered'.
    Embeddings are saved in the same folder with '_text_embedding.npy' added to the base filename.
    """
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            if file.endswith("filtered.csv"):
                filtered_path = os.path.join(root, file)
                base_filename = file.replace("_filtered", "")
                full_path = os.path.join(root, base_filename)

                if not os.path.exists(full_path):
                    print(f"Corresponding full CSV file not found for {filtered_path}, skipping.\n")
                    continue

                print(f"Processing file: {filtered_path}")

                try:
                    # Read both full and filtered CSV files
                    df_full = pd.read_csv(full_path)
                    df_filtered = pd.read_csv(filtered_path)

                    # The bace dataset keeps its SMILES under 'mol'.
                    # BUG FIX: base_filename still carries the '.csv' suffix,
                    # so the original `base_filename == 'bace'` never matched
                    # and bace files crashed on the missing 'smiles' column.
                    dataset_name = os.path.splitext(base_filename)[0]
                    column_name = 'mol' if dataset_name == 'bace' else 'smiles'

                    # Map smiles to description.
                    # NOTE(review): assumes the filtered CSV always has
                    # 'smiles' and 'Description' columns (even for bace) --
                    # confirm against the data files.
                    smiles_to_description = dict(zip(df_filtered["smiles"], df_filtered["Description"]))

                    # Prepare Description column for full df (unmatched rows get NaN,
                    # which get_text_embeddings turns into zero vectors)
                    df_full["Description"] = df_full[column_name].map(smiles_to_description)

                    # Now generate embeddings
                    tqdm.pandas(desc=f"Embedding {base_filename}")
                    embeddings = df_full["Description"].progress_apply(
                        lambda text: get_text_embeddings(text, scibert_tokenizer, scibert_model, device)
                    ).tolist()

                    embeddings_array = np.array(embeddings)

                    output_file = os.path.join(root, f"{dataset_name}_text_embedding.npy")
                    np.save(output_file, embeddings_array)

                    print(f"Saved embeddings to {output_file}\n")

                except Exception as e:
                    # Best-effort: report and continue with the next file.
                    print(f"Failed to process {filtered_path}: {e}\n")
91
-
92
# Example usage: embed descriptions for every finetuning dataset under this root.
folder_path = "/home/g3-bbm-project/main_folder/FineTune/finetune_data_multi/finetuning_datasets/classification"  # Set your top-level folder here
print(f"Starting to process folder: {folder_path}")
process_folder(folder_path)
print("Folder processing complete.")