"""

This script contained Python programming codes

Author : H. Lin, Ph.D., https://orcid.org/0000-0003-4060-7336 

Version: Created on 25th Feb. 2025; Latest Update on 17th Aug. 2025



Description of Dataset:
  Raw data of interventional drug clinical trials for Alzheimer's disease were retrieved from clinicaltrials.gov as of January 15 2025.

Using search filters of "Condition: Alzheimer", "Trial Status: Completed",  "Results: With Results", "Intervention: Drug", 423 clinical trials were returned by the search engine of clinicaltrials.gov. 

Retrieved clinical trials were further screened manually. Trials about diagnostic agents, biological drugs/antibodies, and trials with missing data were manually removed from the dataset; i.e., only small molecule chemical drugs were studied for this project. Finally, 177 qualified interventional drug clinical trials about Alzheimer's remained. 


The simple distribution of the dataset is as shown in the table below: 


Table            Sum     Positive instance    Negative instance
Training set:    124             28                   96
Validation set:   53             12                   41
Total:           177             40                  137




The prediction target set is 207 Alzheimer's drug interventional clinical trials with status of "ongoing" or "recruiting", which were also retrieved from the clinicaltrials.gov using its search engine with filters.

Two types of data were used here. i.e., the small molecule drug chemical data and the clinical trial text data. 

The SMILES (the Simplified Molecular Input Line Entry System) of the drugs studied in Alzheimer's drug interventional clinical trials were downloaded from PubChem, CHEMBL, etc., databases. 

The Alzheimer's drug interventional clinical trial text records and data were downloaded from the clinicaltrials.gov website, and a number of features or properties were included in the text files. For example, the study title, conditions, interventions, and outcome measurements. Based on my own domain knowledge, 10 kinds of features were extracted from the text files of the clinical trial records, i.e., (1) The study title, (2) Conditions, (3) Interventions, (4) Outcome measurements, (5) Age of enrollment requirement, (6) Gender of enrollment requirement, (7) Phase / Stage of the clinical trial, (8) Amount of people enrollment, (9) Sponsor, (10) Collaborator. 

Amongst these text features, based on my domain knowledge, reference papers, and primary screening tests, 4 important features were identified and selected for further analyses, i.e., the age of enrollment requirement, the gender of enrollment requirement, the phase / stage of the clinical trial, and the amount of people enrollment.


Upon data cleaning, preprocessing and integration, structured data were obtained, and in this demonstration script file, only a part of the full dataset, i.e., 20 Alzheimer's drug interventional clinical trials' drug SMILES and text features, are shown below as an example and illustration of the machine learning modelling and analytic pipeline.
"""




ls = [ # sample dataset of Alzheimer's drug clinical trials used for machine learning modelling
       # NOTE(review): the module docstring mentions 20 example records, but only 19 rows
       # (IDs 1-19) appear below -- confirm whether the 20th row was omitted intentionally.
 [1, 0, 0, 2, 1, 10, 'C[C@@H]1CC[C@H]2C[C@@H](/C(=C/C=C/C=C/[C@H](C[C@H](C(=O)[C@@H]([C@@H](/C(=C/[C@H](C(=O)C[C@H](OC(=O)[C@@H]3CCCCN3C(=O)C(=O)[C@@]1(O2)O)[C@H](C)C[C@@H]4CC[C@H]([C@@H](C4)OC)O)C)/C)O)OC)C)C)/C)OC'], 
  # By listed order, the values in each inner list indicate: 
  # (1) sample data ID, 
  # (2) machine learning label (whether a clinical trial succeeded [1] or not [0]), 
  # (3) gender of people enrollment requirement, 
  # (4) age of enrollment requirement, 
  # (5) phase of the clinical trial, 
  # (6) amount of people enrollment requirement, and 
  # (7) drug SMILES studied in the clinical trial, respectively.
  # Note that numeric values were extracted and used for the (3)~(6) text features of each clinical trial.
 [2, 0, 0, 4, 1, 14, 'CCCC(CCC)C(=O)O'],  
 [3, 0, 0, 2, 0, 74, 'CN(C)CCC[C@@]1(C2=C(CO1)C=C(C=C2)C#N)C3=CC=C(C=C3)F'],  
 [4, 0, 0, 2, 0, 12, 'CC1=NC(=NC=C1OC[C@]2(C[C@H]2C(=O)NC3=NC=C(C=C3)F)C4=CC(=CC=C4)F)C'],  
 [5, 0, 0, 3, 0, 5, 'CN(C)CC(C1=CC=C(C=C1)OC)C2(CCCCC2)O'],  
 [6, 0, 0, 3, 0, 20, 'COC1=C(C=C2C(=C1)C(=NC(=N2)N3CCN(CC3)C(=O)C4=CC=CO4)N)OC'],  
 [7, 0, 0, 3, 0, 24, 'COC1=C(C=C2C(=C1)C(=NC(=N2)N3CCN(CC3)C(=O)C4=CC=CO4)N)OC'],  
 [8, 0, 1, 1, 1, 8, 'CC(C)(C)OC1=C(C=CC(=C1)CCC(C)(C)N2CC3=C(C2)C=C(C=C3)S(=O)(=O)C)O'],  
 [9, 0, 0, 1, 1, 24, 'CN1CCC2(CC1)NCC(=O)N2CC3=CC=CC=C3'],  
 [10, 0, 0, 1, 1, 36, 'CC1=CC2=C(C=C1)N(C3=C2CN(CC3)C)CCC4=CN=C(C=C4)C'],  
 [11, 0, 0, 2, 1, 36, 'C1=CC=C(C=C1)C#CC2=CC(=CN=C2)[C@@H]3[C@H](OC(=O)N3)C4=CC=CC=C4Cl'],  
 [12, 0, 0, 2, 1, 49, 'C1CN2CCC1[C@H](C2)NC(=O)C3=CC4=C(S3)C(=CC=C4)Cl'],  
 [13, 0, 0, 2, 1, 36, 'C1[C@H]2CSC(=N[C@]2(CO1)C3=C(C=CC(=C3)NC(=O)C4=NC=C(C=C4)F)F)N'],  
 [14, 0, 0, 2, 1, 42, 'C1[C@H]2CSC(=N[C@]2(CO1)C3=C(C=CC(=C3)NC(=O)C4=NC=C(C=C4)F)F)N'],  
 [15, 0, 0, 2, 1, 40, 'C1[C@H]2CSC(=N[C@]2(CO1)C3=C(C=CC(=C3)NC(=O)C4=NC=C(C=C4)F)F)N'],  
 [16, 0, 0, 2, 1, 94, 'COC1=NC=C(N=C1)C(=O)NC2=CC(=C(C=C2)F)[C@]34CN(C[C@H]3CSC(=N4)N)C5=NC=C(C=N5)F'],  
 [17, 0, 0, 2, 1, 16, 'C[C@]1(CS(=O)(=O)N(C(=N1)N)C)C2=C(C=CC(=C2)NC(=O)C3=NC=C(C=C3)F)F'],  
 [18, 0, 0, 2, 1, 32, 'C(C(=O)C(=O)O)C(=O)O'],  
 [19, 0, 0, 2, 1, 15, 'C[C@@H]1CN(C[C@H]1C2=NC3=C(C=NN3C4CCOCC4)C(=O)N2)CC5=NC=CC=N5']
  # ...
  # ...
 ]  
 




def combolst(array, dictionary):
    """
    Combine an array-like object and a dictionary's values into one list.

    Args:
        array: A list or tuple (or other iterable) representing the array.
        dictionary: A dictionary whose values are appended after the array.

    Returns:
        A new list containing the array's elements followed by the
        dictionary's values, in the dictionary's iteration order.
    """
    # Coerce both inputs to plain lists and concatenate them in one step.
    return list(array) + list(dictionary.values())



''' # steps
- import the required modules from rdkit
0 construct a loop
1 generate MACCS keys fingerprints; the array-type result is transformed into a list
2 generate the molecular descriptor set; the dictionary-type result is transformed into a list
3 combine the above two lists, together with the clinical trials' 4-dimensional feature values, into one list
4 save the above list into a variable and also export it to a local file on disk
'''

# importing modules required for analyses
import rdkit 
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
from rdkit.Chem import MACCSkeys
from rdkit.Chem import Descriptors 
import numpy 


newls = list()  # accumulator: one combined feature vector (list) per clinical trial


# Generate the embedded feature vector for every clinical trial record in `ls`.
# NOTE(review): the original loop iterated range(0, 177) -- the full dataset size --
# but this demonstration script contains only len(ls) sample records, which would
# raise an IndexError; iterating over the list itself fixes that and also works
# unchanged on the full dataset.
for record in ls:

    # (1) Generate the MACCS keys chemical fingerprint for the drug's SMILES.
    #     record[6] is the SMILES string (see the column description in `ls`).
    mol = Chem.MolFromSmiles(record[6])
    if mol is None:
        # Skip unparsable SMILES. The original code only printed a message and then
        # still called CalcMolDescriptors(mol) with mol == None (which raises) while
        # reusing a stale `array` from the previous iteration.
        print("Invalid Mol")
        continue

    maccs_fp = MACCSkeys.GenMACCSKeys(mol)
    # numpy.zeros (the module is imported as `numpy`, not `np` -- the original
    # `np.zeros` raised a NameError); ConvertToNumpyArray resizes the array to
    # the fingerprint length and fills it with the bit values.
    array = numpy.zeros((1,))
    DataStructs.ConvertToNumpyArray(maccs_fp, array)
    print(f"MACCS Keys: {array}")  # print and confirm the generated MACCS keys fingerprint

    # (2) Generate the molecular descriptor set (a descriptor-name -> value dict).
    moldesc = rdkit.Chem.Descriptors.CalcMolDescriptors(mol)

    # (3) Combine the 4 clinical-trial features (gender, age, phase, enrollment;
    #     columns 2..5) with the fingerprint and descriptor vectors into one row.
    newls.append(record[2:6] + combolst(array, moldesc))
   


# Save / export the generated vector dataset to a local CSV file.
import pandas

feature_df = pandas.DataFrame(newls)
feature_df.to_csv('Your_local_disk_path_file_features.csv')


# Use scikit-learn and the lazypredict module for a quick, simple and primary trial of machine learning (5-fold cross-validation).
from sklearn.model_selection import train_test_split; from lazypredict.Supervised import LazyClassifier # Importing the modules to be used.



# Load the feature set for 5-fold cross-validation (abbreviated as 5xCV below).
# The nested Python lists are converted to a NumPy array, the data structure expected by scikit-learn / lazypredict.
X = numpy.array(newls) 


# Load the label set for 5xCV.
# The label is column 1 of each record in `ls` (1 = trial succeeded, 0 = not),
# so it is derived directly from the dataset instead of being re-typed by hand.
# NOTE(review): the original hard-coded a truncated 9-element demonstration
# label list, whose length cannot match X and would make train_test_split
# raise a ValueError. This derivation assumes every SMILES in `ls` parses, so
# that the rows of X (built from `ls`) and y stay aligned -- true for the
# sample data shown above.
y = numpy.array([record[1] for record in ls])

  
# Split the feature and label arrays into a training set and a validation set.
# test_size=0.2 reserves 20 % of the dataset for validation; random_state=42
# pins the shuffling seed for reproducibility; stratify=y preserves the
# original positive : negative class ratio in both partitions.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)



# Run the machine-learning classification.
# The lazypredict module bundles many classifier models and evaluates the
# dataset with all of them in a single fit() call.
clf = LazyClassifier(
    verbose=110,            # display detailed logs during execution
    ignore_warnings=False,  # display warning messages during execution
    custom_metric=None,
    predictions=True,
    random_state=99,        # random seed for reproducibility
    classifiers="all",      # evaluate all 29 available models in lazypredict
)
models, predictions = clf.fit(X_train, X_test, y_train, y_test)
print(models)  # display the classification results of the 5xCV machine learning run



''' The results of the full dataset (177 Alzheimer's drug interventional clinical trials) in 5-fold cross-validation of machine learning are shown below:



                               Accuracy  Balanced Accuracy  ROC AUC  F1 Score  Time Taken
Model
SGDClassifier                      0.81               0.79     0.79      0.82        0.02
LinearSVC                          0.72               0.78     0.78      0.75        0.03
BaggingClassifier                  0.86               0.78     0.78      0.86        0.03
XGBClassifier                      0.86               0.78     0.78      0.86        0.25
LGBMClassifier                     0.86               0.78     0.78      0.86        0.03
LogisticRegression                 0.78               0.77     0.77      0.79        0.02
RidgeClassifierCV                  0.72               0.73     0.73      0.74        0.06
AdaBoostClassifier                 0.78               0.72     0.72      0.79        0.09
ExtraTreesClassifier               0.75               0.71     0.71      0.76        0.06
RandomForestClassifier             0.75               0.71     0.71      0.76        0.09
RidgeClassifier                    0.67               0.70     0.70      0.69        0.00
GaussianNB                         0.58               0.69     0.69      0.61        0.02
ExtraTreeClassifier                0.72               0.69     0.69      0.74        0.00
LinearDiscriminantAnalysis         0.64               0.68     0.68      0.67        0.09
DecisionTreeClassifier             0.69               0.67     0.67      0.72        0.00
LabelPropagation                   0.72               0.64     0.64      0.73        0.02
LabelSpreading                     0.72               0.64     0.64      0.73        0.02
KNeighborsClassifier               0.78               0.63     0.63      0.77        0.09
SVC                                0.81               0.61     0.61      0.77        0.00
PassiveAggressiveClassifier        0.67               0.61     0.61      0.69        0.02
Perceptron                         0.64               0.59     0.59      0.66        0.00
BernoulliNB                        0.56               0.54     0.54      0.59        0.02
NearestCentroid                    0.53               0.52     0.52      0.57        0.03
DummyClassifier                    0.78               0.50     0.50      0.68        0.02
CalibratedClassifierCV             0.69               0.45     0.45      0.64        0.11
QuadraticDiscriminantAnalysis      0.39               0.38     0.38      0.44        0.03

'''