    
"""
Purpose: Training Multi-modal Machine Learning Models to Effectively Predict Outcomes (Successful or Not) of Phase 3 Drug Interventional Clinical Trials  


Author : H. Lin, Ph.D., https://orcid.org/0000-0003-4060-7336 


This script contains the Python code for this analysis workflow


Version: Created on 17th April 2025, last updated on 29th Aug. 2025 


"""


# Load python modules required and to be used
import numpy 
import pandas

from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.svm import SVC, NuSVC
from sklearn.ensemble import RandomForestClassifier

from lightgbm import LGBMClassifier

from xgboost import XGBClassifier

from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, precision_score, recall_score, roc_curve, auc, precision_recall_curve

import matplotlib.pyplot as plt 



 
''' Import dataset into current working python environment'''

# 1. Import the vectors embedded from the chemical SMILES fingerprint and
#    molecular descriptor features (the .csv file has no header row).
chem_smf = pandas.read_csv(
    "/local_disk_path_/Chemical_Feature_Set_Vectors.csv",
    header=None,
)

# Check the dimension (rows, columns) of the imported feature vectors.
print(chem_smf.shape)
  




# 2. Import the vectors embedded from the ICD (International Classification
#    of Diseases) codes features (no header row in the .csv file).
icd = pandas.read_csv(
    "/local_disk_path_/ICD_Codes_Feature_Vectors.csv",
    header=None,
)

# Check the dimension of the imported data.
# BUG FIX: the original line printed the literal string "icd.shape"
# instead of the data frame's actual shape.
print(icd.shape)




# 3. Import the vectors embedded from the disease names
#    (no header row in the .csv file).
dis = pandas.read_csv(
    "/local_disk_path/Disease_Name_Feature_Vectors.csv",
    header=None,
)

# Check the dimension (rows, columns) of the imported data.
print(dis.shape)






# 4. Import the vectors embedded from the drug names features
#    (no header row in the .csv file).
drug = pandas.read_csv(
    "/local_disk_path/Drugs_Names_Feature_Vector.csv",
    header=None,
)

# Check the dimension (rows, columns) of the imported data.
print(drug.shape)






# 5. Import the vector data embedded from the texts of the clinical trial
#    title (the .RIS file Part 1 information; no header row).
ris1 = pandas.read_csv(
    "/Local_disk_path/Ris_Part1_EmbeddedFeatures.csv",
    header=None,
)

# Check the dimension (rows, columns) of the imported data.
print(ris1.shape)




# 6. Import the vector data embedded from the texts of the clinical trial
#    description (the .RIS file Part 2 information; no header row).
ris2 = pandas.read_csv(
    "/Local_disk_path/Ris_Part2_EmbeddedFeatures.csv",
    header=None,
)

# Check the dimension (rows, columns) of the imported data.
print(ris2.shape)






# 7. Import the vector data embedded from the texts of the clinical trial
#    abstract (the .RIS file Part 3 information; no header row).
ris3 = pandas.read_csv(
    "/Local_disk_path/Ris_Part3_EmbeddedFeatures.csv",
    header=None,
)

# Check the dimension (rows, columns) of the imported data.
print(ris3.shape)


# If all of the above data sets were imported properly, gather them into a
# list so they can be joined in a single call.
ls_df = [dis, drug, chem_smf, icd, ris1, ris2, ris3]


# Join the individual feature data frames side-by-side (column-wise,
# axis=1) into one wide feature matrix, then check its dimension.
jd = pandas.concat(ls_df, axis=1)
print(jd.shape)


# Empty/NaN cells can appear after the join (e.g. when the input files have
# differing row counts) and would cause errors or poor results in the
# machine-learning step below. Replace any NaN with the numeric value 0.0 so
# the joined matrix is fully numeric; this is the feature set used for the
# train/test split.
X = jd.fillna(0.0)


# Build the feature matrix: replace any empty/NaN values in the joined data
# frame with 0.0 before modelling (jd could be used directly only when it is
# known to contain no missing values).
X = jd.fillna(0.0)

# NOTE(review): the label vector `y` is not created anywhere in this script —
# it must be loaded/derived (one outcome label per trial, successful or not)
# before this point. TODO confirm where `y` comes from.

# Split the data into training and testing sets.
X_train, X_test, y_train, y_test = train_test_split(
    X,               # feature matrix assembled above
    y,               # label set for machine learning
    test_size=0.3,   # hold out 30% of the samples as the validation set
    random_state=42, # fixed seed so the split is reproducible in the future
)

# Choose classic machine learning models for training and conduct hyperparameter tuning to optimize those models

def train_and_evaluate(model, param_grid, model_name,
                       X_tr=None, y_tr=None, X_te=None, y_te=None):
    """
    Train a classifier with automatic hyperparameter tuning (GridSearchCV)
    and evaluate its performance on a held-out test set.

    Args:
        model: The machine learning classifier object (e.g., SVC(), RandomForestClassifier()).
        param_grid: A dictionary defining the hyperparameters to tune and their possible values.
        model_name: A string representing the name of the model, used in messages.
        X_tr, y_tr: Optional training features/labels. When omitted, the
            module-level X_train / y_train produced by train_test_split are used.
        X_te, y_te: Optional test features/labels. When omitted, the
            module-level X_test / y_test are used.

    Returns:
        A dictionary with keys 'model' (the best fitted estimator) and
        'metrics' (accuracy, f1_score, auc_roc, precision, recall, auc_pr
        plus the ROC and Precision-Recall curve arrays).
    """
    # BUG FIX: the original body fitted on X_train_f / X_test_f, names that
    # are never defined anywhere in this script (the split above produces
    # X_train / X_test). Default to those module-level arrays; callers may
    # also pass data explicitly via the new optional parameters.
    if X_tr is None:
        X_tr = X_train
    if y_tr is None:
        y_tr = y_train
    if X_te is None:
        X_te = X_test
    if y_te is None:
        y_te = y_test

    print(f"Training and evaluating {model_name}...")  # may take long: the feature set here is large!

    # Exhaustive grid search with 3-fold cross-validation, parallelised over
    # all available cores; accuracy is the model-selection criterion.
    grid_search = GridSearchCV(
        model,
        param_grid,
        cv=3,
        scoring='accuracy',
        n_jobs=-1,
        verbose=1)

    grid_search.fit(X_tr, y_tr)

    print(f"Best hyperparameters for {model_name}: {grid_search.best_params_}")

    best_model = grid_search.best_estimator_

    # Hard class predictions and positive-class probabilities on the test set.
    y_pred = best_model.predict(X_te)
    y_pred_proba = best_model.predict_proba(X_te)[:, 1]

    # Threshold-based metrics computed from the hard predictions.
    accuracy = accuracy_score(y_te, y_pred)
    f1 = f1_score(y_te, y_pred)
    precision = precision_score(y_te, y_pred)
    recall = recall_score(y_te, y_pred)

    # Ranking metric computed from the predicted probabilities.
    auc_roc = roc_auc_score(y_te, y_pred_proba)

    # Compute the ROC curve points and the area under them.
    fpr, tpr, thresholds_roc = roc_curve(y_te, y_pred_proba)
    roc_auc = auc(fpr, tpr)

    # Compute the Precision-Recall curve points and the area under them.
    precision_pr, recall_pr, thresholds_pr = precision_recall_curve(y_te, y_pred_proba)
    auc_pr = auc(recall_pr, precision_pr)

    metrics = {
        'accuracy': accuracy,
        'f1_score': f1,
        'auc_roc': auc_roc,
        'precision': precision,
        'recall': recall,
        'auc_pr': auc_pr,
        'fpr': fpr,
        'tpr': tpr,
        'precision_pr': precision_pr,
        'recall_pr': recall_pr
    }

    # Print only the scalar metrics; the curve arrays are kept for plotting.
    print(f"Evaluation metrics for {model_name}:")
    for metric, value in metrics.items():
        if isinstance(value, (int, float)):
            print(f"{metric}: {value:.4f}")

    return {'model': best_model, 'metrics': metrics}




# Hyperparameter search spaces, one grid per classifier family.
svm_param_grid = {
    'C': [0.1, 1, 10],
    'kernel': ['linear', 'rbf'],
    'gamma': ['scale', 0.1, 1],
}

nu_svm_param_grid = {
    'nu': [0.1, 0.5, 0.9],
    'kernel': ['linear', 'rbf'],
    'gamma': ['scale', 0.1, 1],
}

rf_param_grid = {
    'n_estimators': [50, 100, 200],
    'max_depth': [None, 10, 20],
    'min_samples_split': [2, 5],
}

lgbm_param_grid = {
    'n_estimators': [50, 100],
    'learning_rate': [0.01, 0.1],
    'num_leaves': [20, 31],
}

xgb_param_grid = {
    'n_estimators': [50, 100],
    'learning_rate': [0.01, 0.1],
    'max_depth': [3, 5],
}



# Initialize the classifiers.
# All estimators share random_state=42 so results are reproducible.
# probability=True on the SVMs enables predict_proba, which the evaluation
# step needs for the ROC and Precision-Recall curves.
svm_classifier = SVC(probability=True, random_state=42)

nu_svm_classifier = NuSVC(probability=True, random_state=42)

rf_classifier = RandomForestClassifier(random_state=42)

lgbm_classifier = LGBMClassifier(random_state=42)

# NOTE(review): use_label_encoder is deprecated in recent XGBoost releases
# (removed/ignored in xgboost >= 2.0) — confirm against the installed version.
xgb_classifier = XGBClassifier(use_label_encoder=False, eval_metric='logloss', random_state=42)



# Train and evaluate each classifier.
# Each call runs a full grid search (potentially slow on large feature sets)
# and returns {'model': best_estimator, 'metrics': {...}}; the results are
# used by the plotting sections below.
svm_results = train_and_evaluate(svm_classifier, svm_param_grid, "Support Vector Machine")

nu_svm_results = train_and_evaluate(nu_svm_classifier, nu_svm_param_grid, "Nu Support Vector Machine")

rf_results = train_and_evaluate(rf_classifier, rf_param_grid, "Random Forest")

lgbm_results = train_and_evaluate(lgbm_classifier, lgbm_param_grid, "Light Gradient Boosting Machine")

xgb_results = train_and_evaluate(xgb_classifier, xgb_param_grid, "Extreme Gradient Boosting Machine")


# Plot the ROC curve of every classifier on a single figure.
plt.figure(figsize=(10, 8))
for label, res in [
    ("SVM", svm_results),
    ("NuSVM", nu_svm_results),
    ("Random Forest", rf_results),
    ("LGBM", lgbm_results),
    ("XGBoost", xgb_results),
]:
    m = res['metrics']
    plt.plot(m['fpr'], m['tpr'], label=f"{label} (AUC = {m['auc_roc']:.2f})")
plt.plot([0, 1], [0, 1], 'k--')  # diagonal = random-guess baseline
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend()
plt.show()



# Plot the Precision-Recall curve of every classifier on a single figure.
plt.figure(figsize=(10, 8))
for label, res in [
    ("SVM", svm_results),
    ("NuSVM", nu_svm_results),
    ("Random Forest", rf_results),
    ("LGBM", lgbm_results),
    ("XGBoost", xgb_results),
]:
    m = res['metrics']
    plt.plot(m['recall_pr'], m['precision_pr'], label=f"{label} (AUC = {m['auc_pr']:.2f})")
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision-Recall Curve')
plt.legend()
plt.show()
