import torch
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils import data
from torch.utils.data import SequentialSampler
from torch import nn
import yaml

import matplotlib.pyplot as plt
import numpy as np
from time import time
from sklearn.metrics import (
    mean_squared_error,
    roc_auc_score,
    average_precision_score,
    f1_score,
    log_loss,
)
from lifelines.utils import concordance_index
from scipy.stats import pearsonr
import pickle

torch.manual_seed(2)
np.random.seed(3)
import copy
from prettytable import PrettyTable

import os

from DeepPurpose.utils import *
from DeepPurpose.encoders import *

from torch.utils.tensorboard import SummaryWriter


class Classifier(nn.Sequential):
    """
    Prediction head combining a drug encoder and a protein encoder.

    Both encoders are run on their respective inputs, the resulting embeddings
    are concatenated, and a small MLP maps the fused vector to a single score.
    Serves as the core module of the DBTA class.

    Parameters:
            model_drug (DeepPurpose.models.XX): Encoder model for drug. XX can be "transformer", "MPNN", "CNN", "CNN_RNN" ...,
            model_protein (DeepPurpose.models.XX): Encoder model for protein. XX can be "transformer", "CNN", "CNN_RNN" ...,
            **config (kwargs, keyword arguments): Specify the parameter of classifier.
    """

    def __init__(self, model_drug, model_protein, **config):
        super().__init__()
        self.input_dim_drug = config["hidden_dim_drug"]
        self.input_dim_protein = config["hidden_dim_protein"]

        self.model_drug = model_drug
        self.model_protein = model_protein

        self.dropout = nn.Dropout(0.1)

        self.hidden_dims = config["cls_hidden_dims"]
        # Layer widths: fused input -> hidden dims -> scalar output.
        dims = [self.input_dim_drug + self.input_dim_protein, *self.hidden_dims, 1]

        self.predictor = nn.ModuleList(
            nn.Linear(n_in, n_out) for n_in, n_out in zip(dims[:-1], dims[1:])
        )

    def forward(self, v_D, v_P):
        """
        Feedforward pass of the classifier.

        Parameters:
                v_D (many types): Input feature for the drug encoder (e.g. for
                        "transformer", "CNN", "CNN_RNN", "MPNN" encoders).
                v_P (many types): Input feature for the protein encoder (e.g. for
                        "transformer", "CNN", "CNN_RNN" encoders).

        Return:
                v_f (torch.Tensor): The prediction score tensor.
        """
        drug_emb = self.model_drug(v_D)
        prot_emb = self.model_protein(v_P)
        # Fuse the two embeddings, then run the MLP head; the final layer has
        # no activation/dropout so raw scores (logits) are returned.
        v_f = torch.cat((drug_emb, prot_emb), 1)
        last = len(self.predictor) - 1
        for idx, layer in enumerate(self.predictor):
            v_f = layer(v_f) if idx == last else F.relu(self.dropout(layer(v_f)))
        return v_f


def model_pretrained(path_dir=None, model=None):
    """
    Load a pretrained DBTA model.

    Parameters:
            path_dir (str, optional): The path of the pretrained model directory.
            model (str, optional): The name of a pretrained model available for
                    download (see Documentation for details). When given, it takes
                    precedence and the model files are downloaded first.

    Return:
            model (DBTA): An instance of DBTA with pretrained weights loaded.

    Raises:
            ValueError: If neither ``path_dir`` nor ``model`` is provided.
    """
    if model is not None:
        path_dir = download_pretrained_model(model)
    if path_dir is None:
        # Fail early with a clear message instead of an obscure downstream error.
        raise ValueError("Provide either 'path_dir' or a pretrained 'model' name.")
    config = load_dict(path_dir)
    model = DBTA(**config)
    # os.path.join is platform-correct and tolerates trailing separators,
    # unlike the previous string concatenation with "/".
    model.load_pretrained(os.path.join(path_dir, "model.pt"))
    return model


def repurpose(
    X_repurpose: list,
    target: str,
    model: "DBTA",
    drug_names: list = None,
    target_name: str = None,
    result_folder: str = "./result/",
    convert_y: bool = False,
    output_num_max: int = 10,
    verbose: bool = True,
):
    """
    Rank a list of candidate drugs against a single target.

    Parameters:
            X_repurpose (list): SMILES strings of the candidate drugs.
            target (str): amino-acid sequence of the single target.
            model (DBTA): a trained DBTA model.
            drug_names (list, optional): display names for the drugs; auto-generated if None.
            target_name (str, optional): display name for the target; defaults to "Target".
            result_folder (str): directory where 'repurposing.txt' is written (created if missing).
            convert_y (bool): if True, convert predictions from p-scale to nM.
                    Lower nM is better, so the ranking order is flipped accordingly.
            output_num_max (int): number of top-ranked rows printed to stdout.
            verbose (bool): if True, print progress and the result table.

    Returns:
            y_pred: the model predictions, in the original input order.
    """
    print("repurposing...")

    # Featurize each (drug, target) pair with the encodings the model was trained on.
    df_data, _, _ = data_process(
        X_repurpose,
        target,
        drug_encoding=model.drug_encoding,
        target_encoding=model.target_encoding,
        split_method="repurposing_VS",
    )
    y_pred = model.predict(df_data)
    if convert_y:
        y_pred = convert_y_unit(np.array(y_pred), "p", "nM")

    if drug_names is None:
        drug_names = ["Drug " + str(i) for i in range(len(X_repurpose))]
    if target_name is None:
        target_name = "Target"

    print_list = []
    for i in range(len(X_repurpose)):
        if model.binary:
            interaction = "YES" if y_pred[i] > 0.5 else "NO"
            string_lst = [
                drug_names[i],
                target_name,
                interaction,
                "{0:.2f}".format(y_pred[i]),
            ]
        else:
            #### regression: Drug Name, Target Name, binding score
            string_lst = [
                drug_names[i],
                target_name,
                "{0:.2f}".format(y_pred[i]),
            ]
        # BUG FIX: previously the append lived inside the regression branch
        # only, so binary-mode predictions never made it into the table.
        print_list.append((string_lst, y_pred[i]))

    # nM scale: lower is better (ascending); p scale: higher is better (descending).
    print_list.sort(key=lambda x: x[1], reverse=not convert_y)
    print_list = [i[0] for i in print_list]

    # print results
    print("---------------")
    if target_name is not None and verbose:
        print("Drug Repurposing Result for " + target_name)

    if model.binary:
        table_header = [
            "Rank",
            "Drug Name",
            "Target Name",
            "Interaction",
            "Probability",
        ]
    else:
        ### regression
        table_header = ["Rank", "Drug Name", "Target Name", "Binding Score"]
    table = PrettyTable(table_header)

    for idx, lst in enumerate(print_list):
        table.add_row([idx + 1] + lst)

    # Ensure the output directory exists before writing.
    os.makedirs(result_folder, exist_ok=True)
    fo = os.path.join(result_folder, "repurposing.txt")
    with open(fo, "w") as fout:
        fout.write(table.get_string())
    if verbose:
        # BUG FIX: PrettyTable.get_string has no 'row_filter' option; use the
        # documented start/end row slicing to print only the top rows.
        print(table.get_string(start=0, end=output_num_max))
        if output_num_max < len(print_list):
            print("checkout " + fo + " for the whole list")
    return y_pred


def virtual_screening(
    X_repurpose,
    target,
    model,
    drug_names=None,
    target_names=None,
    result_folder="./result/",
    convert_y=False,
    output_num_max=10,
    verbose=True,
):
    """
    Score drug-target pairs with a trained DBTA model and write a ranked table.

    Parameters:
            X_repurpose (list): SMILES strings, one per pair.
            target (list): target amino-acid sequences, one per pair.
            model (DBTA): a trained DBTA model.
            drug_names (list, optional): display names for drugs; auto-generated if None.
            target_names (list, optional): display names for targets; auto-generated if None.
            result_folder (str): directory where 'virtual_screening.txt' is written.
                    NOTE(review): the directory is assumed to exist — confirm callers create it.
            convert_y (bool): if True, convert predictions from p-scale to nM
                    (lower is better, so the ranking order is flipped accordingly).
            output_num_max (int): NOTE(review): appears unused here — the printed
                    preview is fixed at the first 13 lines of the output file.
            verbose (bool): if True, print progress and a preview of the table.

    Returns:
            y_pred: model predictions for each pair, in the original input order.
    """
    # X_repurpose: a list of SMILES string
    # target: a list of targets

    fo = os.path.join(result_folder, "virtual_screening.txt")
    print_list = []
    if drug_names is None:
        drug_names = ["Drug " + str(i) for i in range(len(X_repurpose))]
    if target_names is None:
        target_names = ["Target " + str(i) for i in range(len(target))]
    # Table layout depends on the task: probability column for binary models,
    # binding-score column for regression models.
    if model.binary:
        table_header = [
            "Rank",
            "Drug Name",
            "Target Name",
            "Interaction",
            "Probability",
        ]
    else:
        ### regression
        table_header = ["Rank", "Drug Name", "Target Name", "Binding Score"]
    table = PrettyTable(table_header)

    with open(fo, "w") as fout:
        print("virtual screening...")
        # Featurize all pairs with the encodings the model was trained on.
        df_data, _, _ = data_process(
            X_repurpose,
            target,
            drug_encoding=model.drug_encoding,
            target_encoding=model.target_encoding,
            split_method="repurposing_VS",
        )
        y_pred = model.predict(df_data)

        if convert_y:
            y_pred = convert_y_unit(np.array(y_pred), "p", "nM")

        print("---------------")
        if drug_names is not None and target_names is not None:
            if verbose:
                print("Virtual Screening Result")
            # NOTE(review): f_d and f_p (max name widths) are computed but
            # never used below — likely a leftover from fixed-width printing.
            f_d = max([len(o) for o in drug_names]) + 1
            f_p = max([len(o) for o in target_names]) + 1
            for i in range(len(target)):
                if model.binary:
                    # Threshold the predicted probability at 0.5 for the YES/NO column.
                    if y_pred[i] > 0.5:
                        string_lst = [
                            drug_names[i],
                            target_names[i],
                            "YES",
                            "{0:.2f}".format(y_pred[i]),
                        ]

                    else:
                        string_lst = [
                            drug_names[i],
                            target_names[i],
                            "NO",
                            "{0:.2f}".format(y_pred[i]),
                        ]

                else:
                    ### regression
                    string_lst = [
                        drug_names[i],
                        target_names[i],
                        "{0:.2f}".format(y_pred[i]),
                    ]

                print_list.append((string_lst, y_pred[i]))
        # nM scale: lower is better (ascending); p scale: higher is better (descending).
        if convert_y:
            print_list.sort(key=lambda x: x[1])
        else:
            print_list.sort(key=lambda x: x[1], reverse=True)
        print_list = [i[0] for i in print_list]
        for idx, lst in enumerate(print_list):
            lst = [str(idx + 1)] + lst
            table.add_row(lst)
        fout.write(table.get_string())

    if verbose:
        # Print only the beginning of the written file as a preview
        # (13 lines ≈ table border + header + first few rows).
        with open(fo, "r") as fin:
            lines = fin.readlines()
            for idx, line in enumerate(lines):
                if idx < 13:
                    print(line, end="")
                else:
                    print("checkout " + fo + " for the whole list")
                    break
        print()

    return y_pred


def dgl_collate_func(x):
    """
    Collate a mini-batch of (drug_graph, protein_features, label) samples
    for Deep Graph Library (DGL) based models.

    The drug graphs are merged into one batched DGL graph; protein features
    and labels are stacked and converted to torch tensors.

    Parameters:
            x (list): List of tuples, where each tuple contains (drug_graph, protein_features, label)

    Returns:
            tuple: A tuple containing:
                    - batched DGL graph for the drug molecules (dgl.DGLGraph)
                    - tensor of protein features (torch.Tensor)
                    - tensor of labels (torch.Tensor)
    """
    # dgl is imported lazily so the module loads without it installed.
    import dgl

    graphs, proteins, labels = zip(*x)
    batched_graphs = dgl.batch(graphs)
    protein_batch = torch.tensor(np.array(proteins))
    label_batch = torch.tensor(np.array(labels))
    return batched_graphs, protein_batch, label_batch


class DBTA:
    """
    Drug Target Binding Affinity (DBTA) model for predicting drug-target interactions.

    This class implements a deep learning model for predicting drug-target binding affinity.
    It supports various drug and protein encoding methods and can be used for both binary
    classification (interaction prediction) and regression (binding affinity prediction) tasks.

    Parameters:
            **config (dict): Configuration dictionary containing the following keys:

                    - drug_encoding (str): Encoding method for drugs. Options include:
                            - 'Morgan', 'ErG', 'Pubchem', 'Daylight', 'rdkit_2d_normalized', 'ESPF'
                            - 'CNN', 'CNN_RNN', 'Transformer', 'MPNN', 'DGL_GCN',
                            - 'DGL_NeuralFP', 'DGL_GIN_AttrMasking', 'DGL_GIN_ContextPred', 'DGL_AttentiveFP'
                    - target_encoding (str): Encoding method for proteins. Options include:
                            - 'AAC', 'PseudoAAC', 'Conjoint_triad', 'Quasi-seq', 'ESPF'
                            - 'CNN', 'CNN_RNN', 'Transformer'
                    - hidden_dim_drug (int): Hidden dimension for drug encoder
                    - hidden_dim_protein (int): Hidden dimension for protein encoder
                    - mlp_hidden_dims_drug (list): Hidden dimensions for drug MLP layers
                    - mlp_hidden_dims_target (list): Hidden dimensions for protein MLP layers
                    - cls_hidden_dims (list): Hidden dimensions for classifier layers
                    - batch_size (int): Batch size for training
                    - train_epoch (int): Number of training epochs
                    - LR (float): Learning rate
                    - decay (float): Weight decay for optimizer
                    - num_workers (int): Number of data loading workers
                    - cuda_id (int, optional): GPU device ID to use
                    - result_folder (str): Directory to save results

    Attributes:
            model_drug: Drug encoder model
            model_protein: Protein encoder model
            model: Combined classifier model
            device: Device to run the model on (CPU/GPU)
            binary (bool): Whether the task is binary classification
            drug_encoding (str): Drug encoding method used
            target_encoding (str): Protein encoding method used
            result_folder (str): Directory for saving results
    """

    def __init__(self, **config):
        """
        Build the drug encoder, protein encoder, and classification head
        described by ``config``, and initialize device / result-folder state.

        Raises:
                AttributeError: if the drug or protein encoding name is unknown.
        """
        drug_encoding = config["drug_encoding"]
        target_encoding = config["target_encoding"]

        # Encoding name -> encoder class (classes come from DeepPurpose.encoders).
        drug_encoders = {
            "Morgan": MLP,
            "ErG": MLP,
            "Pubchem": MLP,
            "Daylight": MLP,
            "rdkit_2d_normalized": MLP,
            "ESPF": MLP,
            "CNN": CNN,
            "CNN_RNN": CNN_RNN,
            "Transformer": transformer,
            "MPNN": MPNN,
            "DGL_GCN": DGL_GCN,
            "DGL_NeuralFP": DGL_NeuralFP,
            "DGL_GIN_AttrMasking": DGL_GIN_AttrMasking,
            "DGL_GIN_ContextPred": DGL_GIN_ContextPred,
            "DGL_AttentiveFP": DGL_AttentiveFP,
        }

        target_encoders = {
            "AAC": MLP,
            "PseudoAAC": MLP,
            "Conjoint_triad": MLP,
            "Quasi-seq": MLP,
            "ESPF": MLP,
            "CNN": CNN,
            "CNN_RNN": CNN_RNN,
            "Transformer": transformer,
        }

        if drug_encoding not in drug_encoders:
            raise AttributeError(
                "Please use one of the available encoding method for drug."
            )
        # Graph / message-passing encoders take only config; the rest also
        # take the branch name ("drug" / "protein") as first argument.
        if drug_encoding.startswith("DGL") or drug_encoding == "MPNN":
            self.model_drug = drug_encoders[drug_encoding](**config)
        else:
            self.model_drug = drug_encoders[drug_encoding]("drug", **config)

        if target_encoding not in target_encoders:
            raise AttributeError(
                "Please use one of the available encoding method for protein."
            )
        self.model_protein = target_encoders[target_encoding]("protein", **config)

        self.model = Classifier(self.model_drug, self.model_protein, **config)
        self.config = config

        # Prefer an explicitly configured GPU id; otherwise the default GPU,
        # falling back to CPU when CUDA is unavailable.
        cuda_id = self.config.get("cuda_id")
        if torch.cuda.is_available():
            if cuda_id is not None:
                self.device = torch.device(f"cuda:{cuda_id}")
            else:
                self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")

        self.drug_encoding = drug_encoding
        self.target_encoding = target_encoding
        self.result_folder = config["result_folder"]
        # BUG FIX: os.makedirs creates intermediate directories and tolerates
        # pre-existing ones; os.mkdir failed on nested result paths.
        os.makedirs(self.result_folder, exist_ok=True)
        self.binary = False
        # Defaults for optional training settings.
        self.config.setdefault("num_workers", 0)
        self.config.setdefault("decay", 0)

    def test_(self, data_generator, model, repurposing_mode=False, test=False):
        """
        Evaluate ``model`` on a data generator and report task metrics.

        Parameters:
                data_generator (torch.utils.data.DataLoader): test or validation data.
                model (DeepPurpose.models.Classifier): the classifier to evaluate.
                        May be a copy of ``self.model`` (e.g. the early-stopped one).
                repurposing_mode (bool, optional): if True, return raw predictions only.
                test (bool, optional): if True, also plot ROC-AUC and PR-AUC curves.

        Returns:
                varies: raw predictions in repurposing mode; otherwise
                (AUROC, AUPRC, F1, log-loss, predictions) for binary tasks, or
                (MSE, Pearson r, p-value, concordance index, predictions,
                last-batch MSE loss) for regression.
        """
        y_pred = []
        y_label = []
        model.eval()
        for i, (v_d, v_p, label) in enumerate(data_generator):
            # Graph/token drug encodings handle device placement inside their
            # encoder modules; dense features are moved to the device here.
            if self.drug_encoding in [
                "MPNN",
                "Transformer",
                "DGL_GCN",
                "DGL_NeuralFP",
                "DGL_GIN_AttrMasking",
                "DGL_GIN_ContextPred",
                "DGL_AttentiveFP",
            ]:
                pass
            else:
                v_d = v_d.float().to(self.device)
            if self.target_encoding != "Transformer":
                v_p = v_p.float().to(self.device)
            # BUG FIX: score with the model passed in as argument; the original
            # always used self.model, silently ignoring e.g. the early-stopped
            # copy passed in during final testing.
            score = model(v_d, v_p)
            if self.binary:
                logits = torch.squeeze(torch.sigmoid(score)).detach().cpu().numpy()
            else:
                loss_fct = torch.nn.MSELoss()
                n = torch.squeeze(score, 1)
                loss = loss_fct(
                    n,
                    Variable(torch.from_numpy(np.array(label)).float()).to(self.device),
                )
                logits = torch.squeeze(score).detach().cpu().numpy()
            label_ids = label.to("cpu").numpy()
            y_label = y_label + label_ids.flatten().tolist()
            y_pred = y_pred + logits.flatten().tolist()
        model.train()
        if self.binary:
            if repurposing_mode:
                return y_pred
            # Hard 0/1 calls at a 0.5 threshold, computed once after the loop
            # (previously recomputed for every batch).
            outputs = (np.asarray(y_pred) >= 0.5).astype(int)
            ## ROC-AUC curve
            if test:
                roc_auc_file = os.path.join(self.result_folder, "roc-auc.jpg")
                plt.figure(0)
                roc_curve(
                    y_pred,
                    y_label,
                    roc_auc_file,
                    self.drug_encoding + "_" + self.target_encoding,
                )
                plt.figure(1)
                pr_auc_file = os.path.join(self.result_folder, "pr-auc.jpg")
                prauc_curve(
                    y_pred,
                    y_label,
                    pr_auc_file,
                    self.drug_encoding + "_" + self.target_encoding,
                )

            # NOTE(review): log_loss is computed on the hard 0/1 calls rather
            # than the probabilities — kept as-is for backward compatibility.
            return (
                roc_auc_score(y_label, y_pred),
                average_precision_score(y_label, y_pred),
                f1_score(y_label, outputs),
                log_loss(y_label, outputs),
                y_pred,
            )
        else:
            if repurposing_mode:
                return y_pred
            # NOTE(review): 'loss' is the MSE of the *last* batch only, and is
            # undefined when the generator is empty.
            return (
                mean_squared_error(y_label, y_pred),
                pearsonr(y_label, y_pred)[0],
                pearsonr(y_label, y_pred)[1],
                concordance_index(y_label, y_pred),
                y_pred,
                loss,
            )

    def train(self, train, val=None, test=None, verbose=True):
        """
        Trains the model on the given training data and evaluates it on validation and test data if provided.

        The best model on the validation set (highest AUROC for binary tasks,
        lowest MSE for regression) is kept and restored at the end; without a
        validation set, the final-epoch model is kept.

        Parameters:
                train (pd.DataFrame): The training dataset (must have 'Label' column).
                val (pd.DataFrame, optional): The validation dataset. Defaults to None.
                test (pd.DataFrame, optional): The test dataset. Defaults to None.
                verbose (bool, optional): If True, prints training and validation metrics. Defaults to True.

        Returns:
                None: This method updates the model's state but does not return anything.
        """
        # A dataset with exactly two distinct labels is treated as binary classification.
        if len(train.Label.unique()) == 2:
            self.binary = True
            self.config["binary"] = True

        lr = self.config["LR"]
        decay = self.config["decay"]
        BATCH_SIZE = self.config["batch_size"]
        train_epoch = self.config["train_epoch"]
        # NOTE(review): test_every_X_epoch is read here but never used below.
        if "test_every_X_epoch" in self.config.keys():
            test_every_X_epoch = self.config["test_every_X_epoch"]
        else:
            test_every_X_epoch = 40
        loss_history = []

        self.model = self.model.to(self.device)

        # support multiple GPUs
        if torch.cuda.device_count() > 1:
            if verbose:
                print("Let's use " + str(torch.cuda.device_count()) + " GPUs!")
            self.model = nn.DataParallel(self.model, dim=0)
        elif torch.cuda.device_count() == 1:
            if verbose:
                print("Let's use " + str(torch.cuda.device_count()) + " GPU!")
        else:
            if verbose:
                print("Let's use CPU/s!")
        # Future TODO: support multiple optimizers with parameters
        opt = torch.optim.Adam(self.model.parameters(), lr=lr, weight_decay=decay)
        if verbose:
            print("--- Data Preparation ---")

        params = {
            "batch_size": BATCH_SIZE,
            "shuffle": True,
            "num_workers": self.config["num_workers"],
            "drop_last": False,
        }
        # Graph-based drug encodings need custom collate functions to batch graphs.
        if self.drug_encoding == "MPNN":
            params["collate_fn"] = mpnn_collate_func
        elif self.drug_encoding in [
            "DGL_GCN",
            "DGL_NeuralFP",
            "DGL_GIN_AttrMasking",
            "DGL_GIN_ContextPred",
            "DGL_AttentiveFP",
        ]:
            params["collate_fn"] = dgl_collate_func

        training_generator = data.DataLoader(
            data_process_loader(
                train.index.values, train.Label.values, train, **self.config
            ),
            **params,
        )
        if val is not None:
            validation_generator = data.DataLoader(
                data_process_loader(
                    val.index.values, val.Label.values, val, **self.config
                ),
                **params,
            )

        if test is not None:
            info = data_process_loader(
                test.index.values, test.Label.values, test, **self.config
            )
            # Test loader iterates in fixed order (no shuffling) so that
            # predictions line up with the input rows.
            params_test = {
                "batch_size": BATCH_SIZE,
                "shuffle": False,
                "num_workers": self.config["num_workers"],
                "drop_last": False,
                "sampler": SequentialSampler(info),
            }

            if self.drug_encoding == "MPNN":
                params_test["collate_fn"] = mpnn_collate_func
            elif self.drug_encoding in [
                "DGL_GCN",
                "DGL_NeuralFP",
                "DGL_GIN_AttrMasking",
                "DGL_GIN_ContextPred",
                "DGL_AttentiveFP",
            ]:
                params_test["collate_fn"] = dgl_collate_func
            testing_generator = data.DataLoader(
                data_process_loader(
                    test.index.values, test.Label.values, test, **self.config
                ),
                **params_test,
            )

        # early stopping
        # Track the best validation metric seen so far (AUROC up / MSE down).
        if self.binary:
            max_auc = 0
        else:
            max_MSE = 10000
        model_max = copy.deepcopy(self.model)

        valid_metric_record = []
        valid_metric_header = ["# epoch"]
        if self.binary:
            valid_metric_header.extend(["AUROC", "AUPRC", "F1"])
        else:
            valid_metric_header.extend(
                ["MSE", "Pearson Correlation", "with p-value", "Concordance Index"]
            )
        table = PrettyTable(valid_metric_header)
        float2str = lambda x: "%0.4f" % x
        if verbose:
            print("--- Go for Training ---")
        writer = SummaryWriter(comment=self.result_folder)
        t_start = time()
        iteration_loss = 0
        for epo in range(train_epoch):
            for i, (v_d, v_p, label) in enumerate(training_generator):
                # Graph/token encodings handle device placement inside their
                # encoder modules; dense features are moved to the device here.
                if self.target_encoding == "Transformer":
                    v_p = v_p
                else:
                    v_p = v_p.float().to(self.device)
                if self.drug_encoding in [
                    "MPNN",
                    "Transformer",
                    "DGL_GCN",
                    "DGL_NeuralFP",
                    "DGL_GIN_AttrMasking",
                    "DGL_GIN_ContextPred",
                    "DGL_AttentiveFP",
                ]:
                    v_d = v_d
                else:
                    v_d = v_d.float().to(self.device)
                    # score = self.model(v_d, v_p.float().to(self.device))

                score = self.model(v_d, v_p)
                label = Variable(torch.from_numpy(np.array(label)).float()).to(
                    self.device
                )

                # Binary: BCE on sigmoid probabilities; regression: MSE on raw scores.
                if self.binary:
                    loss_fct = torch.nn.BCELoss()
                    m = torch.nn.Sigmoid()
                    n = torch.squeeze(m(score), 1)
                    loss = loss_fct(n, label)
                else:
                    loss_fct = torch.nn.MSELoss()
                    n = torch.squeeze(score, 1)
                    loss = loss_fct(n, label)
                loss_history.append(loss.item())
                writer.add_scalar("Loss/train", loss.item(), iteration_loss)
                iteration_loss += 1

                opt.zero_grad()
                loss.backward()
                opt.step()

                if verbose:
                    if i % 100 == 0:
                        t_now = time()
                        print(
                            "Training at Epoch "
                            + str(epo + 1)
                            + " iteration "
                            + str(i)
                            + " with loss "
                            + str(loss.cpu().detach().numpy())[:7]
                            + ". Total time "
                            + str(int(t_now - t_start) / 3600)[:7]
                            + " hours"
                        )
                        ### record total run time

            if val is not None:
                ##### validate, select the best model up to now
                with torch.set_grad_enabled(False):
                    if self.binary:
                        ## binary: ROC-AUC, PR-AUC, F1, cross-entropy loss
                        auc, auprc, f1, loss, logits = self.test_(
                            validation_generator, self.model
                        )
                        lst = ["epoch " + str(epo)] + list(
                            map(float2str, [auc, auprc, f1])
                        )
                        valid_metric_record.append(lst)
                        # Keep a snapshot of the best-AUROC model for early stopping.
                        if auc > max_auc:
                            model_max = copy.deepcopy(self.model)
                            max_auc = auc
                        if verbose:
                            print(
                                "Validation at Epoch "
                                + str(epo + 1)
                                + ", AUROC: "
                                + str(auc)[:7]
                                + " , AUPRC: "
                                + str(auprc)[:7]
                                + " , F1: "
                                + str(f1)[:7]
                                + " , Cross-entropy Loss: "
                                + str(loss)[:7]
                            )
                    else:
                        ### regression: MSE, Pearson Correlation, with p-value, Concordance Index
                        mse, r2, p_val, CI, logits, loss_val = self.test_(
                            validation_generator, self.model
                        )
                        lst = ["epoch " + str(epo)] + list(
                            map(float2str, [mse, r2, p_val, CI])
                        )
                        valid_metric_record.append(lst)
                        # Keep a snapshot of the lowest-MSE model for early stopping.
                        if mse < max_MSE:
                            model_max = copy.deepcopy(self.model)
                            max_MSE = mse
                        if verbose:
                            print(
                                "Validation at Epoch "
                                + str(epo + 1)
                                + " with loss:"
                                + str(loss_val.item())[:7]
                                + ", MSE: "
                                + str(mse)[:7]
                                + " , Pearson Correlation: "
                                + str(r2)[:7]
                                + " with p-value: "
                                + str(f"{p_val:.2E}")
                                + " , Concordance Index: "
                                + str(CI)[:7]
                            )
                            # NOTE(review): validation scalars are only logged
                            # to TensorBoard when verbose is True.
                            writer.add_scalar("valid/mse", mse, epo)
                            writer.add_scalar("valid/pearson_correlation", r2, epo)
                            writer.add_scalar("valid/concordance_index", CI, epo)
                            writer.add_scalar(
                                "Loss/valid", loss_val.item(), iteration_loss
                            )
                table.add_row(lst)
            else:
                # No validation set: the final-epoch model is the one kept.
                model_max = copy.deepcopy(self.model)

        # load early stopped model
        self.model = model_max

        if val is not None:
            #### after training
            prettytable_file = os.path.join(
                self.result_folder, "valid_markdowntable.txt"
            )
            with open(prettytable_file, "w") as fp:
                fp.write(table.get_string())

        if test is not None:
            if verbose:
                print("--- Go for Testing ---")
            # NOTE(review): 'epo' below assumes train_epoch >= 1; with 0 epochs
            # the loop variable would be undefined.
            if self.binary:
                auc, auprc, f1, loss, logits = self.test_(
                    testing_generator, model_max, test=True
                )
                test_table = PrettyTable(["AUROC", "AUPRC", "F1"])
                test_table.add_row(list(map(float2str, [auc, auprc, f1])))
                if verbose:
                    print(
                        "Validation at Epoch "
                        + str(epo + 1)
                        + " , AUROC: "
                        + str(auc)[:7]
                        + " , AUPRC: "
                        + str(auprc)[:7]
                        + " , F1: "
                        + str(f1)[:7]
                        + " , Cross-entropy Loss: "
                        + str(loss)[:7]
                    )
            else:
                mse, r2, p_val, CI, logits, loss_test = self.test_(
                    testing_generator, model_max
                )
                test_table = PrettyTable(
                    ["MSE", "Pearson Correlation", "with p-value", "Concordance Index"]
                )
                test_table.add_row(list(map(float2str, [mse, r2, p_val, CI])))
                if verbose:
                    print(
                        "Testing MSE: "
                        + str(mse)
                        + " , Pearson Correlation: "
                        + str(r2)
                        + " with p-value: "
                        + str(f"{p_val:.2E}")
                        + " , Concordance Index: "
                        + str(CI)
                    )
            # Persist raw test predictions for later analysis.
            np.save(
                os.path.join(
                    self.result_folder,
                    str(self.drug_encoding)
                    + "_"
                    + str(self.target_encoding)
                    + "_logits.npy",
                ),
                np.array(logits),
            )

            ######### learning record ###########

            ### 1. test results
            prettytable_file = os.path.join(
                self.result_folder, "test_markdowntable.txt"
            )
            with open(prettytable_file, "w") as fp:
                fp.write(test_table.get_string())

        ### 2. learning curve
        fontsize = 16
        iter_num = list(range(1, len(loss_history) + 1))
        plt.figure(3)
        plt.plot(iter_num, loss_history, "bo-")
        plt.xlabel("iteration", fontsize=fontsize)
        plt.ylabel("loss value", fontsize=fontsize)
        pkl_file = os.path.join(self.result_folder, "loss_curve_iter.pkl")
        with open(pkl_file, "wb") as pck:
            pickle.dump(loss_history, pck)

        fig_file = os.path.join(self.result_folder, "loss_curve.png")
        plt.savefig(fig_file)
        if verbose:
            print("--- Training Finished ---")
            # NOTE(review): the TensorBoard writer is only flushed/closed when
            # verbose is True — confirm this is intentional.
            writer.flush()
            writer.close()

    def predict(self, df_data):
        """
        Runs inference on the given DataFrame of data.

        Parameters:
                df_data (pd.DataFrame): The DataFrame containing the data to be predicted. It should include the necessary features for the model to make predictions.

        Returns:
                varies: The prediction results, which can be a list of scores, probabilities, or class labels depending on the model's task and configuration.
        """
        print("predicting...")
        dataset = data_process_loader(
            df_data.index.values, df_data.Label.values, df_data, **self.config
        )
        self.model.to(self.device)

        # Deterministic, sequential iteration — no shuffling at inference time.
        loader_params = {
            "batch_size": self.config["batch_size"],
            "shuffle": False,
            "num_workers": self.config["num_workers"],
            "drop_last": False,
            "sampler": SequentialSampler(dataset),
        }

        # Graph-based drug encoders need their dedicated collate functions.
        dgl_encoders = (
            "DGL_GCN",
            "DGL_NeuralFP",
            "DGL_GIN_AttrMasking",
            "DGL_GIN_ContextPred",
            "DGL_AttentiveFP",
        )
        if self.drug_encoding == "MPNN":
            loader_params["collate_fn"] = mpnn_collate_func
        elif self.drug_encoding in dgl_encoders:
            loader_params["collate_fn"] = dgl_collate_func

        generator = data.DataLoader(dataset, **loader_params)

        # repurposing_mode=True makes test_ return raw scores only.
        return self.test_(generator, self.model, repurposing_mode=True)

    def save_model(self, path_dir):
        """
        Saves the model's state dictionary and configuration to the specified directory.

        Parameters:
                path_dir (str): Directory path where the model and configuration will be saved.
                                           If the directory doesn't exist, it will be created.

        Returns:
                None

        Note:
                The model state is saved as 'model.pt' and the configuration as a YAML file
                'config.yaml' in the specified directory.
        """
        os.makedirs(path_dir, exist_ok=True)

        # Persist the network weights.
        torch.save(self.model.state_dict(), os.path.join(path_dir, "model.pt"))

        # Persist the configuration in human-readable YAML, preserving key order.
        with open(os.path.join(path_dir, "config.yaml"), "w") as f:
            yaml.dump(self.config, f, default_flow_style=False, sort_keys=False)

        # Legacy pickle-style config dump kept for backward compatibility.
        save_dict(path_dir, self.config)

    def load_pretrained(self, path):
        """
        Loads a pretrained model from the specified path.

        Parameters:
                path (str): Path to the pretrained model file (.pt file)

        Returns:
                None

        Note:
                This method handles both single-GPU and multi-GPU (DataParallel) model states.
                For multi-GPU models, it removes the 'module.' prefix from state dict keys.
        """
        # Guard against a bare filename: os.path.dirname('model.pt') == '' and
        # os.makedirs('') raises FileNotFoundError, so only create a parent
        # directory when the path actually has one.
        parent_dir = os.path.dirname(path)
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)

        # Load onto CPU first; the caller moves the model to self.device later.
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints from trusted sources.
        state_dict = torch.load(path, map_location=torch.device("cpu"))

        # Checkpoints saved from nn.DataParallel prefix every key with
        # 'module.'; strip it so the keys match the bare model.
        if next(iter(state_dict)).startswith("module."):
            from collections import OrderedDict

            prefix_len = len("module.")
            state_dict = OrderedDict(
                (k[prefix_len:], v) for k, v in state_dict.items()
            )

        self.model.load_state_dict(state_dict)
        self.binary = self.config["binary"]
