"""
Example of hyperparameter search in MLflow using simple random search.
The run method will evaluate random combinations of parameters in a new MLflow run.
The runs are evaluated based on validation set loss. Test set score is calculated to verify the
results.
Several runs can be run in parallel.
"""

from concurrent.futures import ThreadPoolExecutor
import click
import numpy as np
import mlflow
import mlflow.sklearn
import mlflow.tracking
import mlflow.projects
from mlflow.tracking.client import MlflowClient

# Sentinel "worst possible" loss: the largest representable float64. Used as
# the default null-model loss before any run has been evaluated.
_inf = np.finfo(np.float64).max

@click.command(help="Perform grid search over train (main entry point).")
@click.option("--max-runs", type=click.INT, default=20, help="Maximum number of runs to evaluate.")
@click.option("--max-p", type=click.INT, default=1, help="Maximum number of parallel runs.")
@click.option("--epochs", type=click.INT, default=50, help="Number of epochs")
@click.option("--metric", type=click.STRING, default="rmse", help="Metric to optimize on.")
@click.option("--seed", type=click.INT, default=97531, help="Seed for the random generator")
@click.option("--training-data", type=click.STRING, default="http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv",\
    help="Input dataset link.")
def run(training_data, max_runs, max_p, epochs, metric, seed):
    """
    Main entry point; parameters are supplied by the click options above.

    Runs a random hyperparameter search: evaluates up to ``max_runs`` randomly
    sampled (learning_rate, momentum) pairs as nested MLflow child runs (at
    most ``max_p`` in parallel), then tags the parent run with the best child
    run (lowest validation loss) and logs that child's metrics on the parent.
    """
    train_metric = "train_{}".format(metric)
    val_metric = "val_{}".format(metric)
    test_metric = "test_{}".format(metric)
    # Seed NumPy so the sampled (lr, momentum) pairs are reproducible across
    # invocations with the same --seed.
    np.random.seed(seed)
    tracking_client = mlflow.tracking.MlflowClient()

    def new_eval(
        nepochs, experiment_id, null_train_loss=_inf, null_val_loss=_inf, null_test_loss=_inf
    ):
        """Build an evaluation closure over one (lr, momentum) pair.

        ``nepochs`` fixes the epoch budget; the ``null_*_loss`` values cap the
        reported losses so a failed or diverging run can never look better
        than the null model.
        """

        # Named `evaluate` (not `eval`) to avoid shadowing the builtin.
        def evaluate(params):
            """Train with ``params`` in a nested run; return (run_id, losses)."""
            lr, momentum = params
            # Each training invocation gets its own nested child run, so every
            # trained model is stored under its own run directory. The number
            # of child runs is bounded by --max-runs.
            with mlflow.start_run(nested=True) as child_run:
                # Launch this project's "train" entry point asynchronously
                # with the sampled hyperparameters (fixed lr and momentum).
                # See https://mlflow.org/docs/latest/python_api/mlflow.projects.html#
                p = mlflow.projects.run(
                    run_id=child_run.info.run_id,
                    uri=".",
                    entry_point="train",
                    parameters={
                        "training_data": training_data,
                        "epochs": str(nepochs),
                        "learning_rate": str(lr),
                        "momentum": str(momentum),
                        "seed": str(seed),
                    },
                    experiment_id=experiment_id,
                    synchronous=False,
                )
                # NOTE on ``uri``:
                # * Case 1 — project run straight from git, e.g.
                #   mlflow run -e random --experiment-id <id> https://gitee.com/yichaoyyds/mlflow-ex-hyperparametertunning.git
                #   -> uri="." works.
                # * Case 2 — repo cloned first and run locally, e.g.
                #   mlflow run -e random --experiment-id <id> ./mlflow-ex-hyperparametertunning
                #   -> uri="." fails with: stderr: 'fatal: 'XXX/mlflow-ex-hyperparametertunning'
                #   does not appear to be a git repository'. Use the git URL
                #   "https://gitee.com/yichaoyyds/mlflow-ex-hyperparametertunning.git" as uri instead.
                succeeded = p.wait()
                # Record this child run's hyperparameters.
                mlflow.log_params({"lr": lr, "momentum": momentum})
            if succeeded:
                training_run = tracking_client.get_run(p.run_id)
                metrics = training_run.data.metrics
                # Cap the loss at the loss of the null model.
                train_loss = min(null_train_loss, metrics[train_metric])
                val_loss = min(null_val_loss, metrics[val_metric])
                test_loss = min(null_test_loss, metrics[test_metric])
            else:
                # Run failed => fall back to the null-model losses.
                tracking_client.set_terminated(p.run_id, "FAILED")
                train_loss = null_train_loss
                val_loss = null_val_loss
                test_loss = null_test_loss
            mlflow.log_metrics(
                {
                    # Reuse the metric names computed once above instead of
                    # re-formatting them here.
                    train_metric: train_loss,
                    val_metric: val_loss,
                    test_metric: test_loss,
                }
            )
            return p.run_id, train_loss, val_loss, test_loss

        return evaluate

    # Create and start a parent run.
    with mlflow.start_run() as run:
        experiment_id = run.info.experiment_id
        # Baseline ("null model") run: new_eval(0, experiment_id) builds the
        # closure, and the trailing ((0, 0)) evaluates it at lr=0, momentum=0
        # with zero epochs to obtain the initial losses.
        _, null_train_loss, null_val_loss, null_test_loss = new_eval(0, experiment_id)((0, 0))
        # Randomly sample the candidate hyperparameters.
        # np.random.uniform(low, high) draws from the half-open interval
        # [low, high). `candidates` is a list of max_runs (lr, momentum) pairs.
        candidates = [
            (np.random.uniform(1e-5, 1e-1), np.random.uniform(0, 1.0)) for _ in range(max_runs)
        ]
        # Evaluate candidates with at most max_p concurrent worker threads.
        # Consuming the map iterator (list(...)) surfaces any unexpected
        # worker exception instead of silently discarding it.
        with ThreadPoolExecutor(max_workers=max_p) as executor:
            list(
                executor.map(
                    new_eval(epochs, experiment_id, null_train_loss, null_val_loss, null_test_loss),
                    candidates,
                )
            )
        # All runs are done; models and related information are saved under
        # the local mlruns folder. Find the best child run and log its metrics
        # as the final metrics of the parent run.
        client = MlflowClient()
        # Fetch all child runs of this parent run.
        child_runs = client.search_runs(
            [experiment_id], "tags.mlflow.parentRunId = '{run_id}' ".format(run_id=run.info.run_id)
        )
        best_val_train = _inf
        best_val_valid = _inf
        best_val_test = _inf
        best_run = None
        # Pick the child run with the lowest validation loss. Use the
        # configured metric names (val_metric, ...) rather than hard-coded
        # "*_rmse" keys so a non-default --metric works too.
        for r in child_runs:
            metrics = r.data.metrics
            if val_metric not in metrics:
                # Child never logged its metrics (e.g. it failed); skip it.
                continue
            if metrics[val_metric] < best_val_valid:
                best_run = r
                best_val_train = metrics[train_metric]
                best_val_valid = metrics[val_metric]
                best_val_test = metrics[test_metric]
        if best_run is None:
            # Every child run failed or logged nothing usable; fail loudly
            # instead of crashing on best_run.info below.
            raise RuntimeError("No successful child run found; cannot select a best run.")

        # Set a tag on the parent run that records the best child run's id.
        mlflow.set_tag("best_run", best_run.info.run_id)
        # Log the best child run's metric values on the parent run.
        mlflow.log_metrics(
            {
                train_metric: best_val_train,
                val_metric: best_val_valid,
                test_metric: best_val_test,
            }
        )


if __name__ == "__main__":
    # Click parses the CLI options and invokes run() with them.
    run()