"""This is a (long-running) regression test to ensure code changes have not impacted
learning. It compares the mean episode reward of a newly trained agent with that
of a baseline from a past (baseline) commit. It is set to run for one hour.
"""
import multiprocessing
from pathlib import Path

# Make sure to install rllib dependencies using the command "pip install -e .[test]" before running the test
from ray import tune
from ray.rllib.models import ModelCatalog

from smarts.core.utils import import_utils
from smarts.core.utils.file import make_dir_in_smarts_log_dir
from smarts.env.rllib_hiway_env import RLlibHiWayEnv

HORIZON = 5000

import_utils.import_module_from_file(
    "examples", Path(__file__).parents[1] / "__init__.py"
)


def test_learning_regression_rllib():
    """Train a PPO agent on the loop scenario and assert its mean episode
    reward reaches the lower bound of the baseline's 95% confidence interval.

    The baseline bound is read from the ``ci95_reward_lo`` file next to this
    test; regenerate it with ``tools/regression_rllib.py`` when needed.
    """
    from examples.e12_rllib.rllib_agent import TrainingModel, rllib_agent

    ModelCatalog.register_custom_model(TrainingModel.NAME, TrainingModel)

    # Single shared policy; every agent id maps onto it.
    rllib_policies = {
        "policy": (
            None,
            rllib_agent["observation_space"],
            rllib_agent["action_space"],
            {"model": {"custom_model": TrainingModel.NAME}},
        )
    }

    # XXX: We should be able to simply provide "scenarios/sumo/loop"?
    scenario_dir = (Path(__file__).parents[2] / "scenarios/sumo/loop").absolute()

    tune_config = {
        "disable_env_checking": True,
        "env": RLlibHiWayEnv,
        "env_config": {
            "scenarios": [str(scenario_dir)],
            "seed": 42,
            "headless": True,
            "agent_specs": {"Agent-007": rllib_agent["agent_spec"]},
        },
        "multiagent": {
            "policies": rllib_policies,
            "policy_mapping_fn": lambda agent_id, episode, worker, **kwargs: "policy",
        },
        "framework": "tf2",
        "log_level": "WARN",
        # Leave one core free for the driver process.
        "num_workers": multiprocessing.cpu_count() - 1,
        "horizon": HORIZON,
    }

    analysis = tune.run(
        "PPO",
        name="learning_regression_test",
        stop={"training_iteration": 60},
        max_failures=0,
        local_dir=make_dir_in_smarts_log_dir("smarts_learning_regression"),
        config=tune_config,
    )

    results = analysis.dataframe()

    # Lower-bound 95% confidence interval of mean reward after one hour, generated by manual analysis.
    # If you need to update this, run tools/regression_rllib.py.
    baseline_file = (Path(__file__).parent / "ci95_reward_lo").absolute()
    with open(baseline_file) as f:
        CI95_REWARD_MEAN_1_HOUR = float(f.readline())

    achieved_reward = results["episode_reward_mean"][0]
    assert (
        achieved_reward >= CI95_REWARD_MEAN_1_HOUR
    ), "Mean reward did not reach the expected value ({} < {})".format(
        achieved_reward, CI95_REWARD_MEAN_1_HOUR
    )
