from __future__ import annotations

import os

from ray import train, tune
from ray.rllib.algorithms import Algorithm
from ray.tune.registry import get_trainable_cls

from customization.callbacks import CheckpointCallback


def print_algo_cls(cls_list: list[str]):
    """Print each algorithm name together with the module that defines its trainable class."""
    for name in cls_list:
        print(name, get_trainable_cls(name).__module__)


def train_test(
    cls: str,
    save_dir: str,
    num_gpus: int = 1,
    stop_iters: int = 20,
):
    """Run a short training smoke test for a registered RLlib algorithm.

    Args:
        cls: Registered algorithm name (e.g. "PPO", "DQN").
        save_dir: Root directory Ray Tune writes results/checkpoints to.
        num_gpus: GPUs to allocate to the trainer (default keeps prior behavior).
        stop_iters: Training iterations before stopping (default keeps prior behavior).

    Returns:
        The ``ResultGrid`` produced by ``Tuner.fit()``.
    """
    # Continuous-action algorithms need a continuous-control env.
    if cls in ["DDPG", "TD3"]:
        env_name = "Pendulum-v1"
    else:
        env_name = "CartPole-v1"

    # get_trainable_cls returns the algorithm *class*, not an instance.
    algo_cls: type[Algorithm] = get_trainable_cls(cls)
    config = (
        algo_cls.get_default_config()
        .framework("torch")
        .environment(env_name)
        .resources(num_gpus=num_gpus)
        .callbacks(CheckpointCallback)
        .debugging(log_level="DEBUG")
    )

    tuner = tune.Tuner(
        algo_cls,
        param_space=config,
        run_config=train.RunConfig(
            name=f"{cls}_train_test",
            storage_path=save_dir,
            stop={"training_iteration": stop_iters},
            checkpoint_config=train.CheckpointConfig(
                checkpoint_at_end=True,
                checkpoint_score_attribute="episode_reward_mean",
                checkpoint_score_order="max",
            ),
        ),
    )

    return tuner.fit()


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser("RLlib algo test")
    parser.add_argument(
        "--save_dir",
        type=str,
        default=os.path.expanduser("~/ray_results"),
    )
    args = parser.parse_args()

    algo_list = ["SAC", "A2C", "A3C", "TRPO", "PPO", "DDPG", "DQN", "DDQN", "PG", "TD3"]
    print_algo_cls(algo_list)

    for algo in algo_list:
        print(f"Testing {algo}...")
        results = train_test(algo, args.save_dir)
        best_result = results.get_best_result("episode_reward_mean", "max")
        print(f"Results for {algo}: {best_result}")
        print("-" * 40)
