import os
import time
import argparse
import yaml

import ray
from ray import air, tune
from ray.tune.registry import get_trainable_cls
from ray.tune.logger import UnifiedLogger
import gymnasium as gym
import torch



def custom_logger_creator(log_dir):
    """Return an RLlib ``logger_creator`` callable that logs to *log_dir*.

    RLlib invokes the returned callable with the algorithm's config dict
    when the trainer is built; it creates the directory on demand and
    returns a ``UnifiedLogger`` bound to it.
    """
    def logger_creator(config):
        # exist_ok avoids the race between an exists() check and makedirs()
        # when several trials start concurrently.
        os.makedirs(log_dir, exist_ok=True)
        return UnifiedLogger(config, log_dir, loggers=None)
    return logger_creator


def load_config(config_path):
    """Parse the YAML experiment configuration at *config_path* and return it."""
    with open(config_path, 'r') as handle:
        return yaml.safe_load(handle)


def train_once(config, cli_args=None):
    """Run one RLlib training session described by *config*.

    CLI arguments, when explicitly supplied, take precedence over the YAML
    config values. Training stops after ``max_time`` seconds or once the
    mean episode reward reaches ``target_reward`` (whichever configured
    condition is hit first).

    Args:
        config: Dict loaded from a YAML experiment file.
        cli_args: Optional argparse.Namespace with override values.
    """
    def _pick(attr, default):
        # CLI value wins when explicitly supplied; fall back to YAML, then
        # to the hard-coded default.
        cli_val = getattr(cli_args, attr, None) if cli_args else None
        return cli_val if cli_val is not None else config.get(attr, default)

    algo_name = str(_pick("algo", "PPO")).upper()
    env_name = _pick("env", "HalfCheetah-v4")
    num_workers = _pick("num_workers", 2)
    num_learners = _pick("num_learners", 1)
    max_time = _pick("max_time", 600)
    target_reward = _pick("target_reward", -1)
    seed = _pick("seed", 42)
    remark = _pick("remark", '')

    ray.init()
    try:
        algo_config = (
            get_trainable_cls(algo_name)
            .get_default_config()
            # Opt into the new API stack (RLModule/Learner + EnvRunner v2).
            .api_stack(
                enable_rl_module_and_learner=True,
                enable_env_runner_and_connector_v2=True,
            )
            .environment(env=env_name)
            .debugging(seed=seed)
            # Distributed sampling configuration.
            .env_runners(
                num_env_runners=num_workers,
                num_envs_per_env_runner=1,
                num_cpus_per_env_runner=1,
            )
        )

        # Auto-detect GPUs and split them evenly across learners. The share
        # may be fractional (e.g. 0.5 GPU per learner), which RLlib supports.
        available_gpus = torch.cuda.device_count()
        num_gpus_per_learner = available_gpus / num_learners if available_gpus > 0 else 0

        # Under the new API stack GPUs are requested through the learner
        # group, so the top-level num_gpus stays 0.
        algo_config = algo_config.resources(num_gpus=0)
        algo_config = algo_config.learners(
            num_learners=num_learners,
            num_gpus_per_learner=num_gpus_per_learner,
        )

        # Log directory layout: ./logs/<algo>/<env>/seed{}_{workers}_{learners}[_remark]
        log_dir = f"./logs/{algo_name}/{env_name}/seed{seed}_{num_workers}_{num_learners}"
        if remark:
            log_dir += f"_{remark}"

        algo = algo_config.build(
            logger_creator=custom_logger_creator(log_dir)
        )

        print(f"Starting training with {algo_name} on {env_name}...")

        start_time = time.time()

        while True:
            result = algo.train()
            elapsed_time = time.time() - start_time

            # The new API stack nests sampling metrics under "env_runners"
            # and reports lifetime step counts; fall back to the legacy flat
            # keys so the script also works with old-stack result dicts.
            mean_reward = result.get("env_runners", {}).get(
                "episode_return_mean",
                result.get("episode_reward_mean", float("nan")),
            )
            total_timesteps = result.get(
                "num_env_steps_sampled_lifetime",
                result.get("timesteps_total", 0),
            )

            # Report the main metrics for this iteration.
            print(
                f"Iter: {result['training_iteration']} | "
                f"Reward: {mean_reward:.2f} | "
                f"FPS: {total_timesteps / elapsed_time:.2f} | "
                f"Elapsed: {elapsed_time/60:.2f}min"
            )

            # Stopping conditions (a non-positive setting disables the check).
            if max_time > 0 and elapsed_time > max_time:
                print("Reached max training time. Stopping...")
                break

            if target_reward > 0 and mean_reward >= target_reward:
                print("Reached target reward. Stopping...")
                break

        final_fps = total_timesteps / elapsed_time
        print(f"Final Average FPS: {final_fps:.2f}")

        algo.stop()
    finally:
        # Always release the Ray cluster, even on failure, so a following
        # train_once() call (e.g. from batch_train) can ray.init() again.
        ray.shutdown()


def batch_train(config_dir, cli_args=None):
    """Run train_once for every ``*.yaml`` config file in *config_dir*.

    Files are processed in sorted order so batch runs are deterministic
    (os.listdir order is filesystem-dependent).
    """
    config_files = sorted(
        os.path.join(config_dir, name)
        for name in os.listdir(config_dir)
        if name.endswith('.yaml')
    )
    for config_path in config_files:
        print(f"\n=== Running {config_path} ===")
        config = load_config(config_path)
        train_once(config, cli_args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, help="Path to single YAML config file")
    parser.add_argument("--config-dir", type=str, help="Path to directory containing multiple YAML config files")

    # Optional hyperparameter overrides: any flag given here wins over the
    # corresponding YAML value (see train_once).
    parser.add_argument("--algo", type=str, help="Override algorithm name (e.g., PPO, DQN, SAC)")
    parser.add_argument("--env", type=str, help="Override environment name")
    parser.add_argument("--num-workers", type=int, help="Override number of workers")
    parser.add_argument("--num-learners", type=int, help="Override number of learners")
    parser.add_argument("--max-time", type=float, help="Override maximum training time (seconds)")
    parser.add_argument("--target-reward", type=float, help="Override target reward to stop training")
    parser.add_argument("--seed", type=int, help="Override random seed")
    parser.add_argument("--remark", type=str, help="Remark for experiment")

    args = parser.parse_args()

    if args.config:
        # --config takes precedence when both flags are supplied.
        config = load_config(args.config)
        train_once(config, args)
    elif args.config_dir:
        batch_train(args.config_dir, args)
    else:
        # Emit a proper argparse usage error (clean message + exit code 2)
        # instead of an uncaught ValueError traceback.
        parser.error("Please provide either --config or --config-dir")
