# -*- coding: utf-8 -*-

import os
import argparse
import pprint

import random
import time
import datetime

import torch
import math
import numpy as np

from utils.config import YamlConfig, ConfigDict
from utils.exp_utils import Log, plotSmoothAndSaveFig

# from enjoy import eval_policy

import ray
from ray.tune.registry import get_trainable_cls, register_env
from ray.air.constants import TRAINING_ITERATION
from ray.rllib.utils.metrics import *
from ray.tune.logger import pretty_print


"""
nohup xxx > out1.txt 2>&1 &
python run.py --env HalfCheetah-v5 --policy PPO --num_rollout_workers 16 --num_learners 2

"""


def parse_args() -> argparse.Namespace:
    """Define and parse the command-line options for the trainer.

    Returns:
        argparse.Namespace with environment/policy selection, training
        schedule overrides, and experiment bookkeeping options. Options
        defaulting to None fall back to values from the YAML config.
    """
    ap = argparse.ArgumentParser(description="Pytorch RL rl_algorithms")
    add = ap.add_argument  # shorthand: every option goes through the same call

    # Environment / algorithm selection and run-control options.
    add("--env", default="RayKdSimOp-v0", help="OpenAI gym environment name")
    add("--policy", default="PPO", help="policy name (TD3, PPO, SAC)")
    add("--total_episodes", type=int, default=None, help="total episodes num")
    add("--stop_reward", type=float, default=None, help="stop reward")
    add("--eval_freq", type=int, default=None, help="eval model freq")
    add("--save_freq", type=int, default=None, help="save model freq")
    add("--eval_episodes", type=int, default=None, help="episode of test during training")
    add("--max_episode_steps", type=int, default=None, help="max episode step")
    add("--load_model", default="", help="Model load file name")
    add("--test", dest="test", action="store_true", help="test mode (no training)")

    # --- Experimental hyperparameter ---
    add("--seed", type=int, default=0, help="random seed for reproducibility")
    add("--label", type=str, default='rllib')
    add("--remark", type=str, default='')

    # Parallelism knobs for Ray (rollout workers and learner processes).
    add("--num_rollout_workers", type=int, default=1, help="num_rollout_workers")
    add("--num_learners", type=int, default=1, help="num_learners")

    # Optional algorithm hyperparameter overrides (None keeps config values).
    add("--gamma", type=float, default=None, help="gamma")
    add("--lambda_", type=float, default=None, help="lambda_")
    add("--entropy_coeff", type=float, default=None, help="entropy_coeff")
    add("--lr", type=float, default=None, help="lr")

    return ap.parse_args()

def init_seed(seed):
    """Seed every RNG source (python, numpy, torch/CUDA) for reproducibility.

    Args:
        seed: integer seed applied to all random number generators.
    """
    # Disable cudnn auto-tuning to maximize reproducibility.
    # BUG FIX: the original line was ``torch.cuda.cudnn_enabled = False``,
    # which sets a nonexistent attribute on the ``torch.cuda`` module and
    # therefore did nothing; the real switches live under
    # ``torch.backends.cudnn``.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Seed all CUDA devices, not just the current one (safe no-op without CUDA).
    torch.cuda.manual_seed_all(seed)

def main() -> None:
    """Train (or evaluate) an RLlib policy.

    Flow: parse CLI args -> seed RNGs -> build experiment directory and
    load the per-env/per-policy YAML config -> assemble an RLlib
    AlgorithmConfig -> run the training loop with periodic evaluation,
    reward/loss plotting and checkpointing until a stop criterion fires.
    """
    args = parse_args()

    # set a random seed
    init_seed(args.seed)

    # === config ===
    # Build the experiment directory name:
    # checkpoint/<label>/<env>/<policy>/seed<seed>_workersW_learnersL[_remark]_<timestamp>
    NOWTIMES = datetime.datetime.now()
    curr_time = NOWTIMES.strftime("%y%m%d_%H%M%S")
    exp_path = os.path.join('checkpoint', str(args.label)) if len(str(args.label)) > 0 else 'checkpoint'
    exp_path = os.path.join(exp_path, str(args.env).replace('-', '_').lower(), str(args.policy).lower())
    remark_str = f'_workers{args.num_rollout_workers}_learners{args.num_learners}'
    if args.remark != '': remark_str += ('_' + args.remark)
    exp_path = os.path.join(exp_path, 'seed' + str(args.seed) + remark_str + '_' + str(curr_time))
    if not args.test:
        os.makedirs(exp_path, exist_ok=True)
    print("---------------------------------------")
    print(f"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}")
    print(f"Checkpoint: {exp_path}")
    print("---------------------------------------")
    # Infer the env from the checkpoint path when loading a saved model.
    if args.load_model != "":
        if 'mountaincarcontinuous' in args.load_model:
            args.env = 'MountainCarContinuous-v0'
    # Config file layout: configs/<env_lowercase_underscored>/<policy>.yaml
    cfg_name = args.env
    cfg_policy = str(args.policy).lower()
    cfg_path = os.path.join('configs', str(cfg_name).replace('-', '_').lower(), str(cfg_policy).lower() + '.yaml')
    cfg = YamlConfig(cfg_path).get_config_dict()
    cfg.env = args.env
    cfg.exp_path = exp_path
    cfg.policy = args.policy
    cfg.seed = args.seed
    cfg.test = args.test
    # --- Experimental parameter ---
    # CLI overrides take precedence over the YAML values; None means "use YAML".
    if args.total_episodes is not None:
        cfg.hyper_params.total_episodes = args.total_episodes
    if args.stop_reward is not None:
        cfg.hyper_params.stop_reward = args.stop_reward
    # NOTE(review): '/' yields a float here; RLlib's evaluation_interval and
    # the modulo-based save check below likely expect an int — confirm the
    # config's total_episodes and consider using '//'.
    if args.eval_freq is None:
        args.eval_freq = cfg.hyper_params.total_episodes / 100
        # args.eval_freq = 10
    if args.save_freq is None:
        args.save_freq = cfg.hyper_params.total_episodes / 10
        # args.save_freq = 5000
    if args.eval_episodes is None:
        args.eval_episodes = 20
    cfg.hyper_params.eval_freq = args.eval_freq
    cfg.hyper_params.save_freq = args.save_freq
    cfg.hyper_params.eval_episodes = args.eval_episodes

    # --- Algorithm configuration ---
    # GPU allocation: split all visible GPUs evenly across learners
    # (fractional GPUs per learner are allowed by RLlib).
    available_gpus = torch.cuda.device_count()
    print(f'Available GPUs: {available_gpus}')
    if available_gpus > 0:
        num_gpus_per_learner = available_gpus / args.num_learners
    else:
        num_gpus_per_learner = 0

    # Old-style (legacy) config omitted.
    # New-style (new API stack) config:
    alg_config = (
        get_trainable_cls(args.policy)
        .get_default_config()
        .debugging(seed=args.seed)  # set the random seed
        .api_stack(  # use the new API stack
            enable_rl_module_and_learner=True,
            enable_env_runner_and_connector_v2=True,
        )
        .environment(
            env=cfg.env,
        )
        .env_runners(
            num_env_runners=args.num_rollout_workers,  # number of env runners (single machine, multi-core); 0 samples in the main process; one runner can host several envs
            num_envs_per_env_runner=1,  # env instances managed by each env runner
            num_cpus_per_env_runner=1,
        )
        .evaluation(
            evaluation_interval=cfg.hyper_params.eval_freq,
            evaluation_duration=args.eval_episodes,
            evaluation_num_env_runners=1,  # number of parallel evaluation runners
            evaluation_parallel_to_training=True,  # evaluate in parallel with training
        )
        .checkpointing(export_native_model_files=True)
        .resources(num_gpus=0)
        .learners(
            num_learners=args.num_learners,  # how many learner processes
            num_gpus_per_learner=num_gpus_per_learner,  # GPUs per learner; fractional values allowed
        )
    )

    # Hyperparameter overrides: merge CLI values into the YAML training
    # section, then forward everything to AlgorithmConfig.training().
    if cfg.get('training_params') is not None:
        if args.gamma is not None:
            cfg['training_params']['gamma'] = args.gamma
        if args.lambda_ is not None:
            cfg['training_params']['lambda_'] = args.lambda_
        if args.entropy_coeff is not None:
            cfg['training_params']['entropy_coeff'] = args.entropy_coeff
        if args.lr is not None:
            cfg['training_params']['lr'] = args.lr
        alg_config.training(**cfg.get('training_params'))

    # Evaluation-only mode: currently a stub (the restore/eval path is
    # commented out), so --test just exits without training.
    if args.test:
        # if args.load_model != "":
        #     print("policy load ...")
        #     # policy = RLModule.from_checkpoint(args.load_model)
        #     algo = alg_config.build()
        #     cur_dir = os.path.dirname(os.path.abspath(__file__))
        #     algo.restore(os.path.join(cur_dir, args.load_model))
        #     print("policy eval ->")
        #     res = eval_policy(algo, args.env, 666, eval_episodes=100, render=True, save_gif=False)
        #     print(res)
        return

    # Initialize Ray.
    # ray.init()

    # --- Start training ---
    train_log = Log(os.path.join(cfg.exp_path, "e.log"))

    train_record_fig = os.path.join(cfg.exp_path, "train_episode_rewards.jpg")
    test_record_fig = os.path.join(cfg.exp_path, "test_episode_rewards.jpg")

    evaluations = []             # per-evaluation mean returns
    train_episode_rewards = []   # per-iteration mean training returns
    actor_loss = []              # policy loss per iteration (when reported)
    critic_loss = []             # value-function loss per iteration

    best_test_reward = -math.inf
    algo = alg_config.build_algo()
    # Stop criteria checked each iteration: mean return threshold and max iterations.
    stop = {
        f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}": cfg.hyper_params.stop_reward,
        TRAINING_ITERATION: cfg.hyper_params.total_episodes,
    }
    train_log.record(pprint.pformat(alg_config.to_dict()))
    train_log.record("开始训练 ->")

    # Restore the working directory — presumably building the algorithm (Ray)
    # may change cwd; confirm this is still needed.
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(cur_dir)

    start_time = time.time()
    use_time = 0

    for i in range(stop.get(TRAINING_ITERATION, cfg.hyper_params.total_episodes)):
        results = algo.train()
        use_time = time.time() - start_time

        # train_log.record(pretty_print(results))
        # Log the per-iteration mean training return plus wall-clock time.
        if ENV_RUNNER_RESULTS in results:
            mean_return = results[ENV_RUNNER_RESULTS].get(
                EPISODE_RETURN_MEAN, np.nan
            )
            # print(f"iter={i} R={mean_return}", end="")
            timestr = f"{int(use_time//60//60)}h {int(use_time//60%60)}m {int(use_time%60)}s"
            datestr = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            train_log.record(f"[{datestr}] [{timestr}] iter={i} R={mean_return}")

            train_episode_rewards.append(mean_return)
            # NOTE(review): assumes the learner results are keyed by
            # 'default_policy' with 'policy_loss'/'vf_loss' entries — this
            # layout is algorithm-specific (PPO-like); confirm for other policies.
            if LEARNER_RESULTS in results:
                actor_loss.append(results[LEARNER_RESULTS]['default_policy']['policy_loss'])
                critic_loss.append(results[LEARNER_RESULTS]['default_policy']['vf_loss'])
            
        # Evaluation results appear only on evaluation iterations; refresh the
        # plots and keep the best-so-far checkpoint.
        if EVALUATION_RESULTS in results:
            # train_log.record('-------------------')
            # train_log.record(pretty_print(results[EVALUATION_RESULTS][ENV_RUNNER_RESULTS]))
            Reval = results[EVALUATION_RESULTS][ENV_RUNNER_RESULTS][EPISODE_RETURN_MEAN]
            evaluations.append(Reval)

            plotSmoothAndSaveFig(50, train_episode_rewards, train_record_fig)
            plotSmoothAndSaveFig(50, evaluations, test_record_fig)
            plotSmoothAndSaveFig(1, actor_loss, os.path.join(cfg.exp_path, "train_actor_loss.jpg"))
            plotSmoothAndSaveFig(1, critic_loss, os.path.join(cfg.exp_path, "train_critic_loss.jpg"))

            if Reval > best_test_reward:
                best_test_reward = Reval
                # print(f'=> save best_model')
                train_log.record(f'=> save best_model')
                algo.save(os.path.abspath(os.path.join(cfg.exp_path, f"{'best'}")))

            # print(f" R(eval)={Reval}", end="")
            train_log.record(f" R(eval)={Reval}")
        # print()
        train_log.record('\n')

        # Walk each "a/b/c"-style stop key into the nested results dict;
        # missing keys simply skip that criterion for this iteration.
        for key, threshold in stop.items():
            val = results
            for k in key.split("/"):
                try:
                    val = val[k]
                except KeyError:
                    val = None
                    break
            if val is not None and not np.isnan(val) and val >= threshold:
                # print(f"Stop criterium ({key}={threshold}) fulfilled!")
                train_log.record(f"Stop criterium ({key}={threshold}) fulfilled!")
                algo.save(os.path.join(cfg.exp_path, f"{'last'}"))
                ray.shutdown()
                return
        # NOTE(review): assumes cfg.hyper_params defines stop_time (seconds);
        # this raises if the YAML omits it. Also note this early return skips
        # ray.shutdown(), unlike the other exit paths — confirm intended.
        if use_time > cfg.hyper_params.stop_time:
            algo.save(os.path.abspath(os.path.join(cfg.exp_path, f"params_ep_{str(i+1)}")))
            return

        # Save model
        if (i + 1) % cfg.hyper_params.save_freq == 0:
            algo.save(os.path.abspath(os.path.join(cfg.exp_path, f"params_ep_{str(i+1)}")))

    ray.shutdown()
    return


if __name__ == "__main__":
    main()
