import argparse, os, json, pandas as pd
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.callbacks import EvalCallback, CheckpointCallback
from stable_baselines3.common.logger import configure
from star_env import StarEnv
from preprocess.preprocess import load_catalog
from rl_env.callbacks import InfoAveragingCallback

def load_yaml(path):
    """Parse the YAML file at *path* and return the loaded object."""
    # Lazy import keeps PyYAML out of the hot import path of this module.
    import yaml
    with open(path, 'r') as fh:
        return yaml.safe_load(fh)

def make_env(df, env_cfg):
    """Return a zero-argument factory that builds a fresh StarEnv.

    SB3's make_vec_env expects such a callable; the closure captures the
    catalog dataframe and env config so each worker constructs its own env.
    """
    def _factory():
        return StarEnv(df, env_cfg)
    return _factory

def build_algo(name, env, cfg, logdir):
    """Construct the RL model selected by *name*.

    Args:
        name: algorithm identifier, compared case-insensitively; "SAC"
            builds a SAC model, anything else falls back to PPO.
        env: the (vectorized) training environment.
        cfg: full config dict; hyperparameters are read from cfg['algo'].
        logdir: directory for TensorBoard event files.

    Returns:
        A configured stable-baselines3 model wrapping *env*.
    """
    algo_cfg = cfg['algo']
    common = dict(verbose=1, device=algo_cfg.get('device', 'cpu'),
                  tensorboard_log=logdir)
    if name.upper() == "SAC":
        # Imported lazily (same style as load_yaml's yaml import). This was
        # previously referenced without any import -> NameError at runtime.
        from stable_baselines3 import SAC
        return SAC("MlpPolicy", env,
                   learning_rate=algo_cfg['lr'],
                   buffer_size=algo_cfg['buffer_size'],
                   batch_size=algo_cfg['batch_size'],
                   learning_starts=algo_cfg['learning_starts'],
                   train_freq=algo_cfg['train_freq'],
                   gradient_steps=algo_cfg['gradient_steps'],
                   gamma=algo_cfg['gamma'],
                   **common)
    # Default: PPO.
    return PPO("MlpPolicy", env,
               learning_rate=algo_cfg['lr'],
               gamma=algo_cfg['gamma'],
               ent_coef=algo_cfg['ent_coef'],
               batch_size=algo_cfg['batch_size'],
               n_steps=algo_cfg['n_steps'],
               clip_range=algo_cfg['clip_range'],
               **common)

if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument("--cfg", default="configs/default.yaml")
    ap.add_argument("--catalog", default=None)      # 可覆盖配置
    ap.add_argument("--steps", type=int, default=None)
    ap.add_argument("--n-env", type=int, default=None)
    args = ap.parse_args()

    cfg = load_yaml(args.cfg)
    if args.catalog: cfg['catalog'] = args.catalog
    if args.steps:   cfg['algo']['total_steps'] = args.steps
    if args.n_env:   cfg['algo']['n_envs'] = args.n_env

    df = load_catalog(cfg['catalog'])
    env_cfg = cfg['env']

   # 关键：向量化环境一定放在 __main__ 里创建
    vec_env = make_vec_env(make_env(df, env_cfg), n_envs=cfg['algo']['n_envs'])

    logdir = cfg['algo']['tb_logdir']
    os.makedirs(logdir, exist_ok=True)
    os.makedirs(cfg['algo']['save_dir'], exist_ok=True)

    # 关键：配置 SB3 logger 写 TensorBoard
    sb3_logger = configure(logdir, ["stdout", "tensorboard", "csv"])
    model = build_algo(cfg['algo']['name'], vec_env, cfg, logdir)
    model.set_logger(sb3_logger)

    # 评估与检查点
    eval_env = make_env(df, env_cfg)()
    eval_callback = EvalCallback(
        eval_env, best_model_save_path=cfg['algo']['save_dir'],
        log_path=logdir, eval_freq=10_000, deterministic=True
    )
    ckpt_callback = CheckpointCallback(save_freq=50_000,
                                       save_path=cfg['algo']['save_dir'],
                                       name_prefix=f"{cfg['algo']['name'].lower()}_ckpt")
    
    # 自检：防止 total_steps < n_envs*n_steps（会只训一次）
    rollout_size = cfg['algo']['n_envs'] * cfg['algo']['n_steps']      # ★
    if cfg['algo']['total_steps'] < rollout_size:
        print(f"[WARN] total_steps({cfg['algo']['total_steps']}) < rollout_size({rollout_size}). "
            f"Auto-bumping to {rollout_size*100}.")
        cfg['algo']['total_steps'] = rollout_size * 100   
    # 真正开始训练（这一步执行后才会生成 events.*）
    biz_cb = InfoAveragingCallback(eval_env, eval_freq=10_000, n_eval_episodes=3)

    model.learn(
        total_timesteps=cfg['algo']['total_steps'],
        callback=[eval_callback, ckpt_callback, biz_cb],
        progress_bar=True,         # ★ 关键：进度条
    )

    model.save(os.path.join(cfg['algo']['save_dir'],
                            f"{cfg['algo']['name'].lower()}_star_final"))