# training.py

import os
import argparse
from stable_baselines3 import SAC
from stable_baselines3.common.callbacks import EvalCallback
from stable_baselines3.common.monitor import Monitor

# Import our custom environment
from SwingArmSuspensionEnv import SwingArmSuspensionEnv

def main():
    """Entry point: train a SAC agent on the swing-arm suspension
    environment, or evaluate a previously saved model.

    Command-line interface (unchanged):
        --train              train a new model (mutually exclusive with --eval)
        --eval MODEL_PATH    evaluate a pre-trained model from MODEL_PATH
        --render             enable the real-time animation
        --timesteps N        total training timesteps (default 100000)
    """
    parser = argparse.ArgumentParser(description="Train or evaluate a DRL agent for active suspension control.")

    # Training and evaluation are mutually exclusive modes, so exactly one
    # of --train / --eval must be given.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--train", action="store_true", help="Train a new model.")
    group.add_argument("--eval", type=str, metavar="MODEL_PATH", help="Evaluate a pre-trained model.")

    parser.add_argument("--render", action="store_true", help="Enable real-time animation during training or evaluation.")
    parser.add_argument("--timesteps", type=int, default=100000, help="Total timesteps for training.")

    args = parser.parse_args()

    # --- File path setup ---
    log_dir = "logs/"
    model_dir = "models/"
    best_model_path = os.path.join(model_dir, "best_model.zip")
    final_model_path = os.path.join(model_dir, "sac_suspension_final.zip")
    os.makedirs(log_dir, exist_ok=True)
    os.makedirs(model_dir, exist_ok=True)

    # --- Environment setup ---
    render_mode = "human" if args.render else None
    env = SwingArmSuspensionEnv(render_mode=render_mode)
    env = Monitor(env)  # records per-episode statistics during training

    # --- Mode selection ---
    if args.train:
        print("--- Starting Training ---")

        # FIX: evaluate on a *separate* environment instance. Passing the
        # training env to EvalCallback makes the periodic evaluation reset
        # it mid-rollout, corrupting the ongoing training episode and its
        # Monitor statistics (stable-baselines3 docs recommend a dedicated
        # eval env). Rendering stays off for the eval env regardless of
        # --render, since it only runs headless periodic evaluations.
        eval_env = Monitor(SwingArmSuspensionEnv(render_mode=None))

        # EvalCallback periodically evaluates the agent and keeps the best
        # checkpoint (saved as best_model.zip inside model_dir).
        eval_callback = EvalCallback(eval_env, best_model_save_path=model_dir,
                                     log_path=log_dir, eval_freq=5000,
                                     deterministic=True, render=False)

        # Standard SAC hyperparameters; learning_starts delays gradient
        # updates until the replay buffer has some off-policy experience.
        model = SAC(
            "MlpPolicy",
            env,
            verbose=1,
            learning_rate=3e-4,
            buffer_size=100_000,
            batch_size=256,
            ent_coef='auto',
            gamma=0.99,
            tau=0.005,
            train_freq=1,
            gradient_steps=1,
            learning_starts=10000,
            tensorboard_log=log_dir
        )

        try:
            model.learn(total_timesteps=args.timesteps, callback=eval_callback)
            model.save(final_model_path)
            print(f"\nTraining complete. Final model saved to {final_model_path}")
            print(f"Best performing model saved to {best_model_path}")
        except KeyboardInterrupt:
            # Ctrl+C: keep whatever progress was made instead of losing it.
            print("\nTraining interrupted. Saving current model...")
            model.save(final_model_path)
            print(f"Model saved to {final_model_path}")
        finally:
            eval_env.close()

    elif args.eval:
        print(f"--- Evaluating Model: {args.eval} ---")
        if not os.path.exists(args.eval):
            print(f"Error: Model path not found: {args.eval}")
            return

        if not args.render:
            print("Warning: Evaluation without rendering. Use --render to see the animation.")

        model = SAC.load(args.eval, env=env)

        # Run several deterministic episodes and report the return of each.
        num_episodes = 5
        for ep in range(num_episodes):
            obs, info = env.reset()
            done = False
            truncated = False
            episode_reward = 0
            while not (done or truncated):
                action, _states = model.predict(obs, deterministic=True)
                # Gymnasium API: step returns (obs, reward, terminated, truncated, info).
                obs, reward, done, truncated, info = env.step(action)
                episode_reward += reward
            print(f"Episode {ep + 1}: Total Reward = {episode_reward:.2f}")

    env.close()

if __name__ == "__main__":
    main()
