# train_stabilized_task.py
# 非配平增稳起飞专用训练代码（支持Optuna超参数优化）
from rl_core import RL_Trainer
from task13_env import StabilizedTakeoffEnv  # 导入改进后的环境类
import optuna
import logging
import sys

# 配置日志系统
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("results//training_stabilized.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


def objective(trial):
    """优化目标函数（适配增稳环境）"""
    try:
        # 超参数搜索空间（调整以适应增稳环境特性）
        hyperparams = {
            "learning_rate": trial.suggest_float("learning_rate", 3e-5, 3e-4, log=True),
            # 把 buffer_size 列表里的数字写成整数
            "buffer_size": trial.suggest_categorical("buffer_size", [100000, 500000, 1000000]),
            "batch_size": trial.suggest_int("batch_size", 128, 512, step=64),
            "gamma": trial.suggest_float("gamma", 0.98, 0.999),
            "tau": trial.suggest_float("tau", 0.001, 0.02),
            "ent_coef": trial.suggest_categorical("ent_coef", ["auto", 0.05, 0.1]),
            "train_freq": trial.suggest_int("train_freq", 1, 5),
            "policy_kwargs": {
                "net_arch": trial.suggest_categorical(
                    "net_arch",
                    [{"pi": [256, 256], "qf": [256, 256]}]
                )
            }
        }

        # 初始化训练器（使用增稳环境）
        trainer = RL_Trainer(
            env_class=StabilizedTakeoffEnv,
            algorithm="SAC",
            **hyperparams
        )

        # 训练配置（增加训练步数以应对复杂状态）
        trainer.train(
            total_timesteps=50000,
            model_name=f"stabilized_trial_{trial.number}",
            save_path="./optuna_stabilized/",
            checkpoint_freq=10000,
        )

        # 增强评估方法（考虑角速度稳定性）
        mean_reward = trainer.evaluate(
            episodes=10,
            # reward_threshold = 5000,  # 提升奖励阈值
            # stability_threshold = 0.2  # 新增角速度稳定性指标
        )

        logger.info(f"Trial {trial.number} - Avg Reward: {mean_reward:.1f}")
        return mean_reward

    except Exception as e:
        logger.error(f"Trial {trial.number} failed: {str(e)}")
        return float('-inf')


def main():
    """主训练流程（适配增稳需求）"""
    try:
        # 初始化Optuna Study（添加剪枝机制）
        sampler = optuna.samplers.TPESampler(n_startup_trials=5)
        study = optuna.create_study(
            direction="maximize",
            sampler=sampler,
            pruner=optuna.pruners.MedianPruner(n_startup_trials=5, n_warmup_steps=10)
        )

        # 执行优化（增加并行度）
        study.optimize(objective, n_trials=30, timeout=7200, n_jobs=1)

        # 输出最佳配置
        logger.info(f"Best params: {study.best_params}")
        logger.info(f"Best value: {study.best_value:.1f}")

        # 最终训练（使用最佳参数）
        final_trainer = RL_Trainer(
            env_class=StabilizedTakeoffEnv,
            algorithm="SAC",
            **study.best_params
        )
        final_trainer.train(
            total_timesteps=200000,
            model_name="stabilized_final",
            save_path="./best_stabilized/",
            checkpoint_freq=20000
        )

        # 保存最终模型（包含环境元数据）
        final_trainer.save(
            path="./best_stabilized/final_model.zip",
            # save_replay_buffer = True,
            # include_env = True
        )

    except KeyboardInterrupt:
        logger.warning("Training interrupted, saving checkpoint...")
        final_trainer.save("./best_stabilized/interrupted_model")
    except Exception as e:
        logger.critical(f"Critical error: {str(e)}", exc_info=True)
        sys.exit(1)


if __name__ == "__main__":
    # 环境兼容性检查
    try:

        env = StabilizedTakeoffEnv()
        env.reset()
        env.close()
    except Exception as e:
        logger.error(f"Environment validation failed: {str(e)}")
        sys.exit(1)

    # 依赖检查
    try:
        import stable_baselines3
        import gym
        import torch

        assert gym.__version__ >= "0.26.0", "需要 gym>=0.26.0 版本"
        # assert torch.cuda.is_available(), "需要GPU加速" ，测试GPU加速失败
    except ImportError as e:
        logger.critical(f"Missing dependency: {str(e)}")
        sys.exit(1)

    main()
