import logging
import os
import re
import sys

from rl_core import RL_Trainer  # reinforcement-learning training core
from task1_env0329 import TakeoffEnv  # takeoff-task environment
from vjoy_control_new import activate_dcs_window

# Configure logging so training progress goes to both a file and the console.
# FileHandler raises FileNotFoundError if the target directory is missing,
# so make sure it exists first.
os.makedirs("results", exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("results/training.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

def objective(trial):
    """Optuna objective: train one SAC model with sampled hyperparameters.

    Args:
        trial: an ``optuna.Trial`` used to sample the search space.

    Returns:
        The mean evaluation reward over 5 episodes, or ``-inf`` if the
        trial failed, so the study can keep going.
    """
    try:
        # SAC hyperparameter search space.
        learning_rate = trial.suggest_float('learning_rate', 1e-5, 1e-3, log=True)
        buffer_size = trial.suggest_int('buffer_size', 10000, 1000000, log=True)  # replay-buffer capacity
        batch_size = trial.suggest_int('batch_size', 64, 256, step=64)
        gamma = trial.suggest_float('gamma', 0.95, 0.999)  # discount factor
        tau = trial.suggest_float('tau', 0.001, 0.01)  # target-network soft-update coefficient
        ent_coef = trial.suggest_categorical('ent_coef', ['auto', 0.1, 0.01])  # entropy coefficient ('auto' = learned)
        train_freq = trial.suggest_int('train_freq', 1, 10)  # gradient-update frequency

        # Build a SAC trainer with the sampled hyperparameters.
        trainer = RL_Trainer(
            env_class=TakeoffEnv,
            algorithm="SAC",
            learning_rate=learning_rate,
            buffer_size=buffer_size,
            batch_size=batch_size,
            gamma=gamma,
            tau=tau,
            ent_coef=ent_coef,
            train_freq=train_freq
        )

        trainer.train(
            total_timesteps=20000,  # SAC needs enough steps to fill the replay buffer
            model_name=f"trial_{trial.number}",
            save_path="./optuna_trials/",
            checkpoint_freq=5000
        )

        # Score the trial by its mean evaluation reward.
        mean_reward = trainer.evaluate(episodes=5)
        logger.info("Trial %s - Mean Reward: %s, Params: %s",
                    trial.number, mean_reward, trial.params)

        return mean_reward

    except Exception as e:
        # Log with traceback, then report a very poor score so the study
        # continues instead of aborting on one bad trial.
        logger.exception("Trial %s failed: %s", trial.number, e)
        return float('-inf')

def main():
    """Run the hyperparameter search, then retrain and save the best SAC model.

    Ctrl-C saves whatever model exists at that point and exits 0; any other
    exception is logged with its traceback and the process exits 1.
    """
    # Import here as well so main() works even when this module is imported
    # from elsewhere (the top-level import only happens under __main__).
    import optuna

    trainer = None  # defined up front so the interrupt handler can check it
    try:
        # The study maximizes the mean evaluation reward.
        study = optuna.create_study(direction='maximize')

        # 20 trials or at most one hour, whichever comes first.
        study.optimize(objective, n_trials=20, timeout=3600)

        logger.info(f"Best trial: {study.best_trial.params}, Best reward: {study.best_value}")

        # Retrain from scratch with the best hyperparameters, for longer.
        best_params = study.best_trial.params
        trainer = RL_Trainer(
            env_class=TakeoffEnv,
            algorithm="SAC",
            **best_params
        )
        trainer.train(
            total_timesteps=50000,  # the final SAC run gets more steps
            model_name="best_model_sac",
            save_path="./best_models/",
            checkpoint_freq=10000
        )

        # Persist the final model; nothing guarantees the directory exists yet.
        os.makedirs("best_models", exist_ok=True)
        save_path = os.path.join("best_models", "best_model_sac_final.zip")
        trainer.save(save_path)
        logger.info("最佳模型已保存至 ./best_models/best_model_sac_final.zip")

    except KeyboardInterrupt:
        logger.warning("用户中断训练，正在保存当前模型...")
        if trainer is not None:  # only save once the final trainer exists
            os.makedirs("best_models", exist_ok=True)
            save_path = os.path.join("best_models", "best_model_sac_final.zip")
            trainer.save(save_path)
            logger.info(f"已保存中断时的模型到 {save_path}")
        else:
            logger.warning("训练尚未开始，无模型可保存")
        sys.exit(0)
    except Exception as e:
        logger.error(f"训练过程异常终止: {str(e)}", exc_info=True)
        sys.exit(1)

if __name__ == "__main__":
    # Dependency check: fail with a logged message instead of a bare traceback.
    try:
        import stable_baselines3
        import gym
        import optuna

        # Compare numeric version components. A plain string comparison is
        # wrong in both directions: "0.9.0" >= "0.26.0" lexically (but
        # 0.9 < 0.26), while "0.100.0" < "0.26.0" lexically (but 0.100 >= 0.26).
        # `assert` is also avoided because it is stripped under `python -O`.
        gym_version = tuple(int(p) for p in re.findall(r"\d+", gym.__version__)[:3])
        if gym_version < (0, 26, 0):
            logger.critical("需要 gym>=0.26.0 版本")
            sys.exit(1)
    except ImportError as e:
        logger.critical(f"依赖库缺失: {str(e)}")
        sys.exit(1)

    main()