from rl_core import RL_Trainer
from DescentEnv import DescentEnv
import logging
import optuna
import sys
import os
from stable_baselines3.common.vec_env import SubprocVecEnv

# Ensure the log directory exists before attaching the FileHandler --
# logging.FileHandler("results/...") raises FileNotFoundError when the
# "results" directory is missing (main() creates every other output dir,
# but this handler is constructed at import time, before main() runs).
os.makedirs("results", exist_ok=True)

# Log to both a file and the console with timestamps.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.FileHandler("results/train_descent.log"), logging.StreamHandler()]
)
logger = logging.getLogger(__name__)

def make_env():
    """Environment factory; SubprocVecEnv requires a zero-arg callable."""
    env = DescentEnv()
    return env

def objective(trial):
    """Optuna objective for a single hyperparameter trial.

    Samples an algorithm (PPO or SAC) plus its hyperparameters, runs a short
    training pass, and returns the mean evaluation reward (the study
    maximizes this). Returns -inf on any failure so a crashed trial can
    never be selected as the best.

    Fix: the subprocess env is now closed in a ``finally`` block -- the
    original leaked the SubprocVecEnv worker processes whenever training
    raised, since the except path returned without calling ``env.close()``.
    """
    env = None
    try:
        algorithm = trial.suggest_categorical('algorithm', ['PPO', 'SAC'])
        hyperparams = {}
        if algorithm == 'PPO':
            hyperparams['learning_rate'] = trial.suggest_float('learning_rate', 1e-5, 3e-4, log=True)
            hyperparams['n_steps'] = trial.suggest_int('n_steps', 512, 2048, step=512)
            hyperparams['batch_size'] = trial.suggest_int('batch_size', 64, 256, step=64)
            hyperparams['gamma'] = trial.suggest_float('gamma', 0.98, 0.999)
            hyperparams['gae_lambda'] = trial.suggest_float('gae_lambda', 0.92, 0.98)
            hyperparams['ent_coef'] = trial.suggest_float('ent_coef', 0.005, 0.05)
        else:  # SAC
            hyperparams['learning_rate'] = trial.suggest_float('learning_rate', 5e-5, 5e-4, log=True)
            hyperparams['buffer_size'] = trial.suggest_int('buffer_size', 50000, 500000, log=True)
            hyperparams['batch_size'] = trial.suggest_int('batch_size', 128, 512, step=128)
            hyperparams['gamma'] = trial.suggest_float('gamma', 0.98, 0.999)
            hyperparams['tau'] = trial.suggest_float('tau', 0.002, 0.008)
            hyperparams['ent_coef'] = trial.suggest_float('ent_coef', 0.01, 0.2)  # continuous range (was an int elsewhere)

        env = SubprocVecEnv([make_env])  # single subprocess environment
        trainer = RL_Trainer(
            env_class=None,
            env=env,
            algorithm=algorithm,
            **hyperparams
        )

        trainer.train(
            total_timesteps=10000,  # short run: quick screening, not final training
            model_name=f"trial_{trial.number}",
            save_path="./descent_models/",
            checkpoint_freq=5000,
            log_dir="./tensorboard_logs/"
        )

        mean_reward = trainer.evaluate(episodes=5)
        logger.info(f"Trial {trial.number} - Algorithm: {algorithm}, Mean Reward: {mean_reward}, Params: {trial.params}")
        return mean_reward

    except Exception as e:
        logger.error(f"Trial {trial.number} failed: {str(e)}", exc_info=True)
        return float('-inf')
    finally:
        # Always reap the subprocess workers -- success and failure alike.
        if env is not None:
            env.close()

def main():
    """Run the Optuna search, then retrain the best configuration for longer.

    Fix: ``trainer`` and ``env`` are pre-bound to ``None`` so the
    KeyboardInterrupt handler no longer raises NameError when the interrupt
    arrives before they are created -- which is the common case, since most
    of the wall time is spent inside ``study.optimize``. The generic failure
    path now also closes the subprocess env instead of leaking its workers.
    """
    env = None
    trainer = None
    try:
        os.makedirs("./descent_models", exist_ok=True)
        os.makedirs("./best_descent_models", exist_ok=True)
        os.makedirs("./tensorboard_logs", exist_ok=True)

        study = optuna.create_study(direction='maximize')
        study.optimize(objective, n_trials=10, timeout=3600)  # 10 trials, 1-hour budget
        logger.info(f"Best trial: {study.best_trial.params}, Best reward: {study.best_value}")
        best_params = study.best_trial.params
        # 'algorithm' is routed separately; it is not a trainer hyperparameter.
        algorithm = best_params.pop('algorithm')

        env = SubprocVecEnv([make_env])  # single subprocess environment
        trainer = RL_Trainer(
            env_class=None,
            env=env,
            algorithm=algorithm,
            **best_params
        )
        trainer.train(
            total_timesteps=100000,
            model_name="best_descent_model",
            save_path="./best_descent_models/",
            checkpoint_freq=10000,
            log_dir="./tensorboard_logs/"
        )
        trainer.save("./best_descent_models/best_descent_model_final")
        logger.info("Best model saved to ./best_descent_models/best_descent_model_final")

        mean_reward = trainer.evaluate(episodes=10)
        logger.info(f"Final model mean reward: {mean_reward}")
        env.close()

    except KeyboardInterrupt:
        logger.warning("Training interrupted, saving current model...")
        # Guarded: trainer/env may not exist yet if the user interrupted
        # during the Optuna search phase.
        if trainer is not None:
            trainer.save("./best_descent_models/interrupted_model")
        if env is not None:
            env.close()
        sys.exit(0)
    except Exception as e:
        logger.error(f"Training failed: {str(e)}", exc_info=True)
        if env is not None:
            env.close()
        sys.exit(1)

if __name__ == "__main__":
    # Fail fast with a clear log message if a core third-party dependency is
    # missing, instead of crashing deep inside main().
    # NOTE(review): stable-baselines3 >= 2.0 depends on `gymnasium`, not
    # `gym` -- confirm which package DescentEnv/RL_Trainer actually target.
    try:
        import stable_baselines3
        import gym
        import optuna
    except ImportError as e:
        logger.critical(f"Missing dependency: {str(e)}")
        sys.exit(1)
    main()