# File: rl_core.py — core reinforcement-learning training utilities
import inspect
import logging
import os
import warnings
from typing import Any, Dict, Optional, Type

import numpy as np
from stable_baselines3 import PPO, SAC
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.callbacks import BaseCallback, CheckpointCallback, EvalCallback
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecNormalize


# Suppress compatibility warnings.
# NOTE(review): this silences ALL UserWarnings process-wide, not just the ones
# emitted by stable-baselines3 — consider narrowing with `module=...`.
warnings.filterwarnings("ignore", category=UserWarning)

class RL_Trainer:
    """Thin wrapper around stable-baselines3 PPO/SAC.

    Builds a single-environment ``DummyVecEnv`` wrapped in ``VecNormalize``,
    trains with periodic checkpointing, and supports saving, loading and
    evaluating the resulting policy.
    """

    # Supported algorithm name -> stable-baselines3 class.
    _ALGORITHMS: Dict[str, Any] = {"PPO": PPO, "SAC": SAC}

    def __init__(self, env_class: Type, policy: str = "MlpPolicy",
                 algorithm: str = "PPO", **hyperparams):
        """
        Args:
            env_class: Environment class; instantiated with no arguments.
            policy: Policy architecture name (e.g. "MlpPolicy").
            algorithm: "PPO" or "SAC" (case-insensitive).
            **hyperparams: Extra keyword arguments forwarded to the algorithm
                constructor; keys the constructor does not accept are dropped.

        Raises:
            ValueError: If ``algorithm`` is not supported.
        """
        self.env = DummyVecEnv([lambda: env_class()])
        # BUGFIX: wrap with VecNormalize *before* creating the model. The
        # model keeps a reference to the env it was constructed with, so the
        # original order (model first, wrap afterwards) meant training never
        # saw normalized observations/rewards.
        self.env = VecNormalize(self.env, norm_obs=True, norm_reward=True)
        self.algorithm = algorithm.upper()
        self.model = self._init_model(policy, hyperparams)

    def _init_model(self, policy: str, hyperparams: Dict[str, Any]):
        """Instantiate the configured algorithm with filtered hyperparameters.

        ``inspect.signature`` keeps only keyword arguments the constructor
        actually accepts; the previous ``__code__.co_varnames`` check also
        matched the constructor's *local variable* names and could let bogus
        keys through.

        Raises:
            ValueError: If ``self.algorithm`` is not "PPO" or "SAC".
        """
        try:
            algo_cls = self._ALGORITHMS[self.algorithm]
        except KeyError:
            raise ValueError(f"Unsupported algorithm: {self.algorithm}") from None
        accepted = set(inspect.signature(algo_cls.__init__).parameters)
        filtered = {k: v for k, v in hyperparams.items() if k in accepted}
        return algo_cls(
            policy,
            self.env,
            verbose=1,
            tensorboard_log="./tensorboard/",
            **filtered,
        )

    def train(self, total_timesteps: int = 100000, save_path: str = "./models/",
              checkpoint_freq: int = 1000, model_name: str = "rl_model"):
        """Train the model with periodic checkpointing.

        Args:
            total_timesteps: Total number of training steps (default 100000).
            save_path: Directory for checkpoints and the final model
                (default "./models/"); created if missing.
            checkpoint_freq: Checkpoint frequency in environment steps
                (default 1000); divided by the number of parallel envs because
                ``CheckpointCallback`` counts per-env steps.
            model_name: File-name prefix for checkpoints and the final model
                (default "rl_model").
        """
        os.makedirs(save_path, exist_ok=True)
        checkpoint_callback = CheckpointCallback(
            save_freq=max(checkpoint_freq // self.env.num_envs, 1),
            save_path=save_path,
            name_prefix=model_name,
        )
        self.model.learn(
            total_timesteps=total_timesteps,
            callback=checkpoint_callback,
            tb_log_name=f"{model_name}_log",
        )
        # os.path.join avoids the doubled separator the old f-string produced
        # with the default trailing-slash save_path ("./models//rl_model_final").
        self.save(os.path.join(save_path, f"{model_name}_final"))

    def save(self, path: str):
        """Save model weights to ``path``.

        If the environment is VecNormalize-wrapped, its running normalization
        statistics are persisted alongside (``<path>_vecnormalize.pkl``) —
        without them a reloaded model sees differently-scaled observations.
        """
        self.model.save(path)
        if isinstance(self.env, VecNormalize):
            self.env.save(f"{path}_vecnormalize.pkl")

    def load(self, path: str, env: Optional[DummyVecEnv] = None):
        """Load model weights from ``path`` into this trainer.

        If VecNormalize statistics saved by :meth:`save` exist next to the
        model file, they are restored onto the target environment as well.

        Args:
            path: Model path (without extension), as produced by :meth:`save`.
            env: Optional environment to bind the loaded model to; defaults
                to this trainer's environment.

        Returns:
            RL_Trainer: ``self``, for fluent chaining.

        Raises:
            ValueError: If ``self.algorithm`` is not supported. (The original
                silently returned without loading anything in that case.)
        """
        try:
            algo_cls = self._ALGORITHMS[self.algorithm]
        except KeyError:
            raise ValueError(f"Unsupported algorithm: {self.algorithm}") from None
        # `env if env is not None` instead of `env or self.env`: VecEnv
        # truthiness is not a reliable "was an env passed" test.
        target_env = env if env is not None else self.env
        stats_path = f"{path}_vecnormalize.pkl"
        if os.path.exists(stats_path):
            inner = target_env.venv if isinstance(target_env, VecNormalize) else target_env
            target_env = VecNormalize.load(stats_path, inner)
            self.env = target_env
        self.model = algo_cls.load(path, env=target_env)
        return self

    def evaluate(self, episodes: int = 10) -> float:
        """Run ``episodes`` full episodes and return the mean episodic reward.

        Fixes over the original: actions are deterministic (evaluation should
        measure the policy, not its exploration noise); VecNormalize running
        statistics are frozen and *raw* rewards are accumulated (the original
        averaged normalized rewards, which are not comparable across runs);
        the manual ``np.clip(action, -1, 1)`` is dropped because SB3's
        ``predict`` already clips to the action space's actual bounds.
        """
        is_normalized = isinstance(self.env, VecNormalize)
        if is_normalized:
            prev_training = self.env.training
            prev_norm_reward = self.env.norm_reward
            self.env.training = False
            self.env.norm_reward = False
        try:
            episode_rewards = []
            obs = self.env.reset()
            for _ in range(episodes):
                done, total_reward = False, 0.0
                while not done:
                    action, _ = self.model.predict(obs, deterministic=True)
                    obs, reward, dones, _ = self.env.step(action)
                    # VecEnv returns arrays of length num_envs (here 1);
                    # handle a scalar defensively as the original did.
                    total_reward += float(reward[0]) if isinstance(reward, np.ndarray) else float(reward)
                    done = bool(dones[0]) if isinstance(dones, np.ndarray) else bool(dones)
                # VecEnv auto-resets on done, so `obs` already belongs to the
                # next episode here.
                episode_rewards.append(total_reward)
            return float(np.mean(episode_rewards))
        finally:
            if is_normalized:
                # Restore normalization behavior for subsequent training.
                self.env.training = prev_training
                self.env.norm_reward = prev_norm_reward