import os
import matplotlib.pyplot as plt
from stable_baselines3.common.callbacks import BaseCallback


class RewardLoggerCallback(BaseCallback):
    """Collect per-episode rewards during SB3 training and plot them at the end.

    Rewards are read from the ``"episode"`` dict that a Monitor wrapper puts
    into each env's ``info`` at episode boundaries. Optionally stops training
    early once the reward has been stable (max - min within ``es_delta``) over
    the last ``es_window`` episodes.
    """

    def __init__(self, artifacts_dir: str, verbose: int = 0, early_stop: bool = False, es_window: int = 100, es_delta: float = 0.05):
        """
        Args:
            artifacts_dir: Directory for output artifacts (created if missing).
            verbose: Verbosity level forwarded to ``BaseCallback``.
            early_stop: If True, stop training when rewards plateau.
            es_window: Number of most recent episodes checked for stability.
            es_delta: Max allowed reward range (max - min) within the window.
        """
        super().__init__(verbose)
        self.artifacts_dir = artifacts_dir
        os.makedirs(self.artifacts_dir, exist_ok=True)
        self.episode_rewards: list = []
        self.early_stop = early_stop
        self.es_window = es_window
        self.es_delta = es_delta

    def _on_step(self) -> bool:
        """Record finished-episode rewards; return False to stop training early."""
        # `infos` is a list with one entry per parallel env. Scan all of them
        # (the previous version only looked at index 0, which dropped episode
        # rewards from every other env when n_envs > 1).
        infos = self.locals.get("infos")
        if infos:
            for info in infos:
                ep = info.get("episode")
                if not ep or "r" not in ep:
                    continue
                # Monitor may report a numpy scalar; store a plain float.
                self.episode_rewards.append(float(ep["r"]))
                # Early-stop check occurs at episode boundaries only.
                if self.early_stop and len(self.episode_rewards) >= self.es_window:
                    window = self.episode_rewards[-self.es_window:]
                    delta = float(max(window)) - float(min(window))
                    if delta <= self.es_delta:
                        print(f"[EARLY STOP] (PPO) Reward stable over last {self.es_window} episodes (range={delta:.3f} <= {self.es_delta}).")
                        return False
        return True

    def _on_training_end(self) -> None:
        """Save the reward curve plot, if any episodes completed."""
        if self.episode_rewards:
            save_training_curve(self.episode_rewards, os.path.join(self.artifacts_dir, "training_curve.png"))


def save_training_curve(episode_rewards, out_path: str):
    """Plot episode rewards versus episode index and save the figure to *out_path*.

    Args:
        episode_rewards: Sequence of per-episode reward values.
        out_path: Destination image path; the parent directory is created
            if it does not exist.
    """
    # Robustness: callers other than the callback may pass a path whose
    # directory does not exist yet; savefig would raise FileNotFoundError.
    parent = os.path.dirname(out_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    plt.figure(figsize=(8, 4))
    plt.plot(episode_rewards, label="Episode reward")
    plt.xlabel("Episode")
    plt.ylabel("Reward")
    plt.title("Training Reward Curve")
    plt.grid(True, alpha=0.3)
    plt.legend()
    plt.tight_layout()
    # Close the figure so repeated calls don't accumulate open figures.
    plt.savefig(out_path)
    plt.close()