import os
import argparse
import numpy as np
import imageio
import gymnasium as gym
from gymnasium.wrappers import RecordEpisodeStatistics
# Defer Stable-Baselines3 imports and callbacks to inside train_ppo

from envs.maze_env import MazeEnv
# from utils.callbacks import RewardLoggerCallback  # import inside try
from utils.plot import save_training_curve


def make_env(size: int, obstacle_density: float, max_steps: int, render_mode=None):
    """Build a zero-argument factory producing a stats-wrapped MazeEnv.

    SB3's vectorized environments expect thunks rather than instances,
    hence the closure over the construction parameters.
    """
    def _init():
        maze = MazeEnv(
            size=size,
            obstacle_density=obstacle_density,
            max_steps=max_steps,
            render_mode=render_mode,
        )
        return RecordEpisodeStatistics(maze)

    return _init


def train_ppo(total_timesteps: int, size: int, obstacle_density: float, eval_episodes: int, artifacts_dir: str, do_render: bool = True, log_interval: int = 50, regen_maze: bool = False, gif_stages: int = 5, early_stop: bool = False, es_window: int = 100, es_delta: float = 0.05):
    """Train a PPO agent on the maze, saving staged demo GIFs and the model.

    Training is split into ``gif_stages`` chunks; after each chunk a rollout
    GIF is rendered (when ``do_render``). If anything in the SB3/Torch stack
    fails (import or runtime), falls back to tabular Q-learning with the same
    budget and options.

    Args:
        total_timesteps: overall environment-step budget for training.
        size: maze grid side length; episode step cap is size * size.
        obstacle_density: fraction of cells occupied by obstacles.
        eval_episodes: episodes per rendered demo GIF.
        artifacts_dir: output directory (created if missing).
        do_render: render demo GIFs after each stage and at the end.
        log_interval, regen_maze, early_stop, es_window, es_delta:
            forwarded to the reward callback / Q-learning fallback.
    """
    os.makedirs(artifacts_dir, exist_ok=True)
    max_steps = size * size  # one step per grid cell as the episode cap

    try:
        # Import SB3 components lazily to avoid import-time Torch dependency issues
        from stable_baselines3 import PPO
        from stable_baselines3.common.vec_env import DummyVecEnv
        from utils.callbacks import RewardLoggerCallback

        # Vectorized env (single)
        env = DummyVecEnv([make_env(size, obstacle_density, max_steps)])
        try:
            model = PPO("MlpPolicy", env, verbose=1)
            callback = RewardLoggerCallback(artifacts_dir, early_stop=early_stop, es_window=es_window, es_delta=es_delta)
            # Staged training: split into gif_stages parts, render GIF after each stage.
            stages = max(1, gif_stages)
            stage_interval = max(1, total_timesteps // stages)
            remaining = total_timesteps
            for i in range(1, stages + 1):
                # Last stage absorbs the integer-division remainder so the
                # total budget is spent exactly.
                this_steps = stage_interval if i < stages else remaining
                model.learn(total_timesteps=this_steps, callback=callback, reset_num_timesteps=False)
                if do_render:
                    render_rollout(size=size, obstacle_density=obstacle_density, max_steps=max_steps, model=model, episodes=eval_episodes, artifacts_dir=artifacts_dir, gif_name=f"demo_stage_{i}.gif", mp4_name=None)
                remaining -= this_steps

            model_path = os.path.join(artifacts_dir, "model.zip")
            model.save(model_path)
            print(f"[INFO] Model saved to {model_path}")

            if do_render:
                render_rollout(size=size, obstacle_density=obstacle_density, max_steps=max_steps, model=model, episodes=eval_episodes, artifacts_dir=artifacts_dir)
        finally:
            # Previously the vec env was never closed; release its resources
            # even when training raises (before falling back below).
            env.close()
    except Exception as e:
        print(f"[WARN] PPO training failed, falling back to Q-learning. Reason: {e}")
        train_q_learning(total_timesteps=total_timesteps, size=size, obstacle_density=obstacle_density, eval_episodes=eval_episodes, artifacts_dir=artifacts_dir, log_interval=log_interval, regen_maze=regen_maze, gif_stages=gif_stages, early_stop=early_stop, es_window=es_window, es_delta=es_delta)


def render_rollout(size: int, obstacle_density: float, max_steps: int, model, episodes: int, artifacts_dir: str, gif_name: str = "demo.gif", mp4_name = "demo.mp4"):
    """Roll out the model greedily and save the recorded frames as GIF/MP4.

    Args:
        model: any object exposing ``predict(obs, deterministic=True)`` (SB3 API).
        episodes: number of fresh-maze episodes to record.
        gif_name: GIF filename inside ``artifacts_dir``.
        mp4_name: MP4 filename; pass None/empty to skip the MP4 export.
    """
    print("[INFO] Generating rollout demonstration...")
    frames = []
    for ep in range(episodes):
        # Fresh env per episode so each demo starts from its own reset.
        env = MazeEnv(size=size, obstacle_density=obstacle_density, max_steps=max_steps, render_mode="rgb_array")
        obs, info = env.reset()
        terminated = False
        truncated = False
        while not (terminated or truncated):
            action, _ = model.predict(obs, deterministic=True)
            obs, reward, terminated, truncated, info = env.step(int(action))
            frame = env.render()
            frames.append(frame)
        env.close()

    if not frames:
        # episodes == 0 (or episodes that end before a frame is captured):
        # imageio.mimsave raises on an empty sequence, so bail out early.
        print("[WARN] No frames captured; skipping GIF/MP4 export.")
        return

    gif_path = os.path.join(artifacts_dir, gif_name)
    imageio.mimsave(gif_path, frames, format="GIF", fps=10)
    print(f"[INFO] GIF saved to {gif_path}")

    if mp4_name:
        mp4_path = os.path.join(artifacts_dir, mp4_name)
        try:
            imageio.mimsave(mp4_path, frames, fps=10)
            print(f"[INFO] MP4 saved to {mp4_path}")
        except Exception as e:
            print(f"[WARN] MP4 export failed (likely missing ffmpeg): {e}")


def train_q_learning(total_timesteps: int, size: int, obstacle_density: float, eval_episodes: int, artifacts_dir: str, log_interval: int = 50, regen_maze: bool = False, gif_stages: int = 5, early_stop: bool = False, es_window: int = 100, es_delta: float = 0.05):
    """A simple tabular Q-learning fallback when PPO cannot run (e.g., Torch issues).

    Trains an epsilon-greedy tabular policy on the maze, saving staged demo
    GIFs, the reward curve, the Q-table (.npy), and a JSON metadata file to
    ``artifacts_dir``. With optional early stopping based on stability of
    recent episode rewards: training halts once the min-max range of the last
    ``es_window`` episode rewards is at most ``es_delta``.
    """
    # NOTE(review): plt is not referenced in this function (plotting happens in
    # save_training_curve) — the import looks vestigial; confirm before removing.
    import matplotlib.pyplot as plt

    max_steps = size * size
    # One row per grid cell, one column per action; flat (size*size, 4) table.
    q_table = np.zeros((size * size, 4), dtype=np.float32)

    def obs_to_state(obs):
        # Convert normalized obs back to grid position
        # assumes obs[0], obs[1] are agent coords scaled to [0, 1] — TODO confirm
        # against MazeEnv's observation encoding.
        ax = int(round(obs[0] * (size - 1)))
        ay = int(round(obs[1] * (size - 1)))
        return ax * size + ay

    episode_rewards = []
    # Epsilon-greedy exploration schedule: decays multiplicatively per episode.
    epsilon = 1.0
    epsilon_min = 0.05
    epsilon_decay = 0.995
    alpha = 0.5   # learning rate for the TD update
    gamma = 0.95  # discount factor

    env = MazeEnv(size=size, obstacle_density=obstacle_density, max_steps=max_steps)
    steps_done = 0
    # staged GIF thresholds
    stages = max(1, gif_stages)
    stage_interval = max(1, total_timesteps // stages)
    next_threshold = stage_interval
    stage_index = 1
    print(f"[INFO] Q-learning config: size={size}, obstacle_density={obstacle_density}, total_timesteps={total_timesteps}, max_steps={max_steps}, log_interval={log_interval}, regen_maze={regen_maze}, early_stop={early_stop}, es_window={es_window}, es_delta={es_delta}")
    # Outer loop: episodes until the global step budget is spent (or early stop).
    while steps_done < total_timesteps:
        obs, info = env.reset(options={"regen": regen_maze})
        done = False
        truncated = False
        ep_reward = 0.0
        while not (done or truncated):
            s = obs_to_state(obs)
            # Epsilon-greedy action selection over the 4 discrete actions.
            if np.random.rand() < epsilon:
                a = np.random.randint(0, 4)
            else:
                a = int(np.argmax(q_table[s]))

            next_obs, r, done, truncated, info = env.step(a)
            s_next = obs_to_state(next_obs)

            # Standard one-step TD update; terminal/truncated states bootstrap 0.
            td_target = r + gamma * (0 if (done or truncated) else np.max(q_table[s_next]))
            td_error = td_target - q_table[s, a]
            q_table[s, a] += alpha * td_error

            obs = next_obs
            ep_reward += r
            steps_done += 1
            # generate staged GIF when crossing thresholds
            if stage_index <= stages and steps_done >= next_threshold:
                render_rollout_q(size=size, obstacle_density=obstacle_density, max_steps=max_steps, q_table=q_table, episodes=eval_episodes, artifacts_dir=artifacts_dir, gif_name=f"demo_stage_{stage_index}.gif")
                stage_index += 1
                next_threshold += stage_interval
            # Enforce the global step budget mid-episode.
            if steps_done >= total_timesteps:
                break
        episode_rewards.append(ep_reward)
        epsilon = max(epsilon_min, epsilon * epsilon_decay)

        # Early stopping based on stability of recent rewards
        if early_stop and len(episode_rewards) >= es_window:
            window = episode_rewards[-es_window:]
            r_max = float(np.max(window))
            r_min = float(np.min(window))
            delta = r_max - r_min
            if delta <= es_delta:
                print(f"[EARLY STOP] Reward stable over last {es_window} episodes (range={delta:.3f} <= {es_delta}). Steps={steps_done}/{total_timesteps}, episodes={len(episode_rewards)}")
                break

        # Console progress printing
        episodes = len(episode_rewards)
        if (episodes % log_interval == 0) or (steps_done >= total_timesteps):
            # Mean over the most recent log_interval episodes (or all, if fewer).
            window = episode_rewards[-log_interval:] if episodes >= log_interval else episode_rewards
            mean_reward = float(np.mean(window)) if window else 0.0
            pct = int(100 * steps_done / total_timesteps)
            print(f"[PROGRESS] episodes={episodes}, steps={steps_done}/{total_timesteps} ({pct}%), epsilon={epsilon:.3f}, mean_reward(last {len(window)})={mean_reward:.3f}")

    # Save curve
    os.makedirs(artifacts_dir, exist_ok=True)
    save_training_curve(episode_rewards, os.path.join(artifacts_dir, "training_curve.png"))

    # Save Q-table
    q_table_path = os.path.join(artifacts_dir, "q_table.npy")
    np.save(q_table_path, q_table)
    print(f"[INFO] Q-table saved to {q_table_path}")

    # Save training metadata
    metadata = {
        'size': size,
        'obstacle_density': obstacle_density,
        'total_episodes': len(episode_rewards),
        'total_steps': steps_done,
        'final_epsilon': epsilon,
        'q_table_shape': q_table.shape,
        'hyperparameters': {
            'alpha': alpha,
            'gamma': gamma,
            'epsilon_min': epsilon_min,
            'epsilon_decay': epsilon_decay
        }
    }
    metadata_path = os.path.join(artifacts_dir, "q_learning_metadata.json")
    import json
    with open(metadata_path, 'w') as f:
        json.dump(metadata, f, indent=2)
    print(f"[INFO] Training metadata saved to {metadata_path}")

    # Render rollout using greedy policy
    render_rollout_q(size=size, obstacle_density=obstacle_density, max_steps=max_steps, q_table=q_table, episodes=eval_episodes, artifacts_dir=artifacts_dir)


def render_rollout_q(size: int, obstacle_density: float, max_steps: int, q_table: np.ndarray, episodes: int, artifacts_dir: str, gif_name: str = "demo.gif"):
    """Record greedy rollouts of a tabular Q policy and write them to a GIF."""

    def obs_to_state(obs):
        # Undo the [0, 1] normalization to recover the flat cell index.
        row = int(round(obs[0] * (size - 1)))
        col = int(round(obs[1] * (size - 1)))
        return row * size + col

    frames = []
    for _ in range(episodes):
        # A fresh env per episode so each rollout begins from its own reset.
        env = MazeEnv(size=size, obstacle_density=obstacle_density, max_steps=max_steps, render_mode="rgb_array")
        obs, _info = env.reset()
        terminated = False
        truncated = False
        while not terminated and not truncated:
            greedy_action = int(np.argmax(q_table[obs_to_state(obs)]))
            obs, _reward, terminated, truncated, _info = env.step(greedy_action)
            frames.append(env.render())
        env.close()

    gif_path = os.path.join(artifacts_dir, gif_name)
    imageio.mimsave(gif_path, frames, format="GIF", fps=10)
    print(f"[INFO] GIF saved to {gif_path} (Q-learning fallback)")


def parse_args():
    """Parse command-line options for maze training.

    Fixes ``--early-stop``: it previously had ``default=True`` with no type or
    action, so any supplied value (even ``false``) was a truthy string and the
    feature could never be disabled from the CLI. It now takes an explicit
    boolean value (``--early-stop false``) while keeping the default of True.
    """

    def str2bool(value: str) -> bool:
        # argparse hands us a raw string; accept common true/false spellings.
        lowered = value.lower()
        if lowered in ("1", "true", "t", "yes", "y", "on"):
            return True
        if lowered in ("0", "false", "f", "no", "n", "off"):
            return False
        raise argparse.ArgumentTypeError(f"Expected a boolean value, got {value!r}")

    parser = argparse.ArgumentParser(description="RL Maze with PPO (SB3)")
    parser.add_argument("--train-steps", type=int, default=30000, help="Total training timesteps")
    parser.add_argument("--maze-size", type=int, default=20, help="Maze grid size")
    parser.add_argument("--obstacle-density", type=float, default=0.50, help="Obstacle density [0, 0.8]")
    parser.add_argument("--eval-episodes", type=int, default=5, help="Episodes to render for GIF/video")
    parser.add_argument("--render-gif", action="store_true", help="Generate rollout GIF after training")
    parser.add_argument("--artifacts-dir", type=str, default="artifacts", help="Directory to save outputs")
    parser.add_argument("--log-interval", type=int, default=50, help="Episode interval for console progress printing")
    parser.add_argument("--regen-maze", action="store_true", help="Regenerate maze on each episode reset")
    parser.add_argument("--gif-stages", type=int, default=5, help="Split training into N stages and save a GIF after each stage")
    # Early stopping parameters
    parser.add_argument("--early-stop", type=str2bool, default=True, help="Enable early stopping based on reward curve stability (true/false)")
    parser.add_argument("--es-window", type=int, default=10, help="Episodes window to evaluate stability")
    parser.add_argument("--es-delta", type=float, default=1, help="Max reward range within window to trigger early stop")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()

    train_ppo(
        total_timesteps=args.train_steps,
        size=args.maze_size,
        obstacle_density=args.obstacle_density,
        eval_episodes=args.eval_episodes,
        artifacts_dir=args.artifacts_dir,
        do_render=args.render_gif,
        log_interval=args.log_interval,
        regen_maze=args.regen_maze,
        gif_stages=args.gif_stages,
        early_stop=args.early_stop,
        es_window=args.es_window,
        es_delta=args.es_delta,
    )