"""
This script visualizes episodes from an OGBench dataset by rendering them
using the MuJoCo environment and saving the output as an MP4 video.
"""

import argparse
import os

import imageio
import mujoco
import numpy as np
import ogbench
from tqdm import tqdm


def do_set_obs_state_to_env(
    env,
    obs: np.ndarray,
) -> None:
    """
    Assign state to given environment from observation data.

    Dispatches on the ``env.spec.id`` prefix and writes the joint positions /
    velocities (and, for 'scene-' envs, button states) decoded from the flat
    observation vector directly into the underlying MuJoCo data, then runs a
    forward pass so derived quantities are consistent before rendering.

    The integer offsets and scale factors below invert the layout and
    normalization used when the observations were generated; they are taken
    verbatim from the legacy script:
    old_project/jobs/run_env_ogbench.py

    Args:
        env: A gymnasium-wrapped OGBench environment; mutated in place.
        obs: Flat observation vector whose layout depends on the env family.
    """
    # antmaze: obs begins with [qpos (15), qvel (14)].
    if env.spec.id.startswith("antmaze-"):
        env.unwrapped.set_state(obs[0:15], obs[15 : 15 + 14])
    # pointmaze: only the agent's (x, y) position is restored.
    if env.spec.id.startswith("pointmaze-"):
        env.unwrapped.set_xy(obs[0:2])
    if env.spec.id.startswith("cube-single-"):
        # Arm: 6 joint positions followed by 6 joint velocities.
        env.unwrapped._data.qpos[env.unwrapped._arm_joint_ids] = obs[0:6].copy()
        env.unwrapped._data.qvel[env.unwrapped._arm_joint_ids] = obs[6 : 6 + 6].copy()
        # Gripper opening at offset 6+6+3+2; '* 0.8 / 3.0' presumably undoes
        # the env's observation scaling — TODO confirm against the env source.
        env.unwrapped._data.qpos[env.unwrapped._gripper_opening_joint_id] = (
            obs[6 + 6 + 3 + 2].copy() * 0.8 / 3.0
        )
        # Cube free joint: 3-vector position, de-scaled (/10) and re-centered
        # around the table origin (0.425, 0, 0)...
        env.unwrapped._data.joint("object_joint_0").qpos[0:3] = obs[
            6 + 6 + 3 + 2 + 1 + 1 : 6 + 6 + 3 + 2 + 1 + 1 + 3
        ].copy() / 10.0 + np.array([0.425, 0.0, 0.0])
        # ...followed by its 4-component orientation quaternion.
        env.unwrapped._data.joint("object_joint_0").qpos[3 : 3 + 4] = obs[
            6 + 6 + 3 + 2 + 1 + 1 + 3 : 6 + 6 + 3 + 2 + 1 + 1 + 3 + 4
        ].copy()
        # Recompute derived quantities (body/site poses, etc.) for rendering.
        mujoco.mj_forward(env.unwrapped._model, env.unwrapped._data)
    if env.spec.id.startswith("scene-"):
        # Same arm / gripper / cube layout as 'cube-single-' above.
        env.unwrapped._data.qpos[env.unwrapped._arm_joint_ids] = obs[0:6].copy()
        env.unwrapped._data.qvel[env.unwrapped._arm_joint_ids] = obs[6 : 6 + 6].copy()
        env.unwrapped._data.qpos[env.unwrapped._gripper_opening_joint_id] = (
            obs[6 + 6 + 3 + 2].copy() * 0.8 / 3.0
        )
        tmp_offset = 6 + 6 + 3 + 2 + 1 + 1
        env.unwrapped._data.joint("object_joint_0").qpos[0:3] = obs[
            tmp_offset : tmp_offset + 3
        ].copy() / 10.0 + np.array([0.425, 0.0, 0.0])
        env.unwrapped._data.joint("object_joint_0").qpos[3 : 3 + 4] = obs[
            tmp_offset + 3 : tmp_offset + 3 + 4
        ].copy()
        # Skip past cube position (3) + quaternion (4) + 2 more entries to
        # reach the button section of the observation.
        tmp_offset += 3 + 4 + 2
        # Button 0: boolean pressed-state, then plunger qpos (de-scaled) and qvel.
        env.unwrapped._cur_button_states[0] = obs[tmp_offset].copy() > 0
        env.unwrapped._data.joint("buttonbox_joint_0").qpos[0] = (
            obs[tmp_offset + 2].copy() / 120.0
        )
        env.unwrapped._data.joint("buttonbox_joint_0").qvel[0] = obs[
            tmp_offset + 3
        ].copy()
        tmp_offset += 4
        # Button 1: same layout as button 0.
        # NOTE(review): button 0 reads its state at `tmp_offset` but button 1
        # reads at `tmp_offset + 1` — possible off-by-one inherited from the
        # legacy script; confirm against the env's observation layout.
        env.unwrapped._cur_button_states[1] = obs[tmp_offset + 1].copy() > 0
        env.unwrapped._data.joint("buttonbox_joint_1").qpos[0] = (
            obs[tmp_offset + 2].copy() / 120.0
        )
        env.unwrapped._data.joint("buttonbox_joint_1").qvel[0] = obs[
            tmp_offset + 3
        ].copy()
        tmp_offset += 4
        # Drawer and window sliders: de-scaled qpos plus raw qvel each.
        env.unwrapped._data.joint("drawer_slide").qpos[0] = (
            obs[tmp_offset + 0].copy() / 18.0
        )
        env.unwrapped._data.joint("drawer_slide").qvel[0] = obs[tmp_offset + 1].copy()
        env.unwrapped._data.joint("window_slide").qpos[0] = (
            obs[tmp_offset + 2].copy() / 15.0
        )
        env.unwrapped._data.joint("window_slide").qvel[0] = obs[tmp_offset + 3].copy()
        # Forward pass first, then re-apply button visuals/state to the model.
        mujoco.mj_forward(env.unwrapped._model, env.unwrapped._data)
        env.unwrapped._apply_button_states()


# Size (pixels) of the synthetic black separator frames inserted between
# episodes in main(). NOTE(review): env.render() output is assumed to match
# this size; if the env renders at a different resolution the MP4 writer may
# reject the mixed frame sizes — confirm against the env's render settings.
RENDER_WIDTH = 200
RENDER_HEIGHT = 200


def do_render_observation(
    env,
    obs: np.ndarray,
) -> np.ndarray:
    """Push *obs* into *env*'s simulator state and return the rendered RGB frame.

    Copied from the legacy script: old_project/jobs/run_env_ogbench.py

    Args:
        env: A gymnasium-wrapped OGBench environment (mutated in place).
        obs: Flat observation vector to restore before rendering.

    Returns:
        The RGB image produced by ``env.render()`` for the restored state.
    """
    do_set_obs_state_to_env(env, obs)
    frame = env.render()
    return frame


def _split_episodes(observations: np.ndarray, terminals: np.ndarray) -> list:
    """Split flat transition arrays into per-episode observation arrays.

    An episode ends (inclusively) at each index where ``terminals == 1``.
    Observations after the last terminal, if any, are dropped — this matches
    the legacy splitting behavior.
    """
    terminal_indices = np.where(terminals == 1)[0]
    episodes = []
    start_idx = 0
    for end_idx in terminal_indices:
        episodes.append(observations[start_idx : end_idx + 1])
        start_idx = end_idx + 1
    return episodes


def main():
    """Parse CLI args, load an OGBench dataset, and render episodes to MP4.

    Reads ``data_static/<env_name>.npz``, splits it into episodes on the
    'terminals' flags, and writes the first ``--num_episodes`` episodes to a
    30 fps video, with one second of black frames before each episode.
    """
    parser = argparse.ArgumentParser(
        description="Visualize OGBench dataset episodes and save to MP4."
    )
    parser.add_argument(
        "--env_name",
        type=str,
        required=True,
        help="Name of the OGBench environment (e.g., 'cube-single-play-v0').",
    )
    parser.add_argument(
        "--output_file",
        type=str,
        default=None,
        help="Path to the output MP4 file. Defaults to '<env_name>.mp4'.",
    )
    parser.add_argument(
        "--num_episodes", type=int, default=3, help="Number of episodes to visualize."
    )
    args = parser.parse_args()

    # --- 1. Determine file paths ---
    npz_path = f"data_static/{args.env_name}.npz"
    output_file = args.output_file or f"{args.env_name}.mp4"

    if not os.path.exists(npz_path):
        print(f"Error: Dataset file not found at {npz_path}")
        print(
            "Please ensure the dataset exists. You may need to run a script that downloads it first."
        )
        return

    # --- 2. Initialize environment and load data ---
    print(f"Initializing '{args.env_name}' environment for rendering...")
    env, _, _ = ogbench.make_env_and_datasets(
        args.env_name,
        dataset_dir="data_static/",
        compact_dataset=True,
        render_mode="rgb_array",
    )
    try:
        env.reset()  # Call reset once to initialize the environment for rendering

        print(f"Loading dataset from {npz_path}...")
        with np.load(npz_path) as data:
            observations = data["observations"]
            terminals = data["terminals"]

        # --- 3. Split data into episodes ---
        print("Splitting data into episodes based on 'terminals' flags...")
        episodes = _split_episodes(observations, terminals)

        if not episodes:
            print("Error: No episodes found. Check the 'terminals' data in the NPZ file.")
            return

        num_to_visualize = min(args.num_episodes, len(episodes))
        print(f"Found {len(episodes)} episodes. Visualizing the first {num_to_visualize}.")

        # --- 4. Render frames and create video ---
        print(f"Writing video to {output_file}...")
        with imageio.get_writer(output_file, fps=30) as writer:
            for i, episode_obs in enumerate(episodes[:num_to_visualize]):
                print(
                    f"  - Rendering Episode {i + 1}/{num_to_visualize} ({len(episode_obs)} frames)"
                )

                # Episode separator: one second (30 frames) of black.
                # NOTE(review): assumes env.render() frames are also
                # RENDER_HEIGHT x RENDER_WIDTH; mixed sizes may fail — confirm.
                info_frame = np.zeros((RENDER_HEIGHT, RENDER_WIDTH, 3), dtype=np.uint8)
                for _ in range(30):  # Show text for 1 second
                    writer.append_data(info_frame)

                for obs in tqdm(episode_obs, desc=f"  Episode {i+1}", leave=False):
                    writer.append_data(do_render_observation(env, obs))
    finally:
        # Release the MuJoCo rendering context even if an error occurred
        # (the original leaked it by never calling close()).
        env.close()

    print("Done. Video saved successfully.")


# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
