import logging
import os

import flax.serialization
import hydra
import imageio
import jax
import jax.numpy as jnp
import numpy as np  # For np.linalg.norm
import ogbench
from omegaconf import DictConfig, OmegaConf

from ne_flow import agent as agent_lib

# Module-level logger; Hydra redirects this to its per-run log file.
log = logging.getLogger(__name__)


@hydra.main(config_path="../configs", config_name="visualize_tdflow", version_base=None)
def main(cfg: DictConfig) -> None:
    """Render videos of hierarchical goal-reaching rollouts.

    Loads a pre-trained low-level agent (``agent0``) and a TDFlow planner
    from checkpoints, then runs ``cfg.num_episodes`` episodes in an ogbench
    environment. The TDFlow agent periodically plans latent subgoals which
    agent0 executes; subgoals are visualized by moving the environment's
    two mocap target bodies, and each episode is saved as an MP4 under
    ``cfg.video_dir`` (relative to Hydra's run directory).

    Args:
        cfg: Hydra config. Fields read here include ``dataset_name``,
            ``seed``, ``encoder.{name,latent_dim}``, ``agent0_ckpt_path``,
            ``tdflow_ckpt_path``, ``inference.name``, ``gamma``,
            ``goal_tolerance``, ``num_episodes``, and ``video_dir``.
    """
    log.info("--- Configuration ---")
    log.info(OmegaConf.to_yaml(cfg))
    log.info("---------------------")

    dataset_name = cfg.dataset_name
    log.info(f"Using dataset name: {dataset_name}")

    # Only the env is used here; the returned train/val datasets are discarded.
    log.info("Loading environment...")
    env, _, _ = ogbench.make_env_and_datasets(
        dataset_name,
        dataset_dir="data_static/",
    )

    raw_observation_shape = env.observation_space.shape
    action_shape = env.action_space.shape

    rng = jax.random.PRNGKey(cfg.seed)
    rng, agent_rng = jax.random.split(rng)

    # NOTE(review): this snapshot is taken BEFORE cfg.encoder.latent_dim is
    # (possibly) overwritten below, so the agents see the pre-mutation value.
    # Verify this ordering is intentional against the training script.
    agent_config_dict = OmegaConf.to_container(cfg, resolve=True)

    # --- Encoder Config ---
    # For the "identity" encoder the latent space is the raw observation
    # space; otherwise the configured latent_dim is used.
    input_dim = raw_observation_shape[0]
    encoder_name = cfg.encoder.name
    output_dim = input_dim if encoder_name == "identity" else cfg.encoder.latent_dim
    # Tuple layout (modality key, encoder name, in dim, out dim) — assumed
    # to match agent_lib's expected encoder_config format; TODO confirm.
    encoder_config_agent0 = [("proprio", encoder_name, input_dim, output_dim)]
    size_latent_obs = output_dim
    if encoder_name == "identity":
        cfg.encoder.latent_dim = output_dim

    # --- Agent Initialization ---
    # The low-level goal-conditioned policy; its EMA params are restored
    # from the checkpoint via flax's binary deserialization (the freshly
    # initialized ema_params serve as the target structure/template).
    log.info("Initializing pre-trained agent0...")
    agent0_kwargs = {
        "config": agent_config_dict,
        "encoder_config": encoder_config_agent0,
        "size_latent_obs": size_latent_obs,
        "size_act": action_shape[0],
        "rng": rng,
    }
    agent0 = agent_lib.GCRLAgent0(**agent0_kwargs)
    with open(cfg.agent0_ckpt_path, "rb") as f:
        agent0.combined_state = agent0.combined_state.replace(
            ema_params=flax.serialization.from_bytes(
                agent0.combined_state.ema_params, f.read()
            )
        )

    # The high-level planner; likewise only its EMA params are restored.
    log.info("Initializing TDFlowAgent...")
    tdflow_kwargs = {
        "config": agent_config_dict,
        "size_latent_obs": size_latent_obs,
        "size_act": action_shape[0],
        "rng": agent_rng,
        "agent0": agent0,
    }
    tdflow_agent = agent_lib.TDFlowAgent(**tdflow_kwargs)
    with open(cfg.tdflow_ckpt_path, "rb") as f:
        tdflow_agent.v_field_state = tdflow_agent.v_field_state.replace(
            ema_params=flax.serialization.from_bytes(
                tdflow_agent.v_field_state.ema_params, f.read()
            )
        )

    # Select agent0's inference method by name from the config
    # (e.g. different sampling strategies exposed as methods on the agent).
    agent0_inference_fn = getattr(agent0, cfg.inference.name)
    log.info(f"Using agent0 inference method: '{cfg.inference.name}'")

    # --- Get visualization IDs and constants from environment ---
    # Reach into the unwrapped MuJoCo model/data to move the translucent
    # target markers that visualize the planned subgoals.
    model = env.unwrapped._model
    data = env.unwrapped._data

    target0_mocap_id = model.body("object_target_0").mocapid[0]
    target1_mocap_id = model.body("object_target_1").mocapid[0]

    # Collect all geoms attached to each target body so their alpha
    # (visibility) can be toggled as a group.
    target0_geom_ids = [
        i
        for i, body_id in enumerate(model.geom_bodyid)
        if body_id == model.body("object_target_0").id
    ]
    target1_geom_ids = [
        i
        for i, body_id in enumerate(model.geom_bodyid)
        if body_id == model.body("object_target_1").id
    ]
    all_subgoal_geom_ids = target0_geom_ids + target1_geom_ids

    # Inverse of the latent normalization: world_xyz = latent/scaler + center.
    # Assumes latent dims [0:3] and [3:6] are the two objects' scaled xyz
    # positions — TODO confirm against the dataset's observation layout.
    xyz_center = np.array([0.425, 0.0, 0.0])
    xyz_scaler = 10.0

    # --- Hierarchical Evaluation Loop ---
    video_dir = os.path.join(os.getcwd(), cfg.video_dir)
    os.makedirs(video_dir, exist_ok=True)
    log.info("Starting hierarchical evaluation and rendering...")

    # Replan every ~effective-horizon steps (1/(1-gamma)); every step if
    # gamma == 1, as per original logic.
    planning_period = (int(1 / (1 - cfg.gamma)) if cfg.gamma < 1.0 else 1)
    log.info(f"TDFlow planning period: {planning_period} steps")

    for episode in range(cfg.num_episodes):
        log.info(f"Starting episode {episode + 1}/{cfg.num_episodes}")
        obs, info = env.reset()
        raw_g_final = info["goal"]
        
        # With the identity encoder the "latent" goal is the raw goal
        # observation; no encoding step is applied here.
        z_final_goal = jnp.array(raw_g_final, dtype=jnp.float32)
        z_current_goal = z_final_goal

        agent0.reset_history()
        done = False
        step_counter = 0
        frames = []

        # Initially, make the subgoal markers invisible
        for gid in all_subgoal_geom_ids:
            model.geom_rgba[gid][3] = 0.0

        while not done:
            current_state_latent = jnp.array(obs, dtype=jnp.float32)

            # --- 1. Check for replanning conditions ---
            # Replan on a fixed schedule, or early if the current subgoal
            # has been reached. The `is not` identity check ensures we
            # never "reach" the final goal this way — only planned
            # subgoals trigger early replanning.
            replan_periodic = (step_counter % planning_period == 0)
            subgoal_reached = (
                (z_current_goal is not z_final_goal) and 
                (np.linalg.norm(current_state_latent - z_current_goal) < cfg.goal_tolerance)
            )

            if replan_periodic or subgoal_reached:
                if subgoal_reached:
                    log.info(f"Step {step_counter}: Subgoal reached. Replanning.")
                else:
                    log.info(f"Step {step_counter}: Periodic replanning.")

                rng, tdflow_rng = jax.random.split(rng)

                # Ask the planner for the next latent waypoint between the
                # current state and the final goal.
                z_sub_goal = tdflow_agent.plan_next_waypoint(
                    s_curr_latent=current_state_latent,
                    g_final_latent=z_final_goal,
                    rng=tdflow_rng,
                )
                z_current_goal = z_sub_goal
                log.info(f"Planned new subgoal (latent): {z_current_goal}")

                # Decode the two objects' world positions from the subgoal
                # and move the mocap markers there for visualization.
                subgoal_pos_0 = (z_current_goal[0:3] / xyz_scaler) + xyz_center
                subgoal_pos_1 = (z_current_goal[3:6] / xyz_scaler) + xyz_center
                log.info(f"Decoded subgoal positions: {subgoal_pos_0}, {subgoal_pos_1}")

                data.mocap_pos[target0_mocap_id] = subgoal_pos_0
                data.mocap_pos[target1_mocap_id] = subgoal_pos_1

                # Make the markers semi-transparent (visible) once a
                # subgoal exists.
                for gid in all_subgoal_geom_ids:
                    model.geom_rgba[gid][3] = 0.3

            # --- 2. Agent0 executes action towards the current goal ---
            rng, agent0_rng = jax.random.split(rng)
            action_chunk = agent0_inference_fn(
                rng=agent0_rng,
                batch_obs={"proprio": current_state_latent[None, :]},
                batch_goal={"proprio": z_current_goal[None, :]},
            )
            # Assumes inference returns (batch, chunk, act_dim); only the
            # first action of the chunk is executed — TODO confirm shape.
            action = np.array(action_chunk[0, 0, :])

            # --- 3. Render frame & Step environment ---
            # Frame is captured BEFORE stepping, so the video shows the
            # state each action was chosen in (the terminal state is not
            # rendered).
            frames.append(env.render())
            obs, reward, terminated, truncated, info = env.step(action)

            done = terminated or truncated
            step_counter += 1

            if done:
                log.info(f"Episode {episode + 1} finished after {step_counter} steps.")

        # Save video for the episode
        video_filepath = os.path.join(video_dir, f"episode_{episode + 1:03d}.mp4")
        imageio.mimsave(video_filepath, frames, fps=30)
        log.info(f"Video saved to {video_filepath}")

    log.info(f"Rendering complete. Videos saved in {video_dir}")


# Script entry point; Hydra parses CLI overrides and invokes main(cfg).
if __name__ == "__main__":
    main()
