import logging
import os
import sys

import flax.serialization
import hydra
import jax
import ogbench
from omegaconf import DictConfig, OmegaConf

from ne_flow import agent as agent_lib
from ne_flow import evaling

# Module-level logger, named after this module per the logging convention.
log = logging.getLogger(__name__)


def _infer_dataset_name(ckpt_path: str) -> str:
    """Infer the dataset name from a checkpoint path.

    Assumes the path follows the structure
    ``.../checkpoints/DATASET_NAME/TIMESTAMP/...``, so the dataset name is
    the third-from-last path component.

    Args:
        ckpt_path: Filesystem path to the checkpoint file.

    Returns:
        The inferred dataset name.

    Raises:
        ValueError: If the path has too few components to contain a
            dataset name.
    """
    parts = os.path.normpath(ckpt_path).split(os.sep)
    if len(parts) < 3:
        raise ValueError(
            f"Checkpoint path too shallow to infer a dataset name: {ckpt_path!r}"
        )
    return parts[-3]


def _build_agent_config(cfg: DictConfig) -> dict:
    """Build a plain-dict agent config from the flat Hydra config.

    Script-only top-level keys are dropped, and the inference-specific
    parameters are merged in so the agent config is self-contained.

    Args:
        cfg: The composed Hydra configuration.

    Returns:
        A resolved plain ``dict`` suitable for the agent constructor.
    """
    params = OmegaConf.to_container(cfg, resolve=True)
    # These keys drive the script itself, not the agent.
    for key in ("ckpt_path", "num_episodes", "video_dir", "inference"):
        params.pop(key, None)
    # Merge inference-specific params back in at the top level.
    params.update(OmegaConf.to_container(cfg.inference, resolve=True))
    return params


@hydra.main(config_path="../configs", config_name="visualize", version_base=None)
def main(cfg: DictConfig) -> None:
    """Load a trained checkpoint, run evaluation episodes, and save videos.

    The dataset name is inferred from ``cfg.ckpt_path``, the environment is
    rebuilt to recover observation/action shapes, the agent's EMA parameters
    are restored from the checkpoint, and rendered evaluation episodes are
    written to ``cfg.video_dir`` under Hydra's working directory.
    """
    log.info("--- Configuration ---")
    log.info(OmegaConf.to_yaml(cfg))
    log.info("---------------------")

    # Automatically determine the dataset_name from the checkpoint path.
    # This makes the script more robust and avoids shape mismatch errors.
    try:
        dataset_name = _infer_dataset_name(cfg.ckpt_path)
        log.info("Inferred dataset name: %s", dataset_name)
    except ValueError:
        log.error("Error: Could not infer dataset name from checkpoint path.")
        log.error(
            "Please ensure the path follows the structure '.../checkpoints/DATASET_NAME/...' or provide it manually."
        )
        # sys.exit, not the `exit` builtin: `exit` is a site.py convenience
        # intended only for the interactive interpreter.
        sys.exit(1)

    # Update cfg with inferred dataset_name
    cfg.dataset_name = dataset_name

    log.info("Loading environment...")
    env, _, _ = ogbench.make_env_and_datasets(
        cfg.dataset_name,
        dataset_dir="data_static/",
        compact_dataset=True,
    )

    # Observation/action shapes determine the agent's network sizes.
    observation_shape = env.observation_space.shape
    action_shape = env.action_space.shape

    # Split the seed so agent init and evaluation use independent streams.
    rng = jax.random.PRNGKey(cfg.seed)
    rng, agent_rng = jax.random.split(rng)

    # This assumes GCRLAgent0 was used for training.
    # A more robust solution would save the agent class name with the checkpoint.
    agent_class = agent_lib.GCRLAgent0

    agent = agent_class(
        config=_build_agent_config(cfg),
        encoder_config=[
            ("proprio", "mlp", observation_shape[0], 64),
        ],
        size_act=action_shape[0],
        rng=agent_rng,
    )

    # Load the trained parameters; the EMA params are what inference uses.
    log.info("Loading checkpoint from %s...", cfg.ckpt_path)
    with open(cfg.ckpt_path, "rb") as f:
        loaded_bytes = f.read()

    loaded_params = flax.serialization.from_bytes(
        agent.combined_state.ema_params, loaded_bytes
    )
    agent.combined_state = agent.combined_state.replace(ema_params=loaded_params)

    # Resolve and bind the inference strategy by name; fail loudly on a typo.
    log.info("Resolving inference strategy: '%s'", cfg.inference.name)
    method_name = cfg.inference.name
    try:
        inference_fn = getattr(agent, method_name)
    except AttributeError:
        log.error(
            "Error: Agent %s has no method named '%s'",
            agent_class.__name__,
            method_name,
        )
        raise

    log.info("Using inference method: '%s'", method_name)

    log.info("Starting evaluation and rendering...")
    rng, eval_rng = jax.random.split(rng)

    # Save videos under Hydra's per-run working directory.
    video_dir = os.path.join(os.getcwd(), cfg.video_dir)
    os.makedirs(video_dir, exist_ok=True)

    evaling.do_eval_and_render(
        env,
        agent,
        num_eval_episode=cfg.num_episodes,
        inference_horizon=cfg.inference_horizon,
        rng=eval_rng,
        video_dir=video_dir,
        inference_fn=inference_fn,  # Pass the resolved inference function
    )

    log.info("Rendering complete. Videos saved in %s", video_dir)


if __name__ == "__main__":
    # Entry point: `main` is wrapped by @hydra.main, which composes the
    # config (and CLI overrides) and supplies it as `cfg` — call with no args.
    main()
