import datetime
import os

import flax.serialization
import grain.python as grain
import hydra  # Import hydra
import jax
import jax.numpy as jnp
import ogbench
from omegaconf import DictConfig, OmegaConf  # Import DictConfig, OmegaConf
from tqdm import tqdm

from ne_flow import agent as agent_lib
from ne_flow import evaling
from ne_flow.grain_dataset import (
    GCSamplingConfig,
    GCSamplingSource,
    TrajectoryData,
)


@hydra.main(
    config_path="../configs", config_name="config", version_base=None
)
def main(cfg: DictConfig):
    """Train a goal-conditioned agent and periodically evaluate/checkpoint it.

    Pipeline:
      1. Resolve/download the ogbench dataset and load it into memory.
      2. Build a Grain ``DataLoader`` that samples goal-conditioned batches.
      3. Build the agent (encoder config derived from the data shapes).
      4. Train for ``cfg.epoch`` iterations; every ``cfg.eval_every``
         iterations run an evaluation rollout and save the EMA parameters.

    Args:
        cfg: Hydra-composed configuration (see ``../configs/config``).
    """
    print("--- Configuration ---")
    print(OmegaConf.to_yaml(cfg))
    print("---------------------")

    # Checkpoints are namespaced by dataset and a per-run timestamp so
    # repeated runs never overwrite each other.
    timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    save_dir = os.path.join(cfg.checkpoint_dir, cfg.dataset_name, timestamp)
    os.makedirs(save_dir, exist_ok=True)
    print(f"Checkpoints will be saved to: {save_dir}")

    # --- Data loading pipeline (Grain) ---

    # 1. Ensure the dataset exists using ogbench and keep the env handle
    #    for the evaluation rollouts later on.
    dataset_dir = "data_static/"
    print("Loading environment and ensuring dataset exists...")
    env, _, _ = ogbench.make_env_and_datasets(
        cfg.dataset_name,
        dataset_dir=dataset_dir,
        compact_dataset=True,
    )
    # Derive the npz path from the same directory handed to ogbench so the
    # two locations can never drift apart.
    npz_path = os.path.join(dataset_dir, f"{cfg.dataset_name}.npz")

    # 2. Load the trajectory data into memory and verify it.
    trajectory_data = TrajectoryData(npz_path)
    trajectory_data.load_and_verify()

    # 3. Create the goal-conditioned sampling source and index sampler.
    sampling_config = GCSamplingConfig(
        trajs_obs_len=cfg.trajs_obs_len,
        trajs_act_len=cfg.trajs_act_len,
        max_dist_goal=cfg.max_goal_dist,
        is_act_vel=True,
    )
    source = GCSamplingSource(trajectory_data, sampling_config)

    # Single-host training: one shard containing everything.
    shard_options = grain.ShardOptions(
        shard_index=0, shard_count=1, drop_remainder=True
    )

    sampler = grain.IndexSampler(
        num_records=len(source),
        shard_options=shard_options,
        shuffle=True,
        num_epochs=None,  # Iterate indefinitely.
        seed=cfg.seed,
    )

    # 4. Create the DataLoader; batching is expressed as a Grain operation.
    operations = [grain.Batch(batch_size=cfg.size_batch, drop_remainder=True)]
    data_loader = grain.DataLoader(
        data_source=source,
        sampler=sampler,
        operations=operations,
        worker_count=cfg.workers,
    )

    # Initialize the agent.
    rng = jax.random.PRNGKey(cfg.seed)
    rng, agent_rng = jax.random.split(rng)

    agent_class = agent_lib.GCRLAgent0  # Explicitly set agent class.

    # Observation/action dimensionalities come from the loaded arrays;
    # assumes each is shaped (num_steps, dim) — TODO confirm against
    # TrajectoryData.
    observation_shape = trajectory_data.data["observations"].shape
    action_shape = trajectory_data.data["actions"].shape

    # --- Dynamically build encoder_config based on cfg.encoder ---
    input_dim = observation_shape[1]
    encoder_name = cfg.encoder.name

    # An identity encoder passes observations through unchanged, so its
    # output dimension is the raw input dimension; any other encoder uses
    # the user-specified latent size.
    if encoder_name == "identity":
        output_dim = input_dim
    else:
        output_dim = cfg.encoder.latent_dim

    encoder_config = [
        ("proprio", encoder_name, input_dim, output_dim),
    ]
    size_latent_obs = output_dim  # The actual latent size seen by the agent.

    # Keep cfg.encoder.latent_dim consistent with the effective output_dim
    # in the identity case, since the full cfg is handed to the agent below.
    # NOTE(review): this mutates the Hydra config in place and assumes the
    # key already exists (OmegaConf struct mode rejects new keys) — confirm.
    if encoder_name == "identity":
        cfg.encoder.latent_dim = output_dim

    agent_kwargs = {
        "config": cfg,  # Pass the full cfg object.
        "encoder_config": encoder_config,
        "size_latent_obs": size_latent_obs,
        "size_act": action_shape[1],
        "rng": agent_rng,
    }

    agent = agent_class(**agent_kwargs)

    # Training loop. Each iteration consumes ONE batch, so "epoch" here is
    # really a training step (name kept for config compatibility).
    data_iter = iter(data_loader)

    for epoch in tqdm(range(cfg.epoch)):
        # The DataLoader yields tuples of numpy arrays.
        (
            batch_obs_t,
            batch_obs_g,
            batch_dist,
            batch_traj_obs,
            batch_traj_act,
            batch_other_obs,
        ) = next(data_iter)

        # The agent expects observations wrapped in a modality dict (the
        # "proprio" key matches encoder_config above); convert the numpy
        # arrays from the loader to jax arrays at the same time.
        batch_obs = {"proprio": jnp.array(batch_obs_t)}
        batch_goal = {"proprio": jnp.array(batch_obs_g)}
        batch_traj_obs = {"proprio": jnp.array(batch_traj_obs)}
        batch_other_obs = {"proprio": jnp.array(batch_other_obs)}
        batch_dist = jnp.array(batch_dist)
        batch_traj_act = jnp.array(batch_traj_act)

        rng, train_rng = jax.random.split(rng)
        _loss = agent.train(
            train_rng,
            batch_obs,
            batch_goal,
            batch_dist,
            batch_traj_obs,
            batch_traj_act,
            batch_other_obs,
        )

        if (epoch + 1) % cfg.eval_every == 0:
            rng, eval_rng = jax.random.split(rng)
            overall_success, success_length = evaling.do_eval_stats(
                env,
                agent,
                cfg.num_eval_episode,
                cfg.inference_horizon,
                eval_rng,
            )
            print(
                f"Epoch {epoch}, Eval Success: {overall_success:.4f}, "
                f"Eval Length: {success_length:.2f}"
            )

            # Serialize the EMA parameters (the evaluation weights) via
            # flax msgpack serialization; zero-padded step in the filename
            # keeps checkpoints lexically sorted.
            params_to_save = agent.combined_state.ema_params
            param_bytes = flax.serialization.to_bytes(params_to_save)
            filepath = os.path.join(save_dir, f"checkpoint_{epoch + 1:07d}.msgpack")
            with open(filepath, "wb") as f:
                f.write(param_bytes)
            print(f"Saved checkpoint to {filepath}")


# Script entry point; Hydra parses CLI overrides and injects the composed cfg.
if __name__ == "__main__":
    main()
