import datetime
import os

import flax.serialization
from tensorboardX import SummaryWriter
import grain.python as grain
import hydra
import jax
import jax.numpy as jnp
import ogbench
from omegaconf import DictConfig, OmegaConf
from tqdm import tqdm

from ne_flow import agent as agent_lib
from ne_flow.grain_dataset import (
    GCSamplingConfig,
    GCSamplingSource,
    TrajectoryData,
)


@hydra.main(config_path="../configs", config_name="tdflow_config", version_base=None)
def main(cfg: DictConfig):
    """Train a TDFlowAgent in the latent space of a frozen, pre-trained GCRLAgent0.

    Pipeline:
      1. Ensure the OGBench dataset exists on disk and load it.
      2. Build a Grain data loader that samples goal-conditioned transitions.
      3. Initialize GCRLAgent0, restore its EMA parameters from a checkpoint,
         and keep its encoder frozen.
      4. Run the TDFlow training loop on encoded observations, logging losses
         to TensorBoard and periodically saving msgpack checkpoints.
    """
    print("--- Configuration ---")
    print(OmegaConf.to_yaml(cfg))
    print("---------------------")

    # Create a unique, timestamped checkpoint directory for this run.
    timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    save_dir = os.path.join(cfg.checkpoint_dir, cfg.dataset_name, timestamp)
    os.makedirs(save_dir, exist_ok=True)
    print(f"Checkpoints will be saved to: {save_dir}")

    # --- TensorBoard Initialization ---
    log_dir = os.path.join(save_dir, "tb")
    writer = SummaryWriter(log_dir)
    print(f"TensorBoard logs will be saved to: {log_dir}")

    # --- Data Loading Pipeline using Grain ---
    # make_env_and_datasets downloads the dataset into data_static/ as a side
    # effect; the returned env and datasets are not needed here.
    print("Loading environment and ensuring dataset exists...")
    ogbench.make_env_and_datasets(
        cfg.dataset_name,
        dataset_dir="data_static/",
        compact_dataset=True,
    )
    npz_path = f"data_static/{cfg.dataset_name}.npz"

    trajectory_data = TrajectoryData(npz_path)
    trajectory_data.load_and_verify()

    sampling_config = GCSamplingConfig(
        trajs_obs_len=cfg.trajs_obs_len,
        trajs_act_len=cfg.trajs_act_len,
        max_dist_goal=cfg.max_goal_dist,
        is_act_vel=True,
    )
    source = GCSamplingSource(trajectory_data, sampling_config)

    # Single-host training: one shard containing every record.
    shard_options = grain.ShardOptions(
        shard_index=0, shard_count=1, drop_remainder=True
    )

    sampler = grain.IndexSampler(
        num_records=len(source),
        shard_options=shard_options,
        shuffle=True,
        num_epochs=None,  # Iterate indefinitely
        seed=cfg.seed,
    )

    operations = [grain.Batch(batch_size=cfg.size_batch, drop_remainder=True)]
    data_loader = grain.DataLoader(
        data_source=source,
        sampler=sampler,
        operations=operations,
        worker_count=cfg.workers,
    )

    # --- Agent Initialization ---
    # Split a dedicated key per consumer; reusing a key across consumers would
    # correlate their randomness (the original passed the root `rng` to agent0
    # and kept splitting it afterwards).
    rng = jax.random.PRNGKey(cfg.seed)
    rng, agent_rng = jax.random.split(rng)
    rng, agent0_rng = jax.random.split(rng)

    observation_shape = trajectory_data.data["observations"].shape
    action_shape = trajectory_data.data["actions"].shape

    # Convert DictConfig to a standard dict for agent compatibility
    agent_config_dict = OmegaConf.to_container(cfg, resolve=True)

    # Dynamically build encoder_config_agent0 based on cfg.encoder
    input_dim = observation_shape[1]
    encoder_name = cfg.encoder.name

    # An identity encoder passes observations through unchanged, so its
    # output dimension is forced to match the input dimension.
    if encoder_name == "identity":
        output_dim = input_dim
    else:
        output_dim = cfg.encoder.latent_dim  # Use user-specified latent_dim

    encoder_config_agent0 = [
        ("proprio", encoder_name, input_dim, output_dim),
    ]
    size_latent_obs = output_dim  # The actual latent size

    # Also update cfg.encoder.latent_dim to reflect the actual output_dim for identity
    if encoder_name == "identity":
        cfg.encoder.latent_dim = output_dim

    # 1. Initialize and load pre-trained GCRLAgent0
    print("Initializing pre-trained agent0...")
    agent0 = agent_lib.GCRLAgent0(
        config=agent_config_dict,
        encoder_config=encoder_config_agent0,
        size_latent_obs=size_latent_obs,
        size_act=action_shape[1],
        rng=agent0_rng,  # Dedicated key for agent0 init
    )
    print(f"Loading agent0 parameters from {cfg.agent0_checkpoint_path}...")
    with open(cfg.agent0_checkpoint_path, "rb") as f:
        param_bytes = f.read()
    # Restore EMA parameters; from_bytes uses the freshly-initialized params
    # as the template for the deserialized pytree structure.
    loaded_params = flax.serialization.from_bytes(
        agent0.combined_state.ema_params, param_bytes
    )
    agent0.combined_state = agent0.combined_state.replace(ema_params=loaded_params)

    # Extract agent0's encoder model and its EMA parameters for dynamic encoding
    agent0_encoder_model = agent0.encoder_model
    agent0_encoder_ema_params = agent0.combined_state.ema_params["encoder"]

    # agent0's encoder is frozen during TDFlow training: define the encoding
    # helper once (hoisted out of the loop) and block gradients through it.
    def encode_frozen(raw_obs):
        latent = agent0_encoder_model.apply(
            {"params": agent0_encoder_ema_params}, raw_obs
        )
        return jax.lax.stop_gradient(latent)

    # 2. Initialize TDFlowAgent
    print("Initializing TDFlowAgent...")
    agent = agent_lib.TDFlowAgent(
        config=agent_config_dict,
        size_latent_obs=size_latent_obs,  # TDFlowAgent operates on latent space
        size_act=action_shape[1],
        rng=agent_rng,
        agent0=agent0,  # Pass the initialized agent0 instance
    )

    # --- Training Loop ---
    data_iter = iter(data_loader)

    for epoch in tqdm(range(cfg.epoch)):
        (
            batch_obs_t,
            batch_obs_g,
            batch_dist,
            batch_traj_obs,
            batch_traj_act,
            batch_other_obs,
        ) = next(data_iter)

        # Encode raw numpy batches into agent0's latent space.
        s_latent = encode_frozen(jnp.asarray(batch_obs_t))
        g_latent = encode_frozen(jnp.asarray(batch_obs_g))
        # Next state: second observation of the sampled trajectory window.
        s_prime_latent = encode_frozen(jnp.asarray(batch_traj_obs)[:, 1, :])

        # Actions stay in raw action space; first action of the window.
        a = jnp.asarray(batch_traj_act)[:, 0, :]

        rng, train_rng = jax.random.split(rng)
        _loss = agent.train(
            train_rng,
            s_latent,
            a,
            s_prime_latent,
            g_latent,
            agent0.combined_state.ema_params,  # Pass agent0's full EMA params for its internal use
        )

        # --- TensorBoard Logging ---
        for k, v in _loss.items():
            # Cast to a Python float so tensorboardX accepts jax scalars.
            writer.add_scalar(f"loss/{k}", float(v), epoch)

        # Periodically checkpoint the EMA parameters of the vector field.
        if (epoch + 1) % cfg.eval_every == 0:
            params_to_save = agent.v_field_state.ema_params
            filepath = os.path.join(save_dir, f"checkpoint_{epoch + 1:07d}.msgpack")
            with open(filepath, "wb") as f:
                f.write(flax.serialization.to_bytes(params_to_save))
            print(f"Saved checkpoint to {filepath}")

    writer.close()


if __name__ == "__main__":
    # Hydra populates `cfg` from ../configs/tdflow_config.yaml (plus CLI
    # overrides) before invoking main.
    main()
