import logging
import time
from functools import partial

import jax
import jax.numpy as jnp
import numpy as np
from flax import nnx
from tqdm import trange

from model.data_processing.data_loaders import KinematicBatch, Transition
from model.diffusion.diffusion import DiffusionPolicy

logger = logging.getLogger(__name__)


class FKGuidance(nnx.Module):
    """Gradient-guidance score for how well a joint configuration matches a target.

    Wraps a forward-kinematics model and returns the negated, norm-scaled mean
    absolute error between the FK prediction and the target ``cond``. Intended
    to be differentiated w.r.t. ``joint`` (see the ``jax.grad`` usage in
    ``DiffusionIKModel``). Not for batch processing — callers ``vmap`` over the
    batch dimension.
    """

    def __init__(self, fk_model: nnx.Module):
        super().__init__()
        self.fk_model = fk_model
        # Guidance must be deterministic: disable dropout etc. in the FK model.
        self.fk_model.eval()

    def __call__(self, joint, cond):
        """Return the negative normalized distance (higher is better).

        The distance is divided by ``||cond||`` to keep the guidance gradient
        from growing too large; the negative sign makes gradient *ascent* on
        this score minimize the FK error.
        """
        pred_xyz = self.fk_model(joint)
        # jnp.abs replaces the original sqrt(x**2): identical values, but its
        # gradient is well-defined (no NaN) when a coordinate error is exactly 0.
        # NOTE(review): despite the old comment, this is a mean-absolute (L1-style)
        # distance per coordinate, not an L2 norm.
        return -jnp.mean(jnp.abs(pred_xyz - cond)) / jnp.linalg.norm(cond)


class DiffusionIKModel(DiffusionPolicy):
    """Diffusion model predicting joint actions from end-effector targets.

    Using a diffusion model requires normalized inputs/outputs. Optionally uses
    a forward-kinematics model both for gradient guidance during sampling and
    for selecting the best of several sampled actions.

    Add joint weights
    """

    def __init__(self, in_dim: int, out_dim: int, hidden_dims=(512, 512, 512), fk_model: nnx.Module = None, **kwargs):
        super().__init__(
            seed=kwargs.get("seed", 0), obs_dim=in_dim, action_dim=out_dim, hidden_dims=hidden_dims, **kwargs
        )
        # Delegate to the setter so the guidance gradient fn is built in exactly
        # one place (the previous code duplicated this logic verbatim).
        self.set_fk_model(fk_model)

    def set_fk_model(self, fk_model: nnx.Module):
        """Attach (or detach, with ``None``) the FK model and rebuild the guidance fn."""
        self.fk_model = fk_model
        if self.fk_model is not None:
            # vmap over the batch dimension for both the action and the condition,
            # while keeping the model fixed (closed over by FKGuidance).
            self.grad_fn = jax.vmap(jax.grad(FKGuidance(self.fk_model), argnums=0), in_axes=(0, 0))
        else:
            self.grad_fn = None

    def __call__(self, xyz):
        """Sample joint action(s) for target position(s) ``xyz``.

        x0.shape = (B * num_act_samples * horizon, action_dim) before reshaping.
        """
        self.model.eval()
        if xyz.ndim == 1:
            # add batch dim for a single query
            xyz = xyz[None, :]
        B = len(xyz)

        x0, self.rng = self.jit_ddpm_sampling(
            self.rng,
            self.model,
            # Replicate each condition so num_act_samples candidates are drawn per query.
            xyz.repeat(
                self.num_act_samples,
                axis=0,
            ),
            grad_guidance_fn=self.grad_fn,
        )

        if self.fk_model is None:
            # TODO: currently only execute the first action of the first sample.
            # may work for action chunks or action selection
            return x0.reshape(B, self.num_act_samples, self.horizon_steps, self.action_dim)[:, 0, 0]

        self.fk_model.eval()
        return self.jit_select_action(x0, xyz, self.fk_model)

    @partial(nnx.jit, static_argnums=(0,))
    def jit_select_action(self, x0, xyz, fk_model):
        """Pick, per batch element, the sampled action with the lowest FK position error.

        NOTE(review): the final reshape has no horizon axis, unlike the no-FK
        branch of ``__call__`` — this appears to assume horizon_steps == 1;
        confirm before using with multi-step horizons.
        """
        B = xyz.shape[0]
        # Squared-distance FK loss per candidate, grouped back per batch element.
        fk_loss = jnp.sum((fk_model(x0) - xyz.repeat(self.num_act_samples, axis=0)) ** 2, axis=-1).reshape(
            B, self.num_act_samples
        )
        selected_indices = fk_loss.argmin(axis=-1)
        return x0.reshape(B, self.num_act_samples, self.action_dim)[jnp.arange(B), selected_indices]

    def update(self, batch: KinematicBatch):
        """Run one training step on a batch of (joint, xyz) pairs; returns loss info."""
        self.model.train()
        self.rng, info = self.jit_train_step(self.rng, self.model, self.opt, batch.joint, batch.xyz)
        return info


class DiffusionPlanningModel(DiffusionPolicy):
    """Diffusion model predicting short-horizon end-effector plans.

    ``weights`` balance the loss between position and orientation components
    (scales in meters and radians); ``scale`` enlarges the action signal so the
    diffusion model does not have to fit very small magnitudes.
    """

    def __init__(
        self,
        in_dim: int,
        out_dim: int,
        hidden_dims=(512, 512, 512),
        radius=0.01,
        scale=1.0,
        cond_dropout=0.1,
        **kwargs,
    ):
        super().__init__(
            seed=kwargs.get("seed", 0), obs_dim=in_dim, action_dim=out_dim, hidden_dims=hidden_dims, **kwargs
        )
        # Per-step loss weights: 3 position dims at weight 1.0, 3 orientation
        # dims scaled by ``radius``; tiled across the planning horizon.
        self.weights = jnp.concat([jnp.ones(3), jnp.ones(3) * radius], axis=-1).repeat(self.horizon_steps, axis=-1)
        self.scale = scale
        # Probability of zeroing the condition during classifier-free-guidance
        # training (generalizes the previously hard-coded 0.1; default unchanged).
        self.cond_dropout = cond_dropout

    def __call__(self, cond: jnp.ndarray):
        """Sample ``num_act_samples`` plans per condition; returns un-scaled actions."""
        if cond.ndim == 1:
            cond = cond[None, :]
        B = len(cond)
        self.model.eval()
        x0, self.rng = self.jit_ddpm_sampling(self.rng, self.model, cond.repeat(self.num_act_samples, axis=0))
        # TODO: sample selection? currently all samples are returned and callers
        # use only the first one.
        return x0.reshape(B, self.num_act_samples, self.horizon_steps, -1) / self.scale

    def update(self, batch: Transition):
        """Run one training step on a Transition batch; returns loss info."""
        self.model.train()
        self.rng, info = self.jit_train_step(self.rng, self.model, self.opt, batch.action, batch.condition)
        return info

    @partial(nnx.jit, static_argnums=(0,))
    def jit_train_step(self, rng, model: nnx.Module, optimizer: nnx.Optimizer, actions, cond):
        """Single jitted DDPM denoising-loss step (``self`` is static).

        Returns the advanced rng and an info dict containing the loss.
        """
        B = len(actions)
        x0 = actions.reshape(B, -1) * self.scale
        rng, time_key, noise_key, cfg_key = jax.random.split(rng, 4)
        t = jax.random.randint(time_key, (B, 1), 0, self.denoising_steps)
        noise = jax.random.normal(noise_key, x0.shape)
        # Forward-diffuse x0 to timestep t.
        xt = self.sqrt_alphas_cumprod[t] * x0 + self.sqrt_one_minus_alphas_cumprod[t] * noise

        def recon_loss(net: nnx.Module):
            if self.cfg_guidance != 1:
                # Classifier-free guidance training: randomly zero the condition
                # with probability ``self.cond_dropout``.
                keep = jax.random.uniform(cfg_key, (B, 1)) > self.cond_dropout
                model_output = net(xt, t, cond=cond * keep)
            else:
                model_output = net(xt, t, cond=cond)

            if self.predict_epsilon:
                residual = self.weights * (model_output - noise)
            else:
                residual = self.weights * (model_output - x0)
            loss = jnp.mean(residual**2)
            # rng is threaded through the aux dict so the jitted fn stays pure.
            return loss, {"loss": loss, "rng": rng}

        # nnx.value_and_grad differentiates w.r.t. the first argument (the model).
        (_, info), grads = nnx.value_and_grad(recon_loss, has_aux=True)(model)
        optimizer.update(grads)  # inplace update
        rng = info.pop("rng")
        return rng, info


if __name__ == "__main__":

    def test_model_save_and_load():
        """Smoke-test checkpoint round-trip for the planning model.

        Requires the hydra config tree under ``cfg/`` and its model targets;
        writes/reads a throwaway checkpoint file ``test.ckp``.
        """
        import hydra
        from omegaconf import DictConfig

        with hydra.initialize(version_base=None, config_path="cfg"):
            ej_cfg: DictConfig = hydra.compose(
                config_name="train_diffusion_planning",
                overrides=[
                    "tag=partial",
                    "model.temperature=0.1",
                    "model.horizon_steps=10",
                    "model.num_act_samples=100",
                ],
            )
            model = hydra.utils.instantiate(ej_cfg.model)

        model.save_ckpt("test.ckp")
        model.load_ckpt("test.ckp")

    # Fix: the smoke test was defined but never invoked, so running this module
    # as a script did nothing. (Stale commented-out experiment code removed.)
    test_model_save_and_load()
