import logging
import time
from functools import partial

import jax
import jax.numpy as jnp
import numpy as np
from flax import nnx
from tqdm import trange

from model.diffusion.diffusion import DiffusionPolicy
from model.transforms import denormalize_end, denormalize_joint

logger = logging.getLogger(__name__)


class DiffusionCtrlModel(DiffusionPolicy):
    """Diffusion policy that predicts (normalized) joint actions.

    Inputs and outputs are normalized to [-1, 1] — normalization is required.
    The training loss is computed in the denormalized (degree) space, which
    effectively applies per-joint weights. An optional forward-kinematics
    model can rank the ``num_act_samples`` sampled actions at inference time.
    """

    def __init__(self, in_dim: int, out_dim: int, hidden_dims=(512, 512, 512), fk_model=None, **kwargs):
        """
        Args:
            in_dim: dimension of the (normalized) conditioning observation.
            out_dim: dimension of the predicted joint-action vector.
            hidden_dims: hidden-layer widths of the denoising network.
            fk_model: optional forward-kinematics model used to select the
                best sampled action; when None the first sample is returned.
            **kwargs: forwarded to ``DiffusionPolicy`` (e.g. ``seed``).
        """
        super().__init__(
            seed=kwargs.get("seed", 0), obs_dim=in_dim, action_dim=out_dim, hidden_dims=hidden_dims, **kwargs
        )
        self.fk_model = fk_model

    def __call__(self, normed_pos):
        """Sample joint actions for ``normed_pos``.

        The input must be normalized in [-1, 1]; the returned joints are
        normalized in [-1, 1] as well.
        """
        self.model.eval()
        if normed_pos.ndim == 1:
            # add batch dim
            normed_pos = normed_pos[None, :]
        B = len(normed_pos)
        # draw num_act_samples candidates per batch element
        x0, self.rng = self.jit_ddpm_sampling(
            self.rng, self.model, normed_pos.repeat(self.num_act_samples, axis=0)
        )

        # x0.shape = (B * num_act_samples * horizon, action_dim)
        if self.fk_model is None:
            # TODO: currently only execute the first action of the first sample.
            # may work for action chunks or action selection
            return x0.reshape(B, self.num_act_samples, self.horizon_steps, self.action_dim)[:, 0, 0]

        # rank the sampled candidates with the FK model and keep the best one
        return self.jit_select_action(x0, normed_pos, self.fk_model.model)

    @partial(nnx.jit, static_argnums=(0,))
    def jit_select_action(self, x0, normed_pos, fk_model):
        """Select, per batch element, the sampled action whose FK prediction
        is closest (squared error) to the target position."""
        B = normed_pos.shape[0]
        fk_loss = jnp.sum(
            (fk_model(x0).mean(axis=0) - normed_pos.repeat(self.num_act_samples, axis=0)) ** 2, axis=-1
        ).reshape(B, self.num_act_samples)
        selected_indices = fk_loss.argmin(axis=-1)
        return x0.reshape(B, self.num_act_samples, self.action_dim)[jnp.arange(B), selected_indices]

    def update(self, batch):
        """Run one gradient step on ``batch`` (expects ``batch.x`` cond, ``batch.y`` actions)."""
        self.model.train()
        self.rng, info = self.jit_train_step(self.rng, self.model, self.opt, batch.y, batch.x)
        return info

    @partial(nnx.jit, static_argnums=(0,))
    def jit_train_step(self, rng, model: nnx.Module, optimizer: nnx.Optimizer, actions, cond):
        """
        Apply weighted loss for different joints.

        The loss is calculated in the degree space (needs denormalization!),
        so each joint's error is scaled by its physical range.
        """
        B = len(actions)
        x0 = actions.reshape(B, -1)
        rng, time_key, noise_key, cfg_key = jax.random.split(rng, 4)
        # sample a diffusion timestep and noise the clean actions
        t = jax.random.randint(time_key, (B, 1), 0, self.denoising_steps)
        noise = jax.random.normal(noise_key, x0.shape)
        xt = self.sqrt_alphas_cumprod[t] * x0 + self.sqrt_one_minus_alphas_cumprod[t] * noise

        def recon_loss(net: nnx.Module):
            # classifier-free guidance: randomly drop the condition (~10%)
            if self.cfg_guidance != 1:
                model_output = net(xt, t, cond=cond * (jax.random.uniform(cfg_key, (B, 1)) > 0.1))
            else:
                model_output = net(xt, t, cond=cond)

            # denormalize before the MSE so the loss is weighted per joint
            if self.predict_epsilon:
                mse = (denormalize_joint(model_output) - denormalize_joint(noise)) ** 2  # (B, 12)
            else:
                mse = (denormalize_joint(model_output) - denormalize_joint(x0)) ** 2  # (B, 12)

            loss = jnp.mean(mse)
            return loss, {"loss": loss, "rng": rng}

        # the first argument should be the model
        (_, info), grads = nnx.value_and_grad(recon_loss, has_aux=True)(model)
        optimizer.update(grads)  # inplace update
        rng = info.pop("rng")
        return rng, info

    def train(self, dataloader, num_epochs: int, save_as: str, val_data=None):
        """Train for ``num_epochs`` epochs, logging to TensorBoard.

        A checkpoint is saved every 100 epochs (only on validation improvement
        when ``val_data`` is given) and once more at the end of training.
        """
        from tensorboardX import SummaryWriter

        min_val_loss = float("inf")
        writer = SummaryWriter(log_dir=f".log/tensorboard/{save_as}_{round(time.time())}")

        for epoch in trange(num_epochs):

            loss_rec = []
            for batch in dataloader:
                info = self.update(batch)
                loss_rec.append(info["loss"].item())

            avg_loss = sum(loss_rec) / len(loss_rec)
            writer.add_scalar("train/loss", avg_loss, epoch)

            if (1 + epoch) % 10 == 0:
                logger.info(f"Epoch{epoch+1}: loss = {avg_loss}")

            if (1 + epoch) % 100 == 0:
                if val_data is None:
                    self.save_ckpt(file_name=save_as)
                else:
                    pred_joints = self(val_data.x)
                    val_loss = jnp.mean((pred_joints - val_data.y) ** 2)
                    if val_loss < min_val_loss:
                        min_val_loss = val_loss
                        self.save_ckpt(file_name=save_as)
                    writer.add_scalar("val/loss", val_loss, epoch)

        self.save_ckpt(file_name=save_as)


class DiffusionIKModel(DiffusionCtrlModel):
    """Inverse-kinematics diffusion model.

    ``self.model`` acts as a 6d -> 6d IK model, while ``orien_model``
    predicts the orientation (rx, ry, rz) for a given target xyz, so a
    caller only needs to supply the target xyz.
    """

    def __init__(self, orien_model: nnx.Module, **kwargs):
        """
        Args:
            orien_model: network mapping a target xyz to (rx, ry, rz).
            **kwargs: forwarded to ``DiffusionCtrlModel``.
        """
        self.orien_model = orien_model
        super().__init__(**kwargs)

    @partial(nnx.jit, static_argnums=(0,))
    def jit_pred_6d(self, orien_model: nnx.Module, tar_xyz):
        # build the full 6D target pose: [xyz, predicted (rx, ry, rz)]
        return jnp.concat([tar_xyz, orien_model(tar_xyz)], axis=-1)

    def __call__(self, tar_xyz: jnp.ndarray):
        """Predict joint actions for a target xyz (single vector or batch)."""
        if tar_xyz.ndim == 1:
            tar_xyz = tar_xyz[None, :]

        # predict the orientation (rx, ry, rz)
        self.orien_model.eval()
        target_6d = self.jit_pred_6d(self.orien_model, tar_xyz)  # (B, 6)
        return super().__call__(target_6d)


class DiffusionPlanningModel(DiffusionCtrlModel):
    """Given the current xyz and the goal xyz, predict trajectories.

    The denoiser is conditioned on ``[current_xyz, tar_xyz - current_xyz]``
    and all sampled trajectories are returned (no FK-based selection).
    """

    def __call__(self, current_xyz: jnp.ndarray, tar_xyz: jnp.ndarray):
        """Sample trajectories from ``current_xyz`` toward ``tar_xyz``.

        Returns normalized samples shaped
        ``(B, num_act_samples, horizon_steps, action_dim)``.
        """
        if tar_xyz.ndim == 1:
            tar_xyz = tar_xyz[None, :]

        if current_xyz.ndim == 1:
            current_xyz = current_xyz[None, :]

        # condition on the current position and the displacement to the goal
        cond = jnp.concat([current_xyz, tar_xyz - current_xyz], axis=-1)  # (B, 6)
        B = len(tar_xyz)
        x0, self.rng = self.jit_ddpm_sampling(self.rng, self.model, cond)
        return x0.reshape(B, self.num_act_samples, self.horizon_steps, -1)


if __name__ == "__main__":
    from model import EnsembleFKModel, MlpPartialIKModel

    # Example usage.
    # NOTE(review): this first model is constructed (loading its checkpoint)
    # but never called below — kept for quick manual switching.
    ctrl_model = DiffusionCtrlModel(
        in_dim=3,
        out_dim=6,
        model_file="diffusion_end_joint_nodropout_v1.ckp",
        temperature=0.1,
        num_act_samples=10,
        fk_model=MlpPartialIKModel(in_dim=6, out_dim=3, model_file="mlp_normed_FK.ckp"),
    )

    # independent FK model used only for evaluating the predictions
    fk_model = EnsembleFKModel(in_dim=6, out_dim=3, num_heads=10, model_file="ensemble_FK.ckp")

    orien_model = MlpPartialIKModel(in_dim=3, out_dim=3, model_file="mlp_orien_pred.ckp")
    ik_model = DiffusionIKModel(
        orien_model=orien_model.model,
        in_dim=6,
        out_dim=6,
        model_file="diffusion_ik_slow.ckp",
        dropout_rate=0.1,
        temperature=0.0,
        num_act_samples=1,
    )

    # random targets in [-1, 1]^3
    sampled_xyz = np.random.uniform(0, 1, (256, 3)) * 2 - 1

    pred_j = ik_model(sampled_xyz)

    # evaluate: how far the FK of the predicted joints is from the targets
    print("average distance loss:", jnp.sum((fk_model(pred_j.clip(-1, 1)) - sampled_xyz) ** 2, axis=-1).mean())


# def train_orien_pred():

#     from data.data_loaders import NormalizedEndJointData, Batch

#     class NormalizedEndOrien(NormalizedEndJointData):
#         """
#         Normaized (x, y, z, rx, ry, rz) -> (j1:j6)
#         """

#         def __init__(self, **kwargs):
#             super().__init__(**kwargs)

#         def __getitem__(self, idx):
#             return Batch(self.end[idx][:3], self.end[idx][:3])

#     data = NormalizedEndOrien(filename="data/json_data/slow.json")
#     data_loader = NumpyLoader(
#         data,
#         batch_size=512,
#         shuffle=True,
#         num_workers=10,
#         pin_memory=False,
#     )
#     model = MLPCtrlModel(in_dim=3, out_dim=3, weight_decay=1e-4)
#     model.train(data_loader, 5000, save_as="mlp_orien_pred.ckp", val_data=None)
