import logging
import os
from functools import partial
from typing import Callable, Sequence

import jax
import jax.numpy as jnp
import optax
import orbax.checkpoint as ocp
from flax import nnx

from model.diffusion.schedules import cosine_beta_schedule
from model.networks.mlp import DiffusionMLP

log = logging.getLogger(__name__)


class DiffusionPolicy(object):
    """DDPM-based diffusion policy over flattened action chunks.

    Trains a ``DiffusionMLP`` to predict either the injected noise
    (``predict_epsilon=True``) or the clean action chunk x0, and samples
    actions with DDPM ancestral sampling, optionally steered by
    classifier-free guidance and/or an external gradient-guidance function.
    Implemented with JAX / Flax NNX; training and sampling steps are jitted.
    """

    def __init__(
        self,
        seed: int,
        action_dim: int,
        obs_dim: int,
        horizon_steps: int = 1,
        cond_steps: int = 1,
        hidden_dims: Sequence[int] = (256, 256),
        model_file: str = None,
        # Various clipping
        denoised_clip_value: float = None,
        randn_clip_value: float = 10,
        # final_action_clip_value: float = None,
        # eps_clip_value=None,  # DDIM only
        # DDPM parameters
        denoising_steps: int = 10,
        predict_epsilon: bool = False,
        use_layernorm: bool = False,
        num_act_samples: int = 100,
        # DDPM sampling
        # training,
        lr: float = 3e-4,
        dropout_rate=None,
        weight_decay=1e-4,
        min_lr: float = 1e-5,
        decay_steps: int = None,
        ema_decay: float = 0.995,
        max_grad_norm: float = 1,
        # DDIM sampling
        use_ddim=False,
        ddim_discretize="uniform",
        ddim_steps=None,
        cfg_guidance: float = 1,
        temperature: float = 1,
    ):
        """Build the network, optimizer, and DDPM noise schedule.

        Args:
            seed: PRNG seed for parameters and sampling.
            action_dim: dimension of a single action.
            obs_dim: dimension of a single observation.
            horizon_steps: number of actions predicted per chunk.
            cond_steps: number of observation steps used as conditioning.
            hidden_dims: hidden layer sizes of the MLP backbone.
            model_file: optional checkpoint file name to restore from.
            denoised_clip_value: if set, clip the x0 estimate to this range
                at every denoising step.
            randn_clip_value: clip sampled standard-normal noise to keep
                samples near the posterior mean.
            denoising_steps: number T of DDPM steps.
            predict_epsilon: network predicts noise if True, else x0.
            use_layernorm / dropout_rate: backbone regularization options.
            num_act_samples: action samples drawn per observation in __call__.
            lr / min_lr / decay_steps / weight_decay / max_grad_norm: optimizer
                and (optional cosine) schedule settings.
            ema_decay: stored EMA decay rate (see NOTE below).
            use_ddim / ddim_discretize / ddim_steps: accepted for interface
                compatibility; DDIM sampling is not implemented in this class.
            cfg_guidance: classifier-free guidance weight; 1 disables CFG.
            temperature: multiplier on the sampling noise term.
        """
        self.rng = jax.random.PRNGKey(seed)

        self.horizon_steps = horizon_steps
        self.action_dim = action_dim
        # Flattened action-chunk dimension the network operates on.
        self.act_chunk_dim = self.horizon_steps * self.action_dim
        self.obs_dim = obs_dim
        self.obs_cond_steps = cond_steps
        self.obs_chunk_dim = self.obs_cond_steps * self.obs_dim
        self.denoising_steps = int(denoising_steps)
        self.predict_epsilon = predict_epsilon
        self.num_act_samples = num_act_samples

        # Clip the denoised x0 estimate at each denoising step.
        self.denoised_clip_value = denoised_clip_value

        # For each denoising step, clip the sampled randn so the sampled
        # action is not too far away from the posterior mean.
        self.randn_clip_value = randn_clip_value

        # Optional cosine decay. optax's `alpha` is a *fraction* of the
        # initial value, so divide by `lr` to make the schedule bottom out at
        # `min_lr`. (Passing `min_lr` directly, as before, yields a final
        # learning rate of lr * min_lr instead.)
        if decay_steps is not None:
            lr = optax.cosine_decay_schedule(lr, decay_steps=decay_steps, alpha=min_lr / lr)

        self.model = DiffusionMLP(
            rngs=nnx.Rngs(seed),
            output_dim=self.act_chunk_dim,
            # NOTE(review): keyword spelled "hiddien_dims" — presumably
            # matches a typo in DiffusionMLP's signature; confirm there.
            hiddien_dims=hidden_dims,
            cond_dim=self.obs_chunk_dim,
            use_layernorm=use_layernorm,
            dropout_rate=dropout_rate,
        )
        opt = optax.chain(
            optax.clip_by_global_norm(max_norm=max_grad_norm), optax.adamw(learning_rate=lr, weight_decay=weight_decay)
        )
        self.opt = nnx.Optimizer(self.model, opt)
        # NOTE(review): these EMA parameters are stored but never updated
        # anywhere in this class — confirm whether EMA tracking is intended.
        self.ema_params = nnx.Param(self.model)
        self.ema_decay = ema_decay

        if model_file:
            self.load_ckpt(model_file)

        # Classifier-free guidance weight (1 disables CFG) and sampling
        # temperature (scales the noise term during sampling).
        self.cfg_guidance = cfg_guidance
        self.temperature = temperature

        # ---- DDPM schedule (Ho et al. 2020, "Denoising Diffusion
        # Probabilistic Models"); all arrays have length `denoising_steps`
        # and are indexed by t in [0, T). ----
        # beta_t
        self.betas = cosine_beta_schedule(denoising_steps)
        # alpha_t = 1 - beta_t
        self.alphas = 1.0 - self.betas
        # alpha-bar_t = prod_{s<=t} alpha_s
        self.alphas_cumprod = jnp.cumprod(self.alphas, axis=0)
        # sqrt(alpha-bar_t)
        self.sqrt_alphas_cumprod = jnp.sqrt(self.alphas_cumprod)
        # alpha-bar_{t-1}, with alpha-bar_{-1} := 1
        self.alphas_cumprod_prev = jnp.concatenate([jnp.ones(1), self.alphas_cumprod[:-1]])
        # sqrt(1 - alpha-bar_t)
        self.sqrt_one_minus_alphas_cumprod = jnp.sqrt(1.0 - self.alphas_cumprod)
        # sqrt(1 / alpha-bar_t)
        self.sqrt_recip_alphas_cumprod = jnp.sqrt(1.0 / self.alphas_cumprod)
        # sqrt(1 / alpha-bar_t - 1)
        self.sqrt_recipm1_alphas_cumprod = jnp.sqrt(1.0 / self.alphas_cumprod - 1)
        # Posterior variance beta-tilde_t = sigma_t^2
        #   = beta_t * (1 - abar_{t-1}) / (1 - abar_t)   -- eq. (7) in DDPM.
        self.ddpm_var = self.betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
        self.ddpm_std = jnp.sqrt(self.ddpm_var)
        self.ddpm_logvar_clipped = jnp.log(jnp.clip(self.ddpm_var, min=1e-20))
        # Posterior mean mu_t = coef1 * x0 + coef2 * x_t  -- eq. (7) in DDPM.
        self.ddpm_mu_coef1 = self.betas * jnp.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
        self.ddpm_mu_coef2 = (1.0 - self.alphas_cumprod_prev) * jnp.sqrt(self.alphas) / (1.0 - self.alphas_cumprod)

    def update(self, actions, conditions):
        """Run one jitted gradient step; advances the internal RNG.

        Args:
            actions: (B, horizon_steps, action_dim) ground-truth chunks.
            conditions: observation conditioning passed through to the model.

        Returns:
            dict of training metrics (currently {"loss": ...}).
        """
        self.rng, info = self.jit_train_step(self.rng, self.model, self.opt, actions, conditions)
        return info

    def __call__(self, obs: jnp.ndarray, guidance_fn: nnx.Module = None):
        """Sample action chunks for a batch of observations.

        Args:
            obs: (B, history_steps, obs_dim) observation batch.
            guidance_fn: optional module taking (action_chunk, cond) and
                returning a scalar; its gradient w.r.t. the action steers
                sampling at every denoising step.

        Returns:
            (B, num_act_samples, horizon_steps, action_dim) sampled actions.
        """
        B = len(obs)

        if guidance_fn is not None:
            guidance_fn.eval()
            # vmap over the batch dimension of both the action and the
            # condition, keeping the guidance model itself fixed.
            grad_fn = jax.vmap(jax.grad(guidance_fn, argnums=0), in_axes=(0, 0))
        else:
            grad_fn = None

        # Each observation is repeated num_act_samples times so all samples
        # are drawn in one batched denoising pass.
        x0, self.rng = self.jit_ddpm_sampling(
            self.rng, self.model, obs.repeat(self.num_act_samples, axis=0), grad_guidance_fn=grad_fn
        )
        return x0.reshape(B, self.num_act_samples, self.horizon_steps, self.action_dim)

    @partial(nnx.jit, static_argnums=(0,))
    def recon_loss(self, model: nnx.Module, rng, x0: jnp.ndarray, cond: jnp.ndarray):
        """Evaluation-style DDPM reconstruction loss (no gradient step).

        Noises x0 at a random timestep per sample and measures the model's
        prediction error against either the noise or x0.

        Returns:
            (loss, info) where info carries the loss and the advanced rng.
        """
        B = len(x0)
        x0 = x0.reshape(B, -1)
        rng, time_key, noise_key = jax.random.split(rng, 3)
        # t ~ Uniform{0, ..., T-1}, one per sample.
        t = jax.random.randint(time_key, (B, 1), 0, self.denoising_steps)
        noise = jax.random.normal(noise_key, x0.shape)
        # Forward diffusion q(x_t | x_0), eq. (4) in DDPM.
        xt = self.sqrt_alphas_cumprod[t] * x0 + self.sqrt_one_minus_alphas_cumprod[t] * noise
        model_output = model(xt, t, cond=cond)
        if self.predict_epsilon:
            l2_loss = jnp.sum((model_output - noise) ** 2, axis=-1)
        else:
            l2_loss = jnp.sum((model_output - x0) ** 2, axis=-1)

        loss = jnp.mean(l2_loss)

        return loss, {"loss": loss, "rng": rng}

    @partial(nnx.jit, static_argnums=(0,))
    def jit_train_step(self, rng, model: nnx.Module, optimizer: nnx.Optimizer, actions, cond):
        """One jitted DDPM training step with in-place optimizer update.

        Returns:
            (rng, info): the advanced rng and a metrics dict.
        """
        B = len(actions)
        x0 = actions.reshape(B, -1)
        rng, time_key, noise_key, cfg_key = jax.random.split(rng, 4)
        t = jax.random.randint(time_key, (B, 1), 0, self.denoising_steps)
        noise = jax.random.normal(noise_key, x0.shape)
        # Forward diffusion q(x_t | x_0).
        xt = self.sqrt_alphas_cumprod[t] * x0 + self.sqrt_one_minus_alphas_cumprod[t] * noise

        def loss_fn(net: nnx.Module):
            if self.cfg_guidance != 1:
                # Classifier-free guidance training: zero out the condition
                # for ~10% of the batch. The mask shape (B, 1, ..., 1) is
                # built from cond.ndim so it broadcasts for any cond rank
                # (the previous fixed (B, 1) only worked for 2-D cond).
                keep = jax.random.uniform(cfg_key, (B,) + (1,) * (cond.ndim - 1)) > 0.1
                model_output = net(xt, t, cond=cond * keep)
            else:
                model_output = net(xt, t, cond=cond)
            if self.predict_epsilon:
                loss = jnp.mean((model_output - noise) ** 2)
            else:
                loss = jnp.mean((model_output - x0) ** 2)
            return loss, {"loss": loss, "rng": rng}

        # nnx.value_and_grad differentiates w.r.t. the first argument (the model).
        (_, info), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)
        optimizer.update(grads)  # in-place update
        rng = info.pop("rng")
        return rng, info

    @partial(nnx.jit, static_argnames=("self", "grad_guidance_fn"))
    def jit_ddpm_sampling(self, rng, model: nnx.Module, cond: jnp.ndarray, grad_guidance_fn: Callable = None):
        """DDPM ancestral sampling, jitted, with optional gradient guidance.

        Args:
            rng: PRNG key.
            cond: (B, ...) conditioning batch.
            grad_guidance_fn: optional batched callable (xt, cond) -> grad
                w.r.t. xt, added to the posterior mean each step.

        Returns:
            (x0, rng): sampled flattened action chunks and the advanced key.
        """
        B = len(cond)

        @nnx.scan(in_axes=(nnx.Carry, 0))
        def p_sampling(input_tuple, t):
            xt, rng_ = input_tuple
            batch_t = jnp.full((B, 1), t)
            model_out = model(xt, batch_t, cond=cond)

            # Classifier-free guidance: blend conditional and unconditional
            # predictions into model_out so the blend is actually used by
            # both branches below. (Previously the blend was written to a
            # temporary that was immediately overwritten, so CFG was a no-op.)
            if self.cfg_guidance != 1:
                unc_pred = model(xt, batch_t, cond=jnp.zeros_like(cond))
                model_out = self.cfg_guidance * model_out + (1 - self.cfg_guidance) * unc_pred

            if self.predict_epsilon:
                # Recover x0 from the noise prediction: re-parameterization
                # of distribution (4) in the DDPM paper.
                eps_pred = model_out
                x_recon = self.sqrt_recip_alphas_cumprod[t] * xt - self.sqrt_recipm1_alphas_cumprod[t] * eps_pred
            else:
                # Direct value prediction (as in consistency models).
                x_recon = model_out

            if self.denoised_clip_value:
                # Clipping x_recon here bounds the posterior mean, eq. (7).
                x_recon = x_recon.clip(-self.denoised_clip_value, self.denoised_clip_value)

            xt_mu = self.ddpm_mu_coef1[t] * x_recon + self.ddpm_mu_coef2[t] * xt

            if grad_guidance_fn is not None:
                # Shift the mean along the guidance gradient, scaled by the
                # posterior variance at this step.
                guidance_grad = grad_guidance_fn(xt_mu, cond)
                xt_mu = xt_mu + self.ddpm_var[t] * guidance_grad

            rng_, key_ = jax.random.split(rng_)
            noise = jax.random.normal(key_, shape=xt.shape).clip(-self.randn_clip_value, self.randn_clip_value)
            # Add noise at every step except the final one (t == 0).
            xt = xt_mu + (t > 0) * (self.ddpm_std[t] * noise) * self.temperature
            return (xt, rng_), ()

        sampling_key, xT_key = jax.random.split(rng)
        xT = jax.random.normal(xT_key, (B, self.act_chunk_dim))
        # Iterate t = T-1, ..., 0 to match the training range randint(0, T).
        # (The previous arange(T, 0, -1) indexed the length-T schedule arrays
        # at t == T, which JAX silently clamps to T-1, and queried the model
        # at a timestep it was never trained on.)
        timesteps = jnp.arange(self.denoising_steps - 1, -1, -1)
        output_tuple, () = p_sampling((xT, sampling_key), timesteps)

        return output_tuple  # (action_0, rng)

    def save_ckpt(self, file_name: str, save_dir: str = "checkpoints"):
        """Asynchronously save the model state with Orbax.

        Args:
            file_name: checkpoint name inside `save_dir`.
            save_dir: directory (relative to cwd) holding checkpoints.
        """
        # makedirs handles nested paths and is race-free, unlike mkdir.
        os.makedirs(save_dir, exist_ok=True)

        # Orbax requires an absolute checkpoint path.
        path = os.path.join(os.getcwd(), save_dir, file_name)
        ckptr = ocp.AsyncCheckpointer(ocp.StandardCheckpointHandler())
        state = nnx.state(self.model)
        ckptr.save(path, args=ocp.args.StandardSave(state), force=True)
        ckptr.wait_until_finished()
        print(f"Successfully save model to {path}")

    def load_ckpt(self, file_name: str, save_dir: str = "checkpoints", cpu: bool = False):
        """Restore model parameters from an Orbax checkpoint in place.

        Args:
            file_name: checkpoint name inside `save_dir`.
            save_dir: directory (relative to cwd) holding checkpoints.
            cpu: accepted for interface compatibility; currently unused.

        Returns:
            The updated model.
        """
        path = os.path.join(os.getcwd(), save_dir, file_name)
        # Use the current model state as the restore target structure.
        state = nnx.state(self.model)
        ckptr = ocp.AsyncCheckpointer(ocp.StandardCheckpointHandler())
        restored = ckptr.restore(path, args=ocp.args.StandardRestore(state))
        nnx.update(self.model, restored)
        print(f"Successfully load model from {path}")
        return self.model


if __name__ == "__main__":
    from model.networks.mlp import DiffusionMLP

    # Smoke-test configuration.
    obs_dim = 20
    obs_steps = 2
    act_dim = 6
    act_horizon = 5

    policy = DiffusionPolicy(
        seed=0,
        horizon_steps=act_horizon,
        obs_dim=obs_dim,
        cond_steps=obs_steps,
        action_dim=act_dim,
        denoising_steps=10,
        num_act_samples=100,
    )

    class DummyGuid(nnx.Module):
        """Toy guidance net mapping one flattened action chunk to a scalar.

        Operates on a single (unbatched) sample; batching is done via vmap.
        """

        def __init__(self):
            self.layer = nnx.Linear(act_dim * act_horizon, 1, rngs=nnx.Rngs(0))

        def __call__(self, a, cond):
            return jnp.mean((self.layer(a.reshape(-1))) ** 2)

    dummy_guid = DummyGuid()

    fake_actions = jax.random.normal(policy.rng, (256, act_horizon, act_dim))
    fake_obs = jax.random.normal(policy.rng, (256, obs_steps, obs_dim))

    # Single-sample forward pass, then a batched gradient through vmap.
    dummy_guid(fake_actions[0], fake_obs[0])
    batched_grad_fn = jax.vmap(jax.grad(dummy_guid, argnums=0), in_axes=(0, 0))
    batched_grad = batched_grad_fn(fake_actions, fake_obs)

    # Guided sampling followed by one training update.
    act = policy(fake_obs, dummy_guid)
    policy.update(fake_actions, fake_obs)