import os

# Disable XLA GPU memory preallocation. The flag is read when the JAX
# backend initializes, so it must be in the environment before anything
# touches jax — previously it was set after `import jax`, which risks
# being too late if import side effects initialize the backend.
os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"

import time
from functools import partial

import jax
import jax.numpy as jnp
import optax
import orbax.checkpoint as ocp
from flax import nnx
from tqdm import trange

from model.data_processing.data_loaders import KinematicBatch


class CtrlModel:
    """
    Training/inference wrapper around an ``nnx.Module`` for kinematic regression.

    Used for partial IK model: (x, y, z) -> (j1, j2, j3, j4, j5, j6)
    Used for FK model: (j1, j2, j3, j4, j5, j6) -> (x, y, z)
    """

    def __init__(self, model: nnx.Module, max_grad_norm=1, lr=1e-3, weight_decay=1e-4, model_file: str = None):
        """
        Args:
            model: the network to train / run inference with.
            max_grad_norm: global-norm gradient clipping threshold.
            lr: AdamW learning rate.
            weight_decay: AdamW weight decay.
            model_file: optional checkpoint file name (under "checkpoints/")
                to restore weights from right away.
        """
        self.model = model
        # Clip gradients by global norm before the AdamW update.
        opt = optax.chain(
            optax.clip_by_global_norm(max_norm=max_grad_norm), optax.adamw(learning_rate=lr, weight_decay=weight_decay)
        )
        self.opt = nnx.Optimizer(self.model, opt)

        if model_file is not None:
            self.load_ckpt(model_file)

    def __call__(self, pos):
        """
        Run a (jit-compiled) forward pass.

        The inputs should be normed in [-1, 1];
        returns the normed outputs in [-1, 1], shape (B, out_dim).
        """
        self.model.eval()
        if pos.ndim == 1:
            # promote a single sample to a batch of one
            pos = pos[None, :]
        return self._jit_call(self.model, pos)  # (B, out_dim)

    @partial(nnx.jit, static_argnums=(0,))
    def _jit_call(self, model: nnx.Module, x):
        # `self` is static; the model is traced so inference is jit-fast.
        # return denormalize_joint(model(normalize_end(x)))
        return model(x)  # jit compiled so that inference can by very fast!

    def update(self, batch: KinematicBatch):
        """
        Run one gradient step and return the loss info dict.

        batch.xyz: (batch_size, 3)
        batch.joint: (batch_size, 6)
        batch.rpy: (batch_size, 3)
        """
        self.model.train()
        info = self.jit_train_step(self.model, self.opt, batch.xyz, batch.joint)
        return info

    @partial(nnx.jit, static_argnums=(0,))
    def jit_train_step(self, model: nnx.Module, optimizer: nnx.Optimizer, inputs: jnp.ndarray, targets: jnp.ndarray):
        """Jit-compiled training step: mean-squared-error loss + optimizer update."""
        # TODO: importance re-weighted L2-loss

        def l2_loss_fn(model: nnx.Module):  # fixed: annotation was `nnx.module` (typo)
            predictions = model(inputs)
            loss = jnp.mean((predictions - targets) ** 2)
            return loss, {"loss": loss}

        (_, info), grads = nnx.value_and_grad(l2_loss_fn, has_aux=True)(model)
        optimizer.update(grads)  # inplace update
        return info

    def train(self, dataloader, num_epochs: int, save_as: str, val_data=None):
        """
        Train for ``num_epochs`` epochs, logging to tensorboard.

        Checkpoints every 100 epochs (keeping only improvements in validation
        loss when ``val_data`` is given) and once more at the end.
        """
        from tensorboardX import SummaryWriter

        writer = SummaryWriter(log_dir=f".log/tensorboard/{save_as}_{round(time.time())}")

        min_val_loss = float("inf")

        for epoch in trange(num_epochs, desc="Train"):

            loss_rec = [self.update(batch)["loss"].item() for batch in dataloader]

            # Guard the mean against an empty dataloader (was a ZeroDivisionError).
            avg_loss = sum(loss_rec) / len(loss_rec) if loss_rec else float("nan")
            writer.add_scalar("train/loss", avg_loss, epoch)

            if (1 + epoch) % 10 == 0:
                print(f"Epoch{epoch+1}: loss = {avg_loss}")

            if (1 + epoch) % 100 == 0:
                if val_data is None:
                    self.save_ckpt(file_name=save_as)
                else:
                    # assumes val_data exposes .x (inputs) and .y (targets) -- TODO confirm
                    pred_joints = self(val_data.x)
                    val_loss = jnp.mean((pred_joints - val_data.y) ** 2)
                    if val_loss < min_val_loss:
                        min_val_loss = val_loss
                        self.save_ckpt(file_name=save_as)
                    print(f"Epoch{epoch+1}: val_loss = {val_loss}")
                    writer.add_scalar("val/loss", val_loss, epoch)

        self.save_ckpt(file_name=save_as)

    def save_ckpt(self, file_name: str, save_dir: str = "checkpoints"):
        """Save the model state to ``<cwd>/<save_dir>/<file_name>`` (blocks until done)."""
        # makedirs is race-free and handles nested dirs (was exists() + mkdir).
        os.makedirs(save_dir, exist_ok=True)

        # Checkpoint path should be absolute
        path = os.path.join(os.getcwd(), save_dir, file_name)
        ckptr = ocp.AsyncCheckpointer(ocp.StandardCheckpointHandler())
        state = nnx.state(self.model)
        ckptr.save(path, args=ocp.args.StandardSave(state), force=True)
        ckptr.wait_until_finished()  # block until the async write completes
        print(f"Successfully save model to {path}")

    def load_ckpt(self, file_name: str, save_dir: str = "checkpoints", cpu: bool = False):
        """
        Restore model state in place from ``<cwd>/<save_dir>/<file_name>``.

        NOTE(review): ``cpu`` is currently unused; kept for interface
        compatibility -- confirm whether device placement is still needed.
        """
        path = os.path.join(os.getcwd(), save_dir, file_name)
        # Use the current model state as a structural template for restore.
        state = nnx.state(self.model)
        # Load the parameters
        ckptr = ocp.AsyncCheckpointer(ocp.StandardCheckpointHandler())
        restored = ckptr.restore(path, args=ocp.args.StandardRestore(state))
        nnx.update(self.model, restored)
        print(f"Successfully load model from {path}")
        return self.model


if __name__ == "__main__":

    def test_model_save_and_load():
        """Smoke test: build a small MLP, save a checkpoint, then reload it."""
        from model.networks.mlp import MLP

        network = MLP(rngs=nnx.Rngs(jax.random.PRNGKey(0)), layer_dims=[6, 64, 64, 6])
        model = CtrlModel(network)

        model.save_ckpt("test.ckp")
        model.load_ckpt("test.ckp")

    # Previously the function was defined but never invoked, so running
    # this file as a script did nothing.
    test_model_save_and_load()