import jax
import optax
from jax import random
from jax import numpy as jnp
from jax.tree_util import tree_map
import flax
from flax.core import freeze, unfreeze
from flax import linen as nn
from flax.training.train_state import TrainState
from flax import core
from PyCmpltrtok.common import sep
from functools import partial
import matplotlib.pyplot as plt
from typing import Any


class TrainStateWithStatVars(TrainState):
    """A ``TrainState`` that additionally carries non-trainable statistic
    variables (e.g. the ``batch_stats`` collection of running averages)."""

    # Extra pytree leaf: the mutable-collection variables kept alongside params.
    stat_vars: core.FrozenDict[str, Any]

    @classmethod
    def create(cls, *, apply_fn, params, tx, stat_vars, **kwargs):
        """Build a fresh state at step 0 with a newly initialized optimizer."""
        return cls(
            step=0,
            apply_fn=apply_fn,
            params=params,
            stat_vars=stat_vars,
            tx=tx,
            opt_state=tx.init(params),
            **kwargs,
        )

    def apply_gradients(self, *, grads, stat_vars_update, **kwargs):
        """Apply one optimizer step and merge in updated statistic variables.

        Returns a new state with ``step`` incremented, parameters updated via
        the optimizer transform, and ``stat_vars`` overlaid with
        ``stat_vars_update``.
        """
        param_updates, next_opt_state = self.tx.update(
            grads, self.opt_state, self.params)
        # Overlay the freshly mutated collections on top of the old ones.
        merged_stats = core.freeze({**self.stat_vars, **stat_vars_update})
        return self.replace(
            step=self.step + 1,
            params=optax.apply_updates(self.params, param_updates),
            opt_state=next_opt_state,
            stat_vars=merged_stats,
            **kwargs,
        )


class BiasAdderWithRunningMean(nn.Module):
    """Subtract a running mean of the batch and add a learnable bias.

    Keeps an exponential moving average of the per-feature batch mean in the
    ``batch_stats`` collection, analogous to how BatchNorm tracks statistics.
    """

    decay: jnp.float32 = 0.99  # EMA decay factor for the running mean

    @nn.compact
    def __call__(self, x):
        # Must be queried BEFORE self.variable(), which creates the variable,
        # so that the very first (init) call skips the EMA update.
        is_initialized = self.has_variable('batch_stats', 'mean')
        ra_mean = self.variable(
            'batch_stats', 'mean',
            lambda s: jnp.zeros(s, dtype=jnp.float32), x.shape[1:])
        bias = self.param(
            'bias', lambda rng, shape: jnp.zeros(shape, dtype=jnp.float32),
            x.shape[1:])
        if is_initialized:
            # BUG FIX: the original reduced with keepdims=True, so the stored
            # mean changed shape from x.shape[1:] to (1, *x.shape[1:]) on the
            # first update — an inconsistent variable/pytree shape across
            # steps. Reduce over the batch axis without keepdims so the EMA
            # keeps the shape it was initialized with.
            batch_mean = jnp.mean(x, axis=0)
            ra_mean.value = (self.decay * ra_mean.value
                             + (1.0 - self.decay) * batch_mean)
        return x - ra_mean.value + bias


# Build a toy input batch and initialize the model's variable collections.
x = jnp.ones((10, 5), dtype=jnp.float32)
model = BiasAdderWithRunningMean()
variables = model.init(random.PRNGKey(0), x)  # fixed typo: was 'varialbes'
# FrozenDict.pop returns (dict-without-key, popped-value): the remainder holds
# the non-trainable collections (here 'batch_stats'), the value is the params.
batch_stats_wrapper, params = variables.pop('params')
del variables  # avoid accidentally reusing the combined dict
tx = optax.sgd(learning_rate=0.02)
trainStateWithStatVars = TrainStateWithStatVars.create(
    apply_fn=model.apply,
    params=params,
    tx=tx,
    stat_vars=batch_stats_wrapper,
)


def make_loss_fn(apply_fn):
    """Return a loss closure over ``apply_fn``.

    The closure applies the model with ``batch_stats`` marked mutable and
    returns ``(sum_of_squared_residuals, updated_collections)`` — the aux
    output carries the mutated statistic variables for the train state.
    """
    def loss(params, stat_vars, x):
        variables = {'params': params, **stat_vars}
        y, updated_stats = apply_fn(variables, x, mutable='batch_stats')
        residual = x - y
        return (residual ** 2).sum(), updated_stats
    return loss


# Differentiate the loss w.r.t. params; aux carries the mutated batch_stats.
grad_fn = jax.value_and_grad(
    make_loss_fn(trainStateWithStatVars.apply_fn), has_aux=True)
print('state', trainStateWithStatVars)

loss_his = []
for step in range(400):
    sep(step)
    (loss, update_state), grads = grad_fn(
        trainStateWithStatVars.params, trainStateWithStatVars.stat_vars, x)
    loss_his.append(loss)
    trainStateWithStatVars = trainStateWithStatVars.apply_gradients(
        grads=grads, stat_vars_update=update_state)
    for field in ('opt_state', 'stat_vars', 'params'):
        print(field, getattr(trainStateWithStatVars, field))

# Visualize the loss trajectory.
plt.plot(loss_his)
plt.show()
