import jax
import optax
from jax import random
from jax import numpy as jnp
from jax.tree_util import tree_map
import flax
from flax.core import freeze
from flax import linen as nn
from PyCmpltrtok.common import sep
from functools import partial
import matplotlib.pyplot as plt


class BiasAdderWithRunningMean(nn.Module):
    """Subtract an exponential running mean of the batch and add a learned bias.

    Demonstrates Flax mutable (non-trainable) state: the running mean lives in
    the 'batch_stats' collection, while 'bias' is a trainable parameter.

    Attributes:
        decay: EMA decay factor for the running mean (closer to 1 = slower).
    """
    decay: float = 0.99

    @nn.compact
    def __call__(self, x):
        # True only on apply after init has created the collection; on the very
        # first (init) pass we must not update the freshly-zeroed statistics.
        is_initialized = self.has_variable('batch_stats', 'mean')
        # Non-trainable running mean with shape x.shape[1:] (per-feature).
        # The self.variable call itself performs initialization when needed.
        ra_mean = self.variable('batch_stats', 'mean', lambda s: jnp.zeros(s), x.shape[1:])
        bias = self.param('bias', lambda rng, shape: jnp.zeros(shape), x.shape[1:])
        if is_initialized:
            # BUG FIX: the original reduced with keepdims=True, which changed
            # the stored mean's shape from x.shape[1:] to (1, *x.shape[1:])
            # after the first update — silently altering the state pytree and
            # forcing a jit recompilation of update_step. Reducing without
            # keepdims keeps the shape stable; the subtraction below broadcasts
            # to the same numerical result either way.
            batch_mean = jnp.mean(x, axis=0)
            ra_mean.value = self.decay * ra_mean.value + (1.0 - self.decay) * batch_mean
        # Broadcasts (features,) stats/bias across the batch dimension.
        return x - ra_mean.value + bias


@partial(jax.jit, static_argnums=(0, 1))
def update_step(tx, apply_fn, x, opt_state, params, state):
    """Run one optimization step: forward, grad, optax update, apply updates.

    Args:
        tx: optax GradientTransformation (static: hashable NamedTuple of fns).
        apply_fn: the model's apply function (static).
        x: input batch; also used as the reconstruction target.
        opt_state: current optimizer state.
        params: current trainable parameters.
        state: non-trainable variable collections (e.g. 'batch_stats').

    Returns:
        (opt_state, params, state, loss) — all updated for this step.

    NOTE: sep/print here execute only while JAX traces this function (once per
    compilation), not on every call.
    """
    sep('Entered update_step')

    def loss(params):
        # Mark the non-param collections mutable so the updated running
        # statistics are returned alongside the model output.
        y, updated_state = apply_fn({'params': params, **state}, x, mutable=list(state.keys()))
        l = ((x - y) ** 2).sum()
        return l, updated_state

    # has_aux=True: loss returns (scalar, aux) where aux is the mutated state.
    (l, state), grads = jax.value_and_grad(loss, has_aux=True)(params)
    print('state', state)
    print('grads', grads)
    # FIX: pass params as the third argument — required by optax transforms
    # that depend on the current parameters (e.g. weight decay); a no-op for
    # plain SGD, so existing behavior is unchanged.
    updates, opt_state = tx.update(grads, opt_state, params)
    print('opt_state', opt_state)
    print('updates', updates)
    params = optax.apply_updates(params, updates)
    sep('Leaving update_step')
    return opt_state, params, state, l


# --- Setup: model, variables, optimizer ---
x = jnp.ones((10, 5))
model = BiasAdderWithRunningMean()
variables = model.init(random.PRNGKey(0), x)
# FIX: the original `state, params = variables.pop('params')` relied on
# FrozenDict.pop returning (rest, value); newer Flax versions return a plain
# dict from init, whose .pop returns only the value, breaking the unpack.
# Split explicitly so both container types work identically.
params = variables['params']
state = {k: v for k, v in variables.items() if k != 'params'}
print('state', state)
print('params', params)
del variables  # avoid accidentally passing the combined dict around
tx = optax.sgd(learning_rate=0.02)
opt_state = tx.init(params)
print('opt_state', tree_map(lambda x: x.shape, opt_state))

# --- Training loop ---
loss_hist = []
for i in range(400):
    sep(i)
    # tx and model.apply are static argnums, so this compiles once and reuses
    # the same executable on every iteration.
    opt_state, params, state, loss = update_step(tx, model.apply, x, opt_state, params, state)
    loss_hist.append(loss)
    print('opt_state', opt_state)
    print('state', state)
    print('params', params)

plt.plot(loss_hist)
plt.show()
