import jax
from jax import random
from jax import numpy as jnp
import flax
from flax.core import freeze
from flax import linen as nn
from PyCmpltrtok.common import sep


class BiasAdderWithRunningMean(nn.Module):
    """Subtract an exponential running mean of the batch and add a learned bias.

    The running mean is stored in the mutable 'batch_stats' collection and is
    updated on every call once initialized; 'bias' is a trainable parameter in
    'params'. Both have shape ``x.shape[1:]`` (per-feature).
    """
    decay: float = 0.99  # EMA decay factor for the running mean

    @nn.compact
    def __call__(self, x):
        # Must be checked *before* self.variable creates the entry below,
        # otherwise the first (init) call would also apply an EMA update.
        is_initialized = self.has_variable('batch_stats', 'mean')
        # self.variable both declares and (on first call) initializes the state.
        ra_mean = self.variable('batch_stats', 'mean', jnp.zeros, x.shape[1:])
        bias = self.param('bias', lambda rng, shape: jnp.zeros(shape), x.shape[1:])
        if is_initialized:
            # No keepdims here: the batch mean must have shape x.shape[1:] so
            # the stored state keeps its declared shape instead of silently
            # growing a leading batch axis of 1 after the first update.
            ra_mean.value = (self.decay * ra_mean.value
                             + (1.0 - self.decay) * jnp.mean(x, axis=0))
        return x - ra_mean.value + bias


# Demo driver: initialize the module, then run three apply/merge cycles.
key1, key2 = random.split(random.PRNGKey(0), 2)
x = jnp.ones((10, 5))
model = BiasAdderWithRunningMean()
variables = model.init(key1, x)
print('variables', variables)

# Each cycle recomputes the running mean (the mutable 'batch_stats'
# collection) and merges the returned state back into `variables`.
for step in range(1, 4):
    sep(str(step))
    y, updated_state = model.apply(variables, x, mutable=['batch_stats'])
    print('y', y)
    print('updated_state', updated_state)
    # FrozenDict.pop returns (dict-without-key, popped-value), so
    # `old_state` is everything except 'params' (i.e. the stale state).
    old_state, params = variables.pop('params')
    variables = freeze({'params': params, **updated_state})
    print('variables', variables)
