import jax
from jax import random
from jax import numpy as jnp
import flax
import matplotlib.pyplot as plt

# ---- Synthetic data generation -------------------------------------------
# Problem size: n_samples observations mapping x_dim inputs to y_dim outputs.
n_samples = 20
x_dim = 10
y_dim = 5

# Derive four independent PRNG keys up front.  The previous version re-split
# k1 after it had already been consumed to draw W, which reuses a JAX PRNG
# key and correlates the weight draw with the sample/noise draws.
key = random.PRNGKey(0)
k1, k2, key_sample, key_noise = random.split(key, 4)
W = random.normal(k1, (x_dim, y_dim))
b = random.normal(k2, (y_dim,))
# Package the ground-truth parameters in the same pytree layout that
# flax.linen.Dense.init produces, so mse() can evaluate them directly.
true_params = flax.core.freeze({
    'params': {
        'kernel': W,
        'bias': b
    }
})

# Noisy linear targets: y = xW + b + 0.1 * N(0, 1), one row per sample.
x_samples = random.normal(key_sample, (n_samples, x_dim))
y_samples = jnp.dot(x_samples, W) + b + 0.1 * random.normal(key_noise, (n_samples, y_dim))
print('x shape:', x_samples.shape, 'y shape:', y_samples.shape)

# A single dense (affine) layer with 5 output features; the input width is
# inferred lazily by Flax from the example passed to init().
model = flax.linen.Dense(features=5)

root_key = random.PRNGKey(0)
data_key, init_key = random.split(root_key)
dummy_x = random.normal(data_key, (10,))  # example input, used only for shape inference
params = model.init(init_key, dummy_x)  # builds the parameter pytree
print('params', jax.tree_util.tree_map(lambda leaf: leaf.shape, params))  # sanity-check shapes


@jax.jit
def mse(params, x_batched, y_batched):
    """Mean of the per-example half squared-error loss over a batch.

    Args:
        params: parameter pytree accepted by ``model.apply``.
        x_batched: batch of inputs, one example per leading-axis row.
        y_batched: matching batch of regression targets.

    Returns:
        Scalar loss: mean over the batch of 0.5 * ||model(x) - y||^2.
    """
    def half_sq_err(x, y):
        # Residual of a single example; its inner product with itself is
        # the squared Euclidean norm.
        err = model.apply(params, x) - y
        return jnp.inner(err, err) / 2.0

    # Vectorize over the batch axis, then reduce to a scalar mean.
    per_example = jax.vmap(half_sq_err)(x_batched, y_batched)
    return jnp.mean(per_example, axis=0)


# SGD step size for the manual training loop below.
learning_rate = 0.3
# Loss under the ground-truth parameters gives the noise floor to compare
# training progress against.
baseline_loss = mse(true_params, x_samples, y_samples)
print('Loss for "true" W, b:', baseline_loss)
# Returns (loss, grads) in one call so the loop needs a single evaluation.
loss_grad_fn = jax.value_and_grad(mse)


@jax.jit
def update_params(params, learning_rate, grads):
    """Apply one SGD step to every leaf of the parameter pytree.

    Args:
        params: current parameter pytree.
        learning_rate: scalar step size.
        grads: gradient pytree with the same structure as ``params``.

    Returns:
        New pytree with each leaf moved against its gradient.
    """
    def sgd_step(param, grad):
        return param - learning_rate * grad

    return jax.tree_util.tree_map(sgd_step, params, grads)


# Manual SGD training loop: evaluate loss + grads, step, record history.
loss_his = []
for step in range(101):
    loss_val, grads = loss_grad_fn(params, x_samples, y_samples)
    loss_his.append(loss_val)
    params = update_params(params, learning_rate, grads)
    if step % 10 == 0:
        print(f'Loss step {step}:', loss_val)

# Visualize convergence of the loss over training steps.
plt.plot(loss_his)
plt.show()
