import jax
from jax import numpy as jnp
from jax import random
import matplotlib.pyplot as plt


def predict(W, b, x):
    """Affine model: return ``x @ W + b``."""
    return x @ W + b


def mse(W, b, x_batched, y_batched):
    """Mean over the batch of 0.5 * ||y - predict(W, b, x)||^2."""

    def per_example_loss(x, y):
        # Scalar half squared L2 error for one (x, y) pair.
        residual = y - predict(W, b, x)
        return 0.5 * jnp.inner(residual, residual)

    # vmap turns the per-example loss into a batched one; average the batch.
    per_example = jax.vmap(per_example_loss)(x_batched, y_batched)
    return jnp.mean(per_example, axis=0)


# Problem size: 20 samples of a 10-dim input mapped to a 5-dim output.
n_samples = 20
x_dim = 10
y_dim = 5

# Ground-truth parameters drawn from independent PRNG streams.
key = random.PRNGKey(0)
k1, k2 = random.split(key)
W = random.normal(k1, (x_dim, y_dim))
b = random.normal(k2, (y_dim,))

# NOTE(review): k1 is reused here after already being consumed for W above;
# JAX convention is to split a fresh key per draw. Harmless for this demo,
# but changing it would alter the generated data.
key_sample, key_noise = random.split(k1)
x_samples = random.normal(key_sample, (n_samples, x_dim))
y_samples = predict(W, b, x_samples) + 0.1 * random.normal(key_noise, (n_samples, y_dim))
print('x shape:', x_samples.shape, 'y shape:', y_samples.shape)

# Parameters to be learned, initialized to zero with the true shapes.
W_hat = jnp.zeros_like(W)
b_hat = jnp.zeros_like(b)


@jax.jit
def update_params(W, b, x, y, lr):
    """One gradient-descent step on `mse`.

    Args:
        W: weight matrix, shape (x_dim, y_dim).
        b: bias vector, shape (y_dim,).
        x: batched inputs, shape (n, x_dim).
        y: batched targets, shape (n, y_dim).
        lr: scalar learning rate.

    Returns:
        Tuple ``(W_new, b_new)`` after a single update.
    """
    # Take gradients w.r.t. W and b in a single pass; the original called
    # jax.grad twice, re-running the forward+backward computation per argument.
    dW, db = jax.grad(mse, argnums=(0, 1))(W, b, x, y)
    return W - lr * dW, b - lr * db


# Full-batch gradient descent with a fixed step size for this toy problem.
learning_rate = 0.3
# Loss at the ground-truth parameters: a floor set by the 0.1-scaled noise.
print('Loss for "true" W, b', mse(W, b, x_samples, y_samples))

loss_his = []
for i in range(101):
    W_hat, b_hat = update_params(W_hat, b_hat, x_samples, y_samples, learning_rate)
    loss = mse(W_hat, b_hat, x_samples, y_samples)
    loss_his.append(loss)
    if i % 5 == 0:
        print(f'Loss step {i}:', loss)

# Plot the recorded loss history to visualize convergence.
plt.plot(loss_his)
plt.show()
