import argparse
import tensorflow as tf
from tensorflow import keras as K
import numpy as np
import matplotlib.pyplot as plt


# Command-line options: data range [low, high], sample/centre counts, epochs.
parser = argparse.ArgumentParser()
for flag, arg_type, default in [
    ("--low", float, 0.1),
    ("--high", float, 10),
    ("--n_data", int, 50),
    ("--n_centre", int, 25),
    ("--epoch", int, 500),
]:
    parser.add_argument(flag, type=arg_type, default=default)
args = parser.parse_args()


# data: n_data samples of y = log10(x) on [low, high]
X = np.linspace(args.low, args.high, args.n_data).astype(np.float32)  # [n] — 1-D here; reshaped to [n, 1] at X_hat below
Y = np.log10(X).astype(np.float32)  # [n]
print("X:", X.shape, ", Y:", Y.shape)


def euclidean(A, B=None, sqrt=False):
    """Pairwise squared Euclidean distances between rows of A and rows of B.

    A: [m, d] tensor. B: [n, d] tensor, or None/A to compare A with itself.
    Returns a [m, n] matrix of squared distances, or true distances when
    sqrt=True, via the identity ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2.
    """
    if B is None or B is A:
        gram = tf.matmul(A, A, transpose_b=True)
        sq_a = sq_b = tf.linalg.diag_part(gram)
    else:
        gram = tf.matmul(A, B, transpose_b=True)
        sq_a = tf.linalg.diag_part(tf.matmul(A, A, transpose_b=True))
        sq_b = tf.linalg.diag_part(tf.matmul(B, B, transpose_b=True))
    dist = sq_a[:, None] - 2.0 * gram + sq_b[None, :]
    # Clamp tiny negatives produced by floating-point cancellation.
    dist = tf.maximum(dist, 0.0)
    if sqrt:
        # sqrt has an infinite gradient at 0: nudge exact zeros up before
        # the sqrt, then force those entries back to exactly 0 afterwards.
        zero_mask = tf.cast(tf.equal(dist, 0.0), "float32")
        dist = tf.math.sqrt(dist + zero_mask * 1e-16)
        dist = dist * (1.0 - zero_mask)
    return dist


# model
class RBF_Cell(K.Model):
    """A single radial-basis unit with a Gaussian kernel."""

    def __init__(self, centre, var):
        """centre: 1-D array/tensor, one point in input space.
        var: scalar, variance of the Gaussian kernel.
        """
        super(RBF_Cell, self).__init__()
        # Store the centre as a [1, d] row so euclidean() yields one
        # distance per input sample.
        self.centre = tf.cast(centre[np.newaxis, :], "float32")
        # Pre-fold the Gaussian exponent factor -1/(2*var).
        self.coef = tf.cast(- 0.5 / var, "float32")

    def call(self, x):
        """Map inputs [n, d] to Gaussian activations [n, 1]."""
        sq_dist = euclidean(x, self.centre)
        return tf.math.exp(self.coef * sq_dist)


class RBF_Net(K.Model):
    """RBF network: a bank of Gaussian units followed by one linear layer."""

    def __init__(self, centres):
        super(RBF_Net, self).__init__()
        n_centre = centres.shape[0]
        # Bandwidth heuristic: half the squared span between the first and
        # last centre, divided by the number of centres (assumes centres are
        # ordered along the input axis — TODO confirm for other callers).
        var = 0.5 * euclidean(centres[-1:], centres[:1]) / n_centre
        print("var:", var)
        self.rbf_list = [RBF_Cell(c, var) for c in centres]
        self.fc = K.layers.Dense(1, input_shape=[n_centre])

    def call(self, x):
        """[n, 1] -> [n, #centres] -> [n, 1]"""
        feats = [rbf(x) for rbf in self.rbf_list]
        return self.fc(tf.concat(feats, axis=1))


# model: one RBF cell per centre, centres spread evenly over the data range
centres = np.linspace(X.min(), X.max(), args.n_centre).reshape(-1, 1)  # [n_centre, 1]
model = RBF_Net(centres)
optimizer = K.optimizers.Adam()


# training: plain gradient-tape loop minimising the L2 loss 0.5*sum((y - z)^2)
X_hat = X[:, np.newaxis]  # [n, 1]
Y_hat = Y[:, np.newaxis]
print("X^:", X_hat.shape)
loss_list = []
for epoch in range(args.epoch):
    with tf.GradientTape() as tape:
        pred = model(X_hat)
        loss = tf.nn.l2_loss(Y_hat - pred)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    loss_list.append(loss.numpy())
    print("epoch:", epoch, ", loss:", loss.numpy())


# visualization: overlay the fitted curve on the training points, then
# save the figure to disk.
fig = plt.figure()
plt.title("y = lg(x)")
plt.scatter(X, Y, s=25)
fitted = model(X_hat)  # [n, 1] predictions over the training inputs
plt.plot(X, fitted)
plt.show()
fig.savefig("log10.png")
