text stringlengths 0 4.99k |
|---|
for w, b in zip(self.block_weights, self.biases): |
activations = tnp.matmul(activations, w) + b |
# ReLU activation function
activations = tnp.maximum(activations, 0.0) |
return tnp.matmul(activations, self.linear_layer) |
Just like with any other Keras model, we can use any supported optimizer, loss, metrics, or callbacks that we want.
Let's see how the model performs! |
# Build the block-based TNP model and run it through the standard Keras
# training machinery — any supported optimizer, loss, or metric works.
model = TNPForwardFeedRegressionNetwork(blocks=[3, 3])
percent_error_metric = keras.metrics.MeanAbsolutePercentageError()
model.compile(
    optimizer="adam",
    loss="mean_squared_error",
    metrics=[percent_error_metric],
)
evaluate_model(model)
Mean absolute percent error before training: 422.45343017578125 |
Mean absolute percent error after training: 97.24715423583984 |
Great! Our model seems to be effectively learning to solve the problem at hand. |
We can also write our own custom loss function using TNP. |
def tnp_mse(y_true, y_pred):
    """Mean squared error along the batch axis (axis 0), using TNP ops."""
    squared_errors = tnp.square(y_true - y_pred)
    return tnp.mean(squared_errors, axis=0)
# Start from a clean graph/name state, then train the same architecture
# with the custom TNP loss defined above.
keras.backend.clear_session()
model = TNPForwardFeedRegressionNetwork(blocks=[3, 3])
model.compile(
    loss=tnp_mse,
    optimizer="adam",
    metrics=[keras.metrics.MeanAbsolutePercentageError()],
)
evaluate_model(model)
Mean absolute percent error before training: 79.84039306640625 |
Mean absolute percent error after training: 28.658035278320312 |
Implementing a Keras Layer Based Model with TNP |
If desired, TNP can also be used in a layer-oriented Keras code structure. Let's implement the same model, but using a layered approach!
def tnp_relu(x):
    """Element-wise ReLU: clamp negative entries of *x* to zero."""
    return tnp.maximum(0, x)
class TNPDense(keras.layers.Layer):
    """A dense (fully connected) layer whose forward pass uses TNP ops.

    Weights are created lazily in ``build`` once the input shape is
    known, following the standard Keras layer lifecycle.
    """

    def __init__(self, units, activation=None):
        super().__init__()
        # Output width and optional element-wise activation callable.
        self.units = units
        self.activation = activation

    def build(self, input_shape):
        # Kernel is (input_dim, units); bias is (units,). Both are
        # trainable and randomly initialized.
        input_dim = input_shape[1]
        self.w = self.add_weight(
            name="weights",
            shape=(input_dim, self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.bias = self.add_weight(
            name="bias",
            shape=(self.units,),
            initializer="random_normal",
            trainable=True,
        )

    def call(self, inputs):
        # Affine transform, then the activation when one was supplied.
        preactivation = tnp.matmul(inputs, self.w) + self.bias
        return self.activation(preactivation) if self.activation else preactivation
def create_layered_tnp_model():
    """Assemble the 3-3-1 TNP dense network as a Sequential model."""
    layers = [
        TNPDense(3, activation=tnp_relu),
        TNPDense(3, activation=tnp_relu),
        TNPDense(1),
    ]
    return keras.Sequential(layers)
# Train and inspect the layered variant of the model.
model = create_layered_tnp_model()
model.compile(
    loss="mean_squared_error",
    optimizer="adam",
    metrics=[keras.metrics.MeanAbsolutePercentageError()],
)
# Build with a 13-feature input so summary() can report parameter counts
# (presumably the dataset's feature width — confirm against the loader).
model.build((None, 13))
model.summary()
evaluate_model(model)
Model: \"sequential\" |
_________________________________________________________________ |
Layer (type) Output Shape Param # |
================================================================= |
tnp_dense (TNPDense) (None, 3) 42 |
_________________________________________________________________ |
tnp_dense_1 (TNPDense) (None, 3) 12 |
_________________________________________________________________ |
tnp_dense_2 (TNPDense) (None, 1) 4 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.