text stringlengths 0 4.99k |
|---|
# Create a GRN for each feature independently |
for idx in range(num_features): |
grn = GatedResidualNetwork(units, dropout_rate) |
self.grns.append(grn) |
# Create a GRN for the concatenation of all the features |
self.grn_concat = GatedResidualNetwork(units, dropout_rate) |
self.softmax = layers.Dense(units=num_features, activation=\"softmax\") |
def call(self, inputs):
    """Combine per-feature GRN outputs using learned selection weights.

    Args:
        inputs: list of per-feature tensors, one entry per GRN in
            `self.grns` (presumably each of shape (batch, input_dim) —
            confirm against the encoder that produces them).

    Returns:
        Tensor of shape (batch, units): the selection-weighted sum of
        the per-feature GRN outputs.
    """
    # Selection weights: a GRN over the concatenation of all features,
    # then a softmax across features -> (batch, num_features, 1).
    v = layers.concatenate(inputs)
    v = self.grn_concat(v)
    v = tf.expand_dims(self.softmax(v), axis=-1)

    # Per-feature nonlinear transforms stacked on a new feature axis
    # -> (batch, num_features, units). A comprehension over zip replaces
    # the original index loop, which also shadowed the builtin `input`.
    x = tf.stack(
        [grn(feature) for grn, feature in zip(self.grns, inputs)], axis=1
    )

    # Weighted sum over the feature axis:
    # (batch, 1, num_features) @ (batch, num_features, units)
    # -> (batch, 1, units) -> (batch, units).
    return tf.squeeze(tf.matmul(v, x, transpose_a=True), axis=1)
Create the Gated Residual and Variable Selection Networks model |
def create_model(encoding_size, grn_dropout_rate=None):
    """Build the Gated Residual / Variable Selection binary classifier.

    Args:
        encoding_size: dimensionality of the per-feature encodings
            (the GRN `units`).
        grn_dropout_rate: dropout rate used inside the GRNs. Defaults to
            the module-level `dropout_rate`, which the original version
            read implicitly as a global; passing it explicitly removes
            that hidden dependency without changing existing callers.

    Returns:
        A `keras.Model` mapping the raw feature inputs to a single
        sigmoid probability.
    """
    # Fall back to the module global to preserve the original behavior.
    rate = dropout_rate if grn_dropout_rate is None else grn_dropout_rate

    inputs = create_model_inputs()
    feature_list = encode_inputs(inputs, encoding_size)
    num_features = len(feature_list)

    # Learn soft feature-selection weights and mix the encoded features.
    features = VariableSelection(num_features, encoding_size, rate)(
        feature_list
    )

    outputs = layers.Dense(units=1, activation="sigmoid")(features)
    return keras.Model(inputs=inputs, outputs=outputs)
Compile, train, and evaluate the model |
# Hyperparameters.
learning_rate = 0.001
dropout_rate = 0.15
batch_size = 265
num_epochs = 20
encoding_size = 16

# Build and compile the binary classifier.
model = create_model(encoding_size)
model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
    loss=keras.losses.BinaryCrossentropy(),
    metrics=[keras.metrics.BinaryAccuracy(name="accuracy")],
)

# Stop once validation loss plateaus for 5 epochs, restoring the best weights.
early_stopping = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss", patience=5, restore_best_weights=True
)

print("Start training the model...")
train_dataset = get_dataset_from_csv(
    train_data_file, shuffle=True, batch_size=batch_size
)
valid_dataset = get_dataset_from_csv(valid_data_file, batch_size=batch_size)
model.fit(
    train_dataset,
    epochs=num_epochs,
    validation_data=valid_dataset,
    callbacks=[early_stopping],
)
print("Model training finished.")

# Report held-out accuracy.
print("Evaluating model performance...")
test_dataset = get_dataset_from_csv(test_data_file, batch_size=batch_size)
_, accuracy = model.evaluate(test_dataset)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
Start training the model... |
Epoch 1/20 |
641/641 [==============================] - 26s 22ms/step - loss: 317.7028 - accuracy: 0.9353 - val_loss: 230.1805 - val_accuracy: 0.9497 |
Epoch 2/20 |
641/641 [==============================] - 13s 19ms/step - loss: 231.4161 - accuracy: 0.9506 - val_loss: 224.7825 - val_accuracy: 0.9498 |
Epoch 3/20 |
641/641 [==============================] - 12s 19ms/step - loss: 226.8173 - accuracy: 0.9503 - val_loss: 223.0818 - val_accuracy: 0.9508 |
Epoch 4/20 |
641/641 [==============================] - 13s 19ms/step - loss: 224.1516 - accuracy: 0.9507 - val_loss: 221.8637 - val_accuracy: 0.9509 |
Epoch 5/20 |
641/641 [==============================] - 13s 19ms/step - loss: 223.9696 - accuracy: 0.9507 - val_loss: 217.8728 - val_accuracy: 0.9513 |
Epoch 6/20 |
641/641 [==============================] - 13s 19ms/step - loss: 220.7267 - accuracy: 0.9508 - val_loss: 220.2448 - val_accuracy: 0.9516 |
Epoch 7/20 |
641/641 [==============================] - 13s 19ms/step - loss: 219.7464 - accuracy: 0.9514 - val_loss: 216.4628 - val_accuracy: 0.9516 |
Epoch 8/20 |
641/641 [==============================] - 13s 19ms/step - loss: 218.7294 - accuracy: 0.9517 - val_loss: 215.2192 - val_accuracy: 0.9519 |
Epoch 9/20 |
641/641 [==============================] - 12s 19ms/step - loss: 218.3938 - accuracy: 0.9516 - val_loss: 217.1790 - val_accuracy: 0.9514 |
Epoch 10/20 |
641/641 [==============================] - 13s 19ms/step - loss: 217.2871 - accuracy: 0.9522 - val_loss: 213.4623 - val_accuracy: 0.9523 |
Epoch 11/20 |
641/641 [==============================] - 13s 19ms/step - loss: 215.0476 - accuracy: 0.9522 - val_loss: 211.6762 - val_accuracy: 0.9523 |
Epoch 12/20 |
641/641 [==============================] - 13s 19ms/step - loss: 213.2402 - accuracy: 0.9527 - val_loss: 212.2001 - val_accuracy: 0.9525 |
Epoch 13/20 |
641/641 [==============================] - 13s 20ms/step - loss: 212.8123 - accuracy: 0.9530 - val_loss: 207.9878 - val_accuracy: 0.9538 |
Epoch 14/20 |
641/641 [==============================] - 13s 19ms/step - loss: 208.4605 - accuracy: 0.9541 - val_loss: 208.0063 - val_accuracy: 0.9543 |
Epoch 15/20 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.