text stringlengths 0 4.99k |
|---|
# Stratified random split: within each cover-type class, send ~85% of the
# rows to the training set and the remaining ~15% to the test set, so both
# splits keep roughly the same class proportions as the full dataset.
for _, class_rows in data.groupby("Cover_Type"):
    in_train = np.random.rand(len(class_rows.index)) <= 0.85
    train_splits.append(class_rows[in_train])
    test_splits.append(class_rows[~in_train])

# Stitch the per-class pieces back together, shuffle each split
# (sample(frac=1) is a full-frame shuffle), and renumber the index.
train_data = pd.concat(train_splits).sample(frac=1).reset_index(drop=True)
test_data = pd.concat(test_splits).sample(frac=1).reset_index(drop=True)

print(f"Train split size: {len(train_data.index)}")
print(f"Test split size: {len(test_data.index)}")
Train split size: 493323 |
Test split size: 87689 |
Next, store the training and test data in separate CSV files. |
# Persist both splits to disk as CSV (without the pandas index column) so
# they can later be streamed back with a CSV-reading input pipeline.
train_data_file = "train_data.csv"
test_data_file = "test_data.csv"

for split_frame, split_path in ((train_data, train_data_file), (test_data, test_data_file)):
    split_frame.to_csv(split_path, index=False)
Define dataset metadata |
Here, we define the metadata of the dataset that will be useful for reading and parsing the data into input features, and encoding the input features with respect to their types. |
# Column holding the classification target.
TARGET_FEATURE_NAME = "Cover_Type"

# The target's class labels, as strings.
TARGET_FEATURE_LABELS = ["0", "1", "2", "3", "4", "5", "6"]

# Columns treated as continuous numeric inputs.
NUMERIC_FEATURE_NAMES = [
    "Aspect",
    "Elevation",
    "Hillshade_3pm",
    "Hillshade_9am",
    "Hillshade_Noon",
    "Horizontal_Distance_To_Fire_Points",
    "Horizontal_Distance_To_Hydrology",
    "Horizontal_Distance_To_Roadways",
    "Slope",
    "Vertical_Distance_To_Hydrology",
]

# Categorical columns, each mapped to the vocabulary of values observed in
# the full dataset.
CATEGORICAL_FEATURES_WITH_VOCABULARY = {
    feature: list(data[feature].unique())
    for feature in ("Soil_Type", "Wilderness_Area")
}

CATEGORICAL_FEATURE_NAMES = list(CATEGORICAL_FEATURES_WITH_VOCABULARY.keys())

# All input feature columns (target excluded).
FEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES

# Per-column parse defaults for the CSV reader: integer 0 for numeric
# columns and the target, the string "NA" for categorical columns.
COLUMN_DEFAULTS = []
for column in CSV_HEADER:
    if column == TARGET_FEATURE_NAME or column in NUMERIC_FEATURE_NAMES:
        COLUMN_DEFAULTS.append([0])
    else:
        COLUMN_DEFAULTS.append(["NA"])

NUM_CLASSES = len(TARGET_FEATURE_LABELS)
Experiment setup |
Next, let's define an input function that reads and parses the file, then converts features and labels into a `tf.data.Dataset` for training or evaluation. |
def get_dataset_from_csv(csv_file_path, batch_size, shuffle=False):
    """Build a batched dataset of (features, label) pairs from a CSV file.

    Args:
        csv_file_path: Path to the CSV file to read.
        batch_size: Number of examples per yielded batch.
        shuffle: Whether the CSV reader should shuffle records.

    Returns:
        A cached `tf.data.Dataset`; each element is a (feature-dict, label)
        batch, with the label column split out by name.
    """
    parse_options = dict(
        batch_size=batch_size,
        column_names=CSV_HEADER,
        column_defaults=COLUMN_DEFAULTS,
        label_name=TARGET_FEATURE_NAME,
        num_epochs=1,
        header=True,
        shuffle=shuffle,
    )
    dataset = tf.data.experimental.make_csv_dataset(csv_file_path, **parse_options)
    # Cache parsed batches so later epochs skip re-reading/re-parsing the CSV.
    return dataset.cache()
Here we configure the parameters and implement the procedure for running a training and evaluation experiment given a model. |
# Experiment hyperparameters.
learning_rate = 0.001  # Adam step size used in run_experiment.
dropout_rate = 0.1  # presumably consumed by the model's dropout layers (defined elsewhere) — confirm
batch_size = 265  # examples per batch. NOTE(review): 265 looks like a typo for 256 — confirm before changing
num_epochs = 50  # full passes over the training split.
hidden_units = [32, 32]  # hidden-layer sizes for the model (built elsewhere).
def run_experiment(model):
    """Compile `model`, train it on the CSV train split, and print test accuracy.

    Uses the module-level hyperparameters (learning_rate, batch_size,
    num_epochs) and the train/test CSV file paths. Side effects only:
    the trained model is mutated in place and results are printed.
    """
    optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.SparseCategoricalCrossentropy(),
        metrics=[keras.metrics.SparseCategoricalAccuracy()],
    )

    train_dataset = get_dataset_from_csv(train_data_file, batch_size, shuffle=True)
    test_dataset = get_dataset_from_csv(test_data_file, batch_size)

    print("Start training the model...")
    history = model.fit(train_dataset, epochs=num_epochs)
    print("Model training finished")

    # evaluate() returns [loss, accuracy]; only accuracy is reported.
    _, accuracy = model.evaluate(test_dataset, verbose=0)
    print(f"Test accuracy: {round(accuracy * 100, 2)}%")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.