\"country_of_birth_self\",
\"citizenship\",
\"own_business_or_self_employed\",
\"fill_inc_questionnaire_for_veteran's_admin\",
\"veterans_benefits\",
\"weeks_worked_in_year\",
\"year\",
\"income_level\",
]
data_url = \"https://archive.ics.uci.edu/ml/machine-learning-databases/census-income-mld/census-income.data.gz\"
data = pd.read_csv(data_url, header=None, names=CSV_HEADER)
test_data_url = \"https://archive.ics.uci.edu/ml/machine-learning-databases/census-income-mld/census-income.test.gz\"
test_data = pd.read_csv(test_data_url, header=None, names=CSV_HEADER)
print(f\"Data shape: {data.shape}\")
print(f\"Test data shape: {test_data.shape}\")
Data shape: (199523, 42)
Test data shape: (99762, 42)
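At this point the target column still holds raw label strings. As a quick sanity check (an addition here, not part of the original example), the label distribution can be inspected before converting it:

# Inspect the raw label strings; " - 50000." marks incomes under $50K.
print(data["income_level"].value_counts())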
We convert the target column from string to integer.
data[\"income_level\"] = data[\"income_level\"].apply(
lambda x: 0 if x == \" - 50000.\" else 1
)
test_data[\"income_level\"] = test_data[\"income_level\"].apply(
lambda x: 0 if x == \" - 50000.\" else 1
)
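A minimal follow-up check (again an addition, assuming the conversion above ran on both frames) confirms that only integer labels remain:

# After the conversion, only the integer labels 0 and 1 should remain.
print(data["income_level"].unique())
print(test_data["income_level"].unique())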
Then, we split the dataset into training and validation sets.
random_selection = np.random.rand(len(data.index)) <= 0.85
train_data = data[random_selection]
valid_data = data[~random_selection]
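Note that np.random.rand is unseeded here, so the split changes from run to run. If reproducibility matters, the random number generator can be seeded first; a minimal sketch (the seed value 42 is an arbitrary choice, not from the original):

# Fix the RNG so the 85/15 split is identical across runs.
np.random.seed(42)
random_selection = np.random.rand(len(data.index)) <= 0.85
train_data = data[random_selection]
valid_data = data[~random_selection]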
Finally, we store the training, validation, and test splits locally as CSV files.
train_data_file = "train_data.csv"
valid_data_file = "valid_data.csv"
test_data_file = "test_data.csv"

train_data.to_csv(train_data_file, index=False, header=False)
valid_data.to_csv(valid_data_file, index=False, header=False)
test_data.to_csv(test_data_file, index=False, header=False)
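As a quick check (an addition, not in the original example), the sizes of the three splits can be printed; the train and validation counts should add up to the full dataset:

# The train and validation row counts should sum to len(data).
print(f"Train split: {len(train_data)} rows")
print(f"Validation split: {len(valid_data)} rows")
print(f"Test split: {len(test_data)} rows")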
Define dataset metadata
Here, we define the metadata of the dataset that will be useful for reading and parsing the data into input features, and for encoding the input features with respect to their types.
# Target feature name.
TARGET_FEATURE_NAME = "income_level"

# Weight column name.
WEIGHT_COLUMN_NAME = "instance_weight"

# Numeric feature names.
NUMERIC_FEATURE_NAMES = [
    "age",
    "wage_per_hour",
    "capital_gains",
    "capital_losses",
    "dividends_from_stocks",
    "num_persons_worked_for_employer",
    "weeks_worked_in_year",
]
# Categorical features and their vocabulary lists.
# Note that we cast all categorical feature values to strings, so that
# every vocabulary entry is handled uniformly as a string.
CATEGORICAL_FEATURES_WITH_VOCABULARY = {
    feature_name: sorted([str(value) for value in list(data[feature_name].unique())])
    for feature_name in CSV_HEADER
    if feature_name
    not in list(NUMERIC_FEATURE_NAMES + [WEIGHT_COLUMN_NAME, TARGET_FEATURE_NAME])
}
# All feature names.
FEATURE_NAMES = NUMERIC_FEATURE_NAMES + list(
    CATEGORICAL_FEATURES_WITH_VOCABULARY.keys()
)
# Feature default values.
COLUMN_DEFAULTS = [
    [0.0]
    if feature_name in NUMERIC_FEATURE_NAMES + [TARGET_FEATURE_NAME, WEIGHT_COLUMN_NAME]
    else ["NA"]
    for feature_name in CSV_HEADER
]
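To get a feel for this metadata, the vocabulary size of each categorical feature can be printed; a small illustrative snippet (an addition, using the dictionary defined above):

# Show how many distinct values each categorical feature takes.
for feature_name, vocabulary in CATEGORICAL_FEATURES_WITH_VOCABULARY.items():
    print(f"{feature_name}: {len(vocabulary)} unique values")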
Create a tf.data.Dataset for training and evaluation
We create an input function to read and parse the file, and convert features and labels into a [tf.data.Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) for training and evaluation.
from tensorflow.keras.layers import StringLookup
def process(features, target):
    for feature_name in features:
        if feature_name in CATEGORICAL_FEATURES_WITH_VOCABULARY:
            # Cast categorical feature values to string.
            features[feature_name] = tf.cast(features[feature_name], tf.dtypes.string)
    # Get the instance weight.
    weight = features.pop(WEIGHT_COLUMN_NAME)
    return features, target, weight
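# Note: because `process` returns a (features, target, weight) tuple, the
# resulting tf.data.Dataset yields per-example weights, which Keras
# interprets as sample weights during training and evaluation.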
def get_dataset_from_csv(csv_file_path, shuffle=False, batch_size=128):
    dataset = tf.data.experimental.make_csv_dataset(
        csv_file_path,
        batch_size=batch_size,