import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras
from tensorflow.keras import layers

# Load the Food-101 dataset (101 classes) with its training and validation splits.
(train_data, test_data), dataset_info = tfds.load(
    name='food101',
    split=['train', 'validation'],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)

labels = dataset_info.features['label'].names
num_labels = len(labels)

BATCH_SIZE = 32


def preprocess_img(image, label, img_size=224):
    """Resize each image to img_size x img_size and cast it to float32."""
    image = tf.image.resize(image, [img_size, img_size])
    image = tf.cast(image, tf.float32)
    return image, label


# Map the preprocessing function over both splits, then batch and prefetch.
train_data = (
    train_data
    .map(preprocess_img, num_parallel_calls=tf.data.AUTOTUNE)
    .batch(BATCH_SIZE)
    .prefetch(tf.data.AUTOTUNE)
)
test_data = (
    test_data
    .map(preprocess_img, num_parallel_calls=tf.data.AUTOTUNE)
    .batch(BATCH_SIZE)
    .prefetch(tf.data.AUTOTUNE)
)

# Simple baseline model: global average pooling over the raw pixels feeding a dense
# classification head. The softmax is kept as a separate float32 activation layer,
# with the Dense layer producing the raw logits.
model = keras.Sequential([
    layers.InputLayer(input_shape=(224, 224, 3)),
    layers.GlobalAveragePooling2D(),
    layers.Dense(num_labels),
    layers.Activation('softmax', dtype=tf.float32),
])

model.compile(
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    optimizer=keras.optimizers.legacy.Adam(learning_rate=0.001),
    metrics=['accuracy'],
)

model.summary()

# The tf.data pipelines are already batched, so fit/evaluate need no batch_size argument.
model.fit(train_data, epochs=5, verbose=2)
model.evaluate(test_data)

# Save the trained model in the native Keras format.
model.save("foodTrain.keras")
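
# Optional follow-up (a minimal sketch, not part of the original script): reload the
# saved model and run a prediction on one validation batch. The file name
# "foodTrain.keras" matches the save call above; everything else here is an
# illustrative assumption about how the trained model might be used.
loaded_model = keras.models.load_model("foodTrain.keras")

for images, image_labels in test_data.take(1):
    # predict() returns one probability vector of length num_labels per image.
    preds = loaded_model.predict(images)
    pred_indices = tf.argmax(preds, axis=-1).numpy()
    print("Predicted:", labels[pred_indices[0]],
          "| Actual:", labels[int(image_labels[0])])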