|
import os

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
IMG_SIZE = 224
BATCH_SIZE = 32
|
# Raw strings keep the Windows backslashes from being read as escape sequences
# (e.g. '\t' in '\train' would otherwise become a tab character).
# flow_from_directory expects the directory that contains one subfolder per class.
train_dir = r'm2rncvif2arzs1w3q44gfn\images.cv_m2rncvif2arzs1w3q44gfn\data\train\burrito'
val_dir = r'm2rncvif2arzs1w3q44gfn\images.cv_m2rncvif2arzs1w3q44gfn\data\val\burrito'
|
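# Augment the training images on the fly; the validation images are only rescaled.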
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
|
val_datagen = ImageDataGenerator(rescale=1./255)
|
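# The generators resize every image to IMG_SIZE x IMG_SIZE and yield one-hot labels.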
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(IMG_SIZE, IMG_SIZE),
    batch_size=BATCH_SIZE,
    class_mode='categorical')
|
val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(IMG_SIZE, IMG_SIZE),
    batch_size=BATCH_SIZE,
    class_mode='categorical')
|
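# Small CNN: four Conv2D/MaxPooling2D blocks followed by a dense head with
# dropout and a two-way softmax output.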
model = Sequential()

model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_SIZE, IMG_SIZE, 3)))
model.add(MaxPooling2D((2, 2)))

model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))

model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))

model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))

model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))
|
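# Categorical cross-entropy matches the one-hot labels produced by class_mode='categorical'.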
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
|
train_steps_per_epoch = train_generator.samples // BATCH_SIZE
val_steps_per_epoch = val_generator.samples // BATCH_SIZE
|
# fit_generator is deprecated; in TF 2.x, model.fit accepts generators directly.
history = model.fit(
    train_generator,
    steps_per_epoch=train_steps_per_epoch,
    epochs=10,
    validation_data=val_generator,
    validation_steps=val_steps_per_epoch)
|
# Raw string again so '\test' is not mangled by the '\t' escape.
dir_path = r'm2rncvif2arzs1w3q44gfn\images.cv_m2rncvif2arzs1w3q44gfn\data\test\burrito'
|
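# Optional sanity check: class_indices maps each class folder name to its softmax
# column, so printing it shows whether column 0 or column 1 corresponds to
# 'burrito' in the loop below.
print(train_generator.class_indices)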
for img_file in os.listdir(dir_path):
    # Load and preprocess each test image the same way the training data was prepared.
    img_path = os.path.join(dir_path, img_file)
    img = image.load_img(img_path, target_size=(IMG_SIZE, IMG_SIZE))
    img_array = image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)
    img_array /= 255.0

    prediction = model.predict(img_array)

    # Column order follows train_generator.class_indices (alphabetical by folder
    # name), so swap the indices below if 'burrito' maps to column 0.
    if prediction[0][0] > prediction[0][1]:
        print('{}: Not a burrito'.format(img_file))
    else:
        print('{}: Burrito!'.format(img_file))