{ "cells": [ { "cell_type": "markdown", "id": "dedc2602", "metadata": {}, "source": [ "# Creating a convolutional network" ] }, { "cell_type": "code", "execution_count": 5, "id": "701fb5bd", "metadata": { "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Model: \"sequential\"\n", "_________________________________________________________________\n", " Layer (type) Output Shape Param # \n", "=================================================================\n", " conv2d (Conv2D) (None, 228, 150, 20) 1520 \n", " \n", " dropout (Dropout) (None, 228, 150, 20) 0 \n", " \n", " conv2d_1 (Conv2D) (None, 224, 146, 20) 10020 \n", " \n", " dropout_1 (Dropout) (None, 224, 146, 20) 0 \n", " \n", " max_pooling2d (MaxPooling2D (None, 74, 48, 20) 0 \n", " ) \n", " \n", " conv2d_2 (Conv2D) (None, 70, 44, 20) 10020 \n", " \n", " dropout_2 (Dropout) (None, 70, 44, 20) 0 \n", " \n", " conv2d_3 (Conv2D) (None, 66, 40, 10) 5010 \n", " \n", " dropout_3 (Dropout) (None, 66, 40, 10) 0 \n", " \n", " max_pooling2d_1 (MaxPooling (None, 22, 13, 10) 0 \n", " 2D) \n", " \n", " flatten (Flatten) (None, 2860) 0 \n", " \n", " dense (Dense) (None, 4) 11444 \n", " \n", "=================================================================\n", "Total params: 38,014\n", "Trainable params: 38,014\n", "Non-trainable params: 0\n", "_________________________________________________________________\n" ] } ], "source": [ "import tensorflow as tf\n", "from tensorflow.keras import models, layers\n", "\n", "conv_network = models.Sequential()\n", "conv_network.add(layers.Conv2D(20, (5,5), activation='relu', input_shape=(232, 154, 3)))\n", "conv_network.add(layers.Dropout(0.2))\n", "conv_network.add(layers.Conv2D(20, (5,5), activation='relu'))\n", "conv_network.add(layers.Dropout(0.2))\n", "conv_network.add(layers.MaxPooling2D(3,3))\n", "conv_network.add(layers.Conv2D(20, (5,5), activation='relu'))\n", "conv_network.add(layers.Dropout(0.2))\n", "conv_network.add(layers.Conv2D(10, (5,5), activation='relu'))\n", "conv_network.add(layers.Dropout(0.2))\n", "conv_network.add(layers.MaxPooling2D(3,3))\n", "conv_network.add(layers.Flatten())\n", "conv_network.add(layers.Dense(4, activation='softmax'))\n", "\n", "optimizer=tf.keras.optimizers.Adam(learning_rate=0.02)\n", "\n", "conv_network.compile(optimizer=optimizer, loss='mse', metrics=['accuracy'])\n", "\n", "conv_network.summary()" ] }, { "cell_type": "markdown", "id": "4ab96d93", "metadata": {}, "source": [ "# Loading in the data" ] }, { "cell_type": "code", "execution_count": 20, "id": "2a6353d7", "metadata": { "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found 2008 files belonging to 4 classes.\n", "Using 1607 files for training.\n", "Found 2008 files belonging to 4 classes.\n", "Using 401 files for validation.\n" ] } ], "source": [ "data_dir = \"/Users/kerickwalker/src/dis/deep_learning/bat_data\"\n", "\n", "img_width = 154\n", "img_height = 232\n", "batch_size = 128\n", "\n", "# Load in the training data\n", "training_data = tf.keras.utils.image_dataset_from_directory(\n", " data_dir,\n", " validation_split=0.2,\n", " subset=\"training\",\n", " seed=123,\n", " image_size=(img_height, img_width),\n", " batch_size=batch_size)\n", "\n", "# Load in validation data\n", "validation_data = tf.keras.utils.image_dataset_from_directory(\n", " data_dir,\n", " validation_split=0.2,\n", " subset=\"validation\",\n", " seed=123,\n", " image_size=(img_height, img_width),\n", " batch_size=batch_size)" ] }, { "cell_type": 
"markdown", "id": "cd4adeaa", "metadata": {}, "source": [ "# Training convolutional network" ] }, { "cell_type": "code", "execution_count": null, "id": "c1d53cef", "metadata": {}, "outputs": [], "source": [ "conv_network.fit(training_data, validation_data=validation_data, epochs=10)" ] }, { "cell_type": "markdown", "id": "8a22d520", "metadata": {}, "source": [ "# Transfer Learning with MobileNetV2" ] }, { "cell_type": "markdown", "id": "7451e896", "metadata": {}, "source": [ "#### Convert dataset to numpy array for preprocessing" ] }, { "cell_type": "code", "execution_count": 23, "id": "32c2dd65", "metadata": {}, "outputs": [], "source": [ "import tensorflow as tf\n", "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n", "from tensorflow.keras.applications import MobileNetV2\n", "from tensorflow.keras import layers, models" ] }, { "cell_type": "code", "execution_count": 24, "id": "bcff2372", "metadata": {}, "outputs": [], "source": [ "img_size = (232, 154) # MobileNetV2 input size\n", "batch_size = 32\n", "data_dir = \"/Users/kerickwalker/src/dis/deep_learning/bat_data\"" ] }, { "cell_type": "code", "execution_count": 25, "id": "26d31c9f", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found 2008 images belonging to 4 classes.\n" ] } ], "source": [ "train_datagen = ImageDataGenerator(\n", " rescale=1./255,\n", " rotation_range=20,\n", " width_shift_range=0.2,\n", " height_shift_range=0.2,\n", " shear_range=0.2,\n", " zoom_range=0.2,\n", " horizontal_flip=True,\n", " fill_mode='nearest'\n", ")\n", "\n", "train_generator = train_datagen.flow_from_directory(\n", " data_dir,\n", " target_size=img_size,\n", " batch_size=batch_size,\n", " class_mode='categorical',\n", " shuffle=True\n", ")" ] }, { "cell_type": "code", "execution_count": 26, "id": "cf420374", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:`input_shape` is undefined or non-square, or `rows` is not in [96, 128, 160, 192, 224]. 
{ "cell_type": "code", "execution_count": 26, "id": "cf420374", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:`input_shape` is undefined or non-square, or `rows` is not in [96, 128, 160, 192, 224]. Weights for input shape (224, 224) will be loaded as the default.\n" ] } ], "source": [ "# Load the pretrained backbone; include_top=False drops the ImageNet\n", "# classifier head so we can attach our own 4-class head.\n", "base_model = MobileNetV2(\n", "    input_shape=(232, 154, 3),\n", "    include_top=False,\n", "    weights='imagenet'\n", ")" ] }, { "cell_type": "code", "execution_count": 27, "id": "e7e027fb", "metadata": {}, "outputs": [], "source": [ "# Freeze the pretrained weights so only the new head is trained.\n", "for layer in base_model.layers:\n", "    layer.trainable = False" ] }, { "cell_type": "code", "execution_count": 28, "id": "2bd9014d", "metadata": {}, "outputs": [], "source": [ "# New classifier head on top of the frozen backbone.\n", "model = models.Sequential()\n", "model.add(base_model)\n", "model.add(layers.GlobalAveragePooling2D())\n", "model.add(layers.Dense(256, activation='relu'))\n", "model.add(layers.Dropout(0.5))\n", "model.add(layers.Dense(4, activation='softmax'))" ] }, { "cell_type": "code", "execution_count": 29, "id": "04aef745", "metadata": {}, "outputs": [], "source": [ "# categorical_crossentropy matches the one-hot labels from class_mode='categorical'.\n", "model.compile(\n", "    optimizer='adam',\n", "    loss='categorical_crossentropy',\n", "    metrics=['accuracy']\n", ")" ] }, { "cell_type": "code", "execution_count": 30, "id": "4f624f89", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/10\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "2023-11-30 18:29:04.053048: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'Placeholder/_0' with dtype int32\n", "\t [[{{node Placeholder/_0}}]]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "62/62 [==============================] - 38s 560ms/step - loss: 0.6074 - accuracy: 0.7657\n", "Epoch 2/10\n", "62/62 [==============================] - 44s 715ms/step - loss: 0.2596 - accuracy: 0.9018\n", "Epoch 3/10\n", "62/62 [==============================] - 50s 809ms/step - loss: 0.2202 - accuracy: 0.9165\n", "Epoch 4/10\n", "62/62 [==============================] - 52s 833ms/step - loss: 0.1985 - accuracy: 0.9276\n", "Epoch 5/10\n", "62/62 [==============================] - 51s 822ms/step - loss: 0.1963 - accuracy: 0.9276\n", "Epoch 6/10\n", "62/62 [==============================] - 57s 922ms/step - loss: 0.2040 - accuracy: 0.9236\n", "Epoch 7/10\n", "62/62 [==============================] - 57s 912ms/step - loss: 0.1698 - accuracy: 0.9357\n", "Epoch 8/10\n", "62/62 [==============================] - 52s 834ms/step - loss: 0.1672 - accuracy: 0.9332\n", "Epoch 9/10\n", "62/62 [==============================] - 50s 795ms/step - loss: 0.1603 - accuracy: 0.9408\n", "Epoch 10/10\n", "62/62 [==============================] - 48s 778ms/step - loss: 0.1711 - accuracy: 0.9332\n" ] } ], "source": [ "history = model.fit(\n", "    train_generator,\n", "    steps_per_epoch=train_generator.samples // batch_size,\n", "    epochs=10\n", ")" ] }, 
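{ "cell_type": "markdown", "id": "e1d7c4a9", "metadata": {}, "source": [ "As an optional last step (not part of the original run, so no recorded output): the `History` object returned by `model.fit` records the per-epoch `loss` and `accuracy` printed above, and the sketch below plots them. It assumes `matplotlib` is installed in this environment." ] }, { "cell_type": "code", "execution_count": null, "id": "a6f2b8d3", "metadata": {}, "outputs": [], "source": [ "# Plot the training curves recorded by model.fit above.\n", "import matplotlib.pyplot as plt\n", "\n", "plt.plot(history.history['loss'], label='loss')\n", "plt.plot(history.history['accuracy'], label='accuracy')\n", "plt.xlabel('epoch')\n", "plt.legend()\n", "plt.show()" ] } ], "metadata": { "kernelspec": { "display_name": "disdl", "language": "python", "name": "disdl" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.5" } }, "nbformat": 4, "nbformat_minor": 5 }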