vibha-mah committed on
Commit
9c69f85
1 Parent(s): 2e7075d

Upload bat_classifier.ipynb

Files changed (1)
  1. bat_classifier.ipynb +372 -0
bat_classifier.ipynb ADDED
@@ -0,0 +1,372 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "dedc2602",
+ "metadata": {},
+ "source": [
+ "# Creating a convolutional network"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "701fb5bd",
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Model: \"sequential\"\n",
+ "_________________________________________________________________\n",
+ " Layer (type) Output Shape Param # \n",
+ "=================================================================\n",
+ " conv2d (Conv2D) (None, 228, 150, 20) 1520 \n",
+ " \n",
+ " dropout (Dropout) (None, 228, 150, 20) 0 \n",
+ " \n",
+ " conv2d_1 (Conv2D) (None, 224, 146, 20) 10020 \n",
+ " \n",
+ " dropout_1 (Dropout) (None, 224, 146, 20) 0 \n",
+ " \n",
+ " max_pooling2d (MaxPooling2D (None, 74, 48, 20) 0 \n",
+ " ) \n",
+ " \n",
+ " conv2d_2 (Conv2D) (None, 70, 44, 20) 10020 \n",
+ " \n",
+ " dropout_2 (Dropout) (None, 70, 44, 20) 0 \n",
+ " \n",
+ " conv2d_3 (Conv2D) (None, 66, 40, 10) 5010 \n",
+ " \n",
+ " dropout_3 (Dropout) (None, 66, 40, 10) 0 \n",
+ " \n",
+ " max_pooling2d_1 (MaxPooling (None, 22, 13, 10) 0 \n",
+ " 2D) \n",
+ " \n",
+ " flatten (Flatten) (None, 2860) 0 \n",
+ " \n",
+ " dense (Dense) (None, 4) 11444 \n",
+ " \n",
+ "=================================================================\n",
+ "Total params: 38,014\n",
+ "Trainable params: 38,014\n",
+ "Non-trainable params: 0\n",
+ "_________________________________________________________________\n"
+ ]
+ }
+ ],
+ "source": [
+ "import tensorflow as tf\n",
+ "from tensorflow.keras import models, layers\n",
+ "\n",
+ "conv_network = models.Sequential()\n",
+ "conv_network.add(layers.Conv2D(20, (5,5), activation='relu', input_shape=(232, 154, 3)))\n",
+ "conv_network.add(layers.Dropout(0.2))\n",
+ "conv_network.add(layers.Conv2D(20, (5,5), activation='relu'))\n",
+ "conv_network.add(layers.Dropout(0.2))\n",
+ "conv_network.add(layers.MaxPooling2D(3,3))\n",
+ "conv_network.add(layers.Conv2D(20, (5,5), activation='relu'))\n",
+ "conv_network.add(layers.Dropout(0.2))\n",
+ "conv_network.add(layers.Conv2D(10, (5,5), activation='relu'))\n",
+ "conv_network.add(layers.Dropout(0.2))\n",
+ "conv_network.add(layers.MaxPooling2D(3,3))\n",
+ "conv_network.add(layers.Flatten())\n",
+ "conv_network.add(layers.Dense(4, activation='softmax'))\n",
+ "\n",
+ "optimizer = tf.keras.optimizers.Adam(learning_rate=0.02)\n",
+ "\n",
+ "# image_dataset_from_directory yields integer class labels, so a sparse\n",
+ "# categorical loss fits this 4-way softmax head (mse would compare integer\n",
+ "# labels against a probability vector)\n",
+ "conv_network.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n",
+ "\n",
+ "conv_network.summary()"
+ ]
+ },
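+ {
+ "cell_type": "markdown",
+ "id": "1a2b3c4d",
+ "metadata": {},
+ "source": [
+ "A quick sanity check of the arithmetic in the summary above (added sketch): a 'valid' 5x5 convolution shrinks each spatial dimension by 4, and a Conv2D layer has (kh * kw * c_in + 1) * filters parameters, one bias per filter."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2b3c4d5e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch: reproduce the shapes and parameter counts reported by summary()\n",
+ "def conv_out(size, k=5):\n",
+ "    # 'valid' padding: output = input - kernel + 1\n",
+ "    return size - k + 1\n",
+ "\n",
+ "def conv_params(k, c_in, filters):\n",
+ "    # weights per filter plus one bias each\n",
+ "    return (k * k * c_in + 1) * filters\n",
+ "\n",
+ "print(conv_out(232), conv_out(154))  # 228 150, matching conv2d\n",
+ "print(conv_params(5, 3, 20))   # 1520\n",
+ "print(conv_params(5, 20, 20))  # 10020\n",
+ "print(conv_params(5, 20, 10))  # 5010\n",
+ "print(22 * 13 * 10)            # 2860 features after flatten\n",
+ "print((2860 + 1) * 4)          # 11444 dense parameters"
+ ]
+ },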
+ {
+ "cell_type": "markdown",
+ "id": "4ab96d93",
+ "metadata": {},
+ "source": [
+ "# Loading in the data"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "id": "2a6353d7",
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Found 2008 files belonging to 4 classes.\n",
+ "Using 1607 files for training.\n",
+ "Found 2008 files belonging to 4 classes.\n",
+ "Using 401 files for validation.\n"
+ ]
+ }
+ ],
+ "source": [
+ "data_dir = \"/Users/kerickwalker/src/dis/deep_learning/bat_data\"\n",
+ "\n",
+ "img_width = 154\n",
+ "img_height = 232\n",
+ "batch_size = 128\n",
+ "\n",
+ "# Load in the training data\n",
+ "training_data = tf.keras.utils.image_dataset_from_directory(\n",
+ " data_dir,\n",
+ " validation_split=0.2,\n",
+ " subset=\"training\",\n",
+ " seed=123,\n",
+ " image_size=(img_height, img_width),\n",
+ " batch_size=batch_size)\n",
+ "\n",
+ "# Load in validation data\n",
+ "validation_data = tf.keras.utils.image_dataset_from_directory(\n",
+ " data_dir,\n",
+ " validation_split=0.2,\n",
+ " subset=\"validation\",\n",
+ " seed=123,\n",
+ " image_size=(img_height, img_width),\n",
+ " batch_size=batch_size)"
+ ]
+ },
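+ {
+ "cell_type": "markdown",
+ "id": "3c4d5e6f",
+ "metadata": {},
+ "source": [
+ "`image_dataset_from_directory` yields raw pixel values in [0, 255] with integer class labels. A minimal added sketch of rescaling both splits to [0, 1], which usually helps a small CNN train stably:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4d5e6f7a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch: scale pixels from [0, 255] down to [0, 1] for both splits\n",
+ "normalization = tf.keras.layers.Rescaling(1./255)\n",
+ "training_data = training_data.map(lambda x, y: (normalization(x), y))\n",
+ "validation_data = validation_data.map(lambda x, y: (normalization(x), y))"
+ ]
+ },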
+ {
+ "cell_type": "markdown",
+ "id": "cd4adeaa",
+ "metadata": {},
+ "source": [
+ "# Training the convolutional network"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c1d53cef",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "conv_network.fit(training_data, validation_data=validation_data, epochs=10)"
+ ]
+ },
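+ {
+ "cell_type": "markdown",
+ "id": "5e6f7a8b",
+ "metadata": {},
+ "source": [
+ "A small usage sketch for checking the fitted network on the held-out split:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6f7a8b9c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch: report loss and accuracy on the validation split\n",
+ "val_loss, val_acc = conv_network.evaluate(validation_data)\n",
+ "print(f\"validation accuracy: {val_acc:.3f}\")"
+ ]
+ },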
+ {
+ "cell_type": "markdown",
+ "id": "8a22d520",
+ "metadata": {},
+ "source": [
+ "# Transfer Learning with MobileNetV2"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7451e896",
+ "metadata": {},
+ "source": [
+ "#### Setting up preprocessing and data augmentation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "id": "32c2dd65",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import tensorflow as tf\n",
+ "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
+ "from tensorflow.keras.applications import MobileNetV2\n",
+ "from tensorflow.keras import layers, models"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "id": "bcff2372",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "img_size = (232, 154) # spectrogram image size; MobileNetV2's default input is 224x224\n",
+ "batch_size = 32\n",
+ "data_dir = \"/Users/kerickwalker/src/dis/deep_learning/bat_data\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "id": "26d31c9f",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Found 2008 images belonging to 4 classes.\n"
+ ]
+ }
+ ],
+ "source": [
+ "train_datagen = ImageDataGenerator(\n",
+ " rescale=1./255,\n",
+ " rotation_range=20,\n",
+ " width_shift_range=0.2,\n",
+ " height_shift_range=0.2,\n",
+ " shear_range=0.2,\n",
+ " zoom_range=0.2,\n",
+ " horizontal_flip=True,\n",
+ " fill_mode='nearest'\n",
+ ")\n",
+ "\n",
+ "train_generator = train_datagen.flow_from_directory(\n",
+ " data_dir,\n",
+ " target_size=img_size,\n",
+ " batch_size=batch_size,\n",
+ " class_mode='categorical',\n",
+ " shuffle=True\n",
+ ")"
+ ]
+ },
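+ {
+ "cell_type": "markdown",
+ "id": "7a8b9c0d",
+ "metadata": {},
+ "source": [
+ "`train_generator` draws from all 2008 images, so this model has no held-out set. A sketch, assuming the same directory layout, of carving one out with `validation_split` (augmentation is left off so the subset reflects the raw data):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8b9c0d1e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch: reserve 20% of the images as an unaugmented validation subset\n",
+ "val_datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)\n",
+ "\n",
+ "val_generator = val_datagen.flow_from_directory(\n",
+ "    data_dir,\n",
+ "    target_size=img_size,\n",
+ "    batch_size=batch_size,\n",
+ "    class_mode='categorical',\n",
+ "    subset='validation',\n",
+ "    shuffle=False\n",
+ ")"
+ ]
+ },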
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "id": "cf420374",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "WARNING:tensorflow:`input_shape` is undefined or non-square, or `rows` is not in [96, 128, 160, 192, 224]. Weights for input shape (224, 224) will be loaded as the default.\n"
+ ]
+ }
+ ],
+ "source": [
+ "base_model = MobileNetV2(\n",
+ " input_shape=(232, 154, 3),\n",
+ " include_top=False,\n",
+ " weights='imagenet'\n",
+ ")"
+ ]
+ },
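+ {
+ "cell_type": "markdown",
+ "id": "9c0d1e2f",
+ "metadata": {},
+ "source": [
+ "MobileNetV2's ImageNet weights were trained on inputs scaled to [-1, 1], while the generator above rescales to [0, 1]. A hedged alternative sketch: let the model's own preprocessing function do the scaling instead of `rescale=1./255`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0d1e2f3a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch: MobileNetV2's canonical [-1, 1] scaling in place of rescale=1./255\n",
+ "from tensorflow.keras.applications.mobilenet_v2 import preprocess_input\n",
+ "\n",
+ "mnv2_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)"
+ ]
+ },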
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "id": "e7e027fb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "for layer in base_model.layers:\n",
+ " layer.trainable = False"
+ ]
+ },
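+ {
+ "cell_type": "markdown",
+ "id": "1e2f3a4b",
+ "metadata": {},
+ "source": [
+ "A quick added check that the freeze took effect before building the classification head:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2f3a4b5c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Check: the base model should have no trainable weights left\n",
+ "print(len(base_model.trainable_weights))       # expect 0\n",
+ "print(len(base_model.non_trainable_weights))"
+ ]
+ },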
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "id": "2bd9014d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model = models.Sequential()\n",
+ "model.add(base_model)\n",
+ "model.add(layers.GlobalAveragePooling2D())\n",
+ "model.add(layers.Dense(256, activation='relu'))\n",
+ "model.add(layers.Dropout(0.5))\n",
+ "model.add(layers.Dense(4, activation='softmax'))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "id": "04aef745",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model.compile(\n",
+ " optimizer='adam',\n",
+ " loss='categorical_crossentropy',\n",
+ " metrics=['accuracy']\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "id": "4f624f89",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Epoch 1/10\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "2023-11-30 18:29:04.053048: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'Placeholder/_0' with dtype int32\n",
+ "\t [[{{node Placeholder/_0}}]]\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "62/62 [==============================] - 38s 560ms/step - loss: 0.6074 - accuracy: 0.7657\n",
+ "Epoch 2/10\n",
+ "62/62 [==============================] - 44s 715ms/step - loss: 0.2596 - accuracy: 0.9018\n",
+ "Epoch 3/10\n",
+ "62/62 [==============================] - 50s 809ms/step - loss: 0.2202 - accuracy: 0.9165\n",
+ "Epoch 4/10\n",
+ "62/62 [==============================] - 52s 833ms/step - loss: 0.1985 - accuracy: 0.9276\n",
+ "Epoch 5/10\n",
+ "62/62 [==============================] - 51s 822ms/step - loss: 0.1963 - accuracy: 0.9276\n",
+ "Epoch 6/10\n",
+ "62/62 [==============================] - 57s 922ms/step - loss: 0.2040 - accuracy: 0.9236\n",
+ "Epoch 7/10\n",
+ "62/62 [==============================] - 57s 912ms/step - loss: 0.1698 - accuracy: 0.9357\n",
+ "Epoch 8/10\n",
+ "62/62 [==============================] - 52s 834ms/step - loss: 0.1672 - accuracy: 0.9332\n",
+ "Epoch 9/10\n",
+ "62/62 [==============================] - 50s 795ms/step - loss: 0.1603 - accuracy: 0.9408\n",
+ "Epoch 10/10\n",
+ "62/62 [==============================] - 48s 778ms/step - loss: 0.1711 - accuracy: 0.9332\n"
+ ]
+ }
+ ],
+ "source": [
+ "history = model.fit(\n",
+ " train_generator,\n",
+ " steps_per_epoch=train_generator.samples // batch_size,\n",
+ " epochs=10\n",
+ ")"
+ ]
+ },
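+ {
+ "cell_type": "markdown",
+ "id": "3a4b5c6d",
+ "metadata": {},
+ "source": [
+ "Once the frozen-base model has converged, a common second phase is to unfreeze the top of the base network and keep training at a much lower learning rate. A sketch of that phase; the layer count and rate are illustrative, not tuned:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4b5c6d7e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch: fine-tune the last ~20 base layers at a low learning rate\n",
+ "for layer in base_model.layers[-20:]:\n",
+ "    layer.trainable = True\n",
+ "\n",
+ "# recompile so the trainability change takes effect; a small rate avoids\n",
+ "# wrecking the pretrained weights\n",
+ "model.compile(\n",
+ "    optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),\n",
+ "    loss='categorical_crossentropy',\n",
+ "    metrics=['accuracy']\n",
+ ")\n",
+ "\n",
+ "fine_tune_history = model.fit(\n",
+ "    train_generator,\n",
+ "    steps_per_epoch=train_generator.samples // batch_size,\n",
+ "    epochs=5\n",
+ ")"
+ ]
+ }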
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "disdl",
+ "language": "python",
+ "name": "disdl"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }