{ "cells": [ { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found 579 images belonging to 5 classes.\n", "Found 124 images belonging to 5 classes.\n", "Found 235 images belonging to 5 classes.\n", "Epoch 1/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m14s\u001b[0m 423ms/step - accuracy: 0.2483 - loss: 2.7352 - val_accuracy: 0.2339 - val_loss: 2.1511\n", "Epoch 2/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 370ms/step - accuracy: 0.2506 - loss: 2.0916 - val_accuracy: 0.2339 - val_loss: 1.9442\n", "Epoch 3/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 397ms/step - accuracy: 0.2298 - loss: 1.9145 - val_accuracy: 0.2339 - val_loss: 1.8353\n", "Epoch 4/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 374ms/step - accuracy: 0.2367 - loss: 1.8245 - val_accuracy: 0.2339 - val_loss: 1.7828\n", "Epoch 5/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 373ms/step - accuracy: 0.2411 - loss: 1.7718 - val_accuracy: 0.2339 - val_loss: 1.7444\n", "Epoch 6/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 369ms/step - accuracy: 0.1975 - loss: 1.7408 - val_accuracy: 0.2339 - val_loss: 1.7163\n", "Epoch 7/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 387ms/step - accuracy: 0.2479 - loss: 1.7097 - val_accuracy: 0.2339 - val_loss: 1.6960\n", "Epoch 8/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 360ms/step - accuracy: 0.2480 - loss: 1.6955 - val_accuracy: 0.2339 - val_loss: 1.6844\n", "Epoch 9/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 381ms/step - accuracy: 0.2417 - loss: 1.6826 - val_accuracy: 0.2339 - val_loss: 1.6750\n", "Epoch 10/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 415ms/step - accuracy: 0.2540 - loss: 1.6696 - val_accuracy: 0.2339 - val_loss: 1.6658\n", "Epoch 11/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 377ms/step - accuracy: 0.2436 - loss: 1.6617 - val_accuracy: 0.2339 - val_loss: 1.6583\n", "Epoch 12/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 367ms/step - accuracy: 0.2333 - loss: 1.6566 - val_accuracy: 0.2339 - val_loss: 1.6490\n", "Epoch 13/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 363ms/step - accuracy: 0.2229 - loss: 1.6558 - val_accuracy: 0.2339 - val_loss: 1.6456\n", "Epoch 14/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 359ms/step - accuracy: 0.2277 - loss: 1.6478 - val_accuracy: 0.2339 - val_loss: 1.6423\n", "Epoch 15/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 354ms/step - accuracy: 0.2638 - loss: 1.6418 - val_accuracy: 0.2339 - val_loss: 1.6430\n", "Epoch 16/50\n", "\u001b[1m19/19\u001b[0m 
\u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 374ms/step - accuracy: 0.2305 - loss: 1.6422 - val_accuracy: 0.2339 - val_loss: 1.6400\n", "Epoch 17/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 382ms/step - accuracy: 0.2539 - loss: 1.6356 - val_accuracy: 0.2339 - val_loss: 1.6367\n", "Epoch 18/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 391ms/step - accuracy: 0.2547 - loss: 1.6336 - val_accuracy: 0.2339 - val_loss: 1.6331\n", "Epoch 19/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 366ms/step - accuracy: 0.2326 - loss: 1.6322 - val_accuracy: 0.2339 - val_loss: 1.6280\n", "Epoch 20/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 365ms/step - accuracy: 0.2219 - loss: 1.6340 - val_accuracy: 0.2339 - val_loss: 1.6312\n", "Epoch 21/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 351ms/step - accuracy: 0.2411 - loss: 1.6299 - val_accuracy: 0.2339 - val_loss: 1.6290\n", "Epoch 22/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 356ms/step - accuracy: 0.2371 - loss: 1.6308 - val_accuracy: 0.2339 - val_loss: 1.6272\n", "Epoch 23/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 359ms/step - accuracy: 0.2460 - loss: 1.6243 - val_accuracy: 0.2339 - val_loss: 1.6255\n", "Epoch 24/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 365ms/step - accuracy: 0.2181 - loss: 1.6281 - val_accuracy: 0.2339 - val_loss: 1.6240\n", "Epoch 25/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 364ms/step - accuracy: 0.2336 - loss: 1.6254 - val_accuracy: 0.2339 - val_loss: 1.6226\n", "Epoch 26/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 360ms/step - accuracy: 0.2102 - loss: 1.6278 - val_accuracy: 0.2339 - val_loss: 1.6217\n", "Epoch 27/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 359ms/step - accuracy: 0.2553 - loss: 1.6174 - val_accuracy: 0.2339 - val_loss: 1.6204\n", "Epoch 28/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 366ms/step - accuracy: 0.2452 - loss: 1.6189 - val_accuracy: 0.2339 - val_loss: 1.6191\n", "Epoch 29/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 360ms/step - accuracy: 0.2409 - loss: 1.6163 - val_accuracy: 0.2339 - val_loss: 1.6163\n", "Epoch 30/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 388ms/step - accuracy: 0.2268 - loss: 1.6160 - val_accuracy: 0.2339 - val_loss: 1.6186\n", "Epoch 31/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 393ms/step - accuracy: 0.2233 - loss: 1.6230 - val_accuracy: 0.2339 - val_loss: 1.6179\n", "Epoch 32/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 359ms/step - accuracy: 0.2562 - loss: 1.6140 - val_accuracy: 
0.2339 - val_loss: 1.6170\n", "Epoch 33/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 360ms/step - accuracy: 0.2387 - loss: 1.6158 - val_accuracy: 0.2339 - val_loss: 1.6163\n", "Epoch 34/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 352ms/step - accuracy: 0.2353 - loss: 1.6159 - val_accuracy: 0.2339 - val_loss: 1.6155\n", "Epoch 35/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 367ms/step - accuracy: 0.2491 - loss: 1.6107 - val_accuracy: 0.2339 - val_loss: 1.6145\n", "Epoch 36/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 347ms/step - accuracy: 0.2277 - loss: 1.6167 - val_accuracy: 0.2339 - val_loss: 1.6142\n", "Epoch 37/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 361ms/step - accuracy: 0.2633 - loss: 1.6109 - val_accuracy: 0.2339 - val_loss: 1.6143\n", "Epoch 38/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 355ms/step - accuracy: 0.2541 - loss: 1.6108 - val_accuracy: 0.2339 - val_loss: 1.6136\n", "Epoch 39/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 349ms/step - accuracy: 0.2679 - loss: 1.6037 - val_accuracy: 0.2339 - val_loss: 1.6120\n", "Epoch 40/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 362ms/step - accuracy: 0.2324 - loss: 1.6133 - val_accuracy: 0.2339 - val_loss: 1.6077\n", "Epoch 41/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 357ms/step - accuracy: 0.2440 - loss: 1.6085 - val_accuracy: 0.2339 - val_loss: 1.6138\n", "Epoch 42/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 380ms/step - accuracy: 0.2423 - loss: 1.6108 - val_accuracy: 0.2339 - val_loss: 1.6136\n", "Epoch 43/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 354ms/step - accuracy: 0.2276 - loss: 1.6149 - val_accuracy: 0.2339 - val_loss: 1.6136\n", "Epoch 44/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 357ms/step - accuracy: 0.2335 - loss: 1.6132 - val_accuracy: 0.2339 - val_loss: 1.6128\n", "Epoch 45/50\n", "\u001b[1m19/19\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m9s\u001b[0m 356ms/step - accuracy: 0.2477 - loss: 1.6109 - val_accuracy: 0.2339 - val_loss: 1.6119\n", "\u001b[1m8/8\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 89ms/step - accuracy: 0.1173 - loss: 1.6083\n", "Test loss: 1.607354760169983\n", "Test accuracy: 0.12340425699949265\n" ] } ], "source": [ "import numpy as np\n", "import tensorflow as tf\n", "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n", "from tensorflow.keras.models import Sequential\n", "from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n", "from tensorflow.keras.regularizers import l2\n", "from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n", "\n", "# Define the image size and the path to the dataset\n", "img_size = (150, 150)\n",
"data_dir = r'C:\\kiamep'\n", "\n", "# Create data generators for training, validation and test, with data augmentation\n", "train_datagen = ImageDataGenerator(\n", "    rescale=1./255,\n", "    rotation_range=40,\n", "    width_shift_range=0.2,\n", "    height_shift_range=0.2,\n", "    shear_range=0.2,\n", "    zoom_range=0.2,\n", "    horizontal_flip=True,\n", "    fill_mode='nearest',\n", "    vertical_flip=True,  # vertical flipping added\n", "    brightness_range=[0.8, 1.2]  # brightness adjustment added\n", ")\n", "\n", "train_generator = train_datagen.flow_from_directory(\n", "    data_dir + '\\\\Trainingsdaten',\n", "    target_size=img_size,\n", "    batch_size=32,\n", "    class_mode='categorical'\n", ")\n", "\n", "validation_datagen = ImageDataGenerator(rescale=1./255)\n", "\n", "validation_generator = validation_datagen.flow_from_directory(\n", "    data_dir + '\\\\Validierungsdaten',\n", "    target_size=img_size,\n", "    batch_size=32,\n", "    class_mode='categorical'\n", ")\n", "\n", "test_datagen = ImageDataGenerator(rescale=1./255)\n", "\n", "test_generator = test_datagen.flow_from_directory(\n", "    data_dir + '\\\\Testdaten',\n", "    target_size=img_size,\n", "    batch_size=32,\n", "    class_mode='categorical'\n", ")\n", "\n", "# Define the improved CNN model\n", "# L2 weight regularization against overfitting\n", "# Dropout also against overfitting (randomly drops neurons)\n", "# More neurons: 512 in the dense layer\n", "model = Sequential([\n", "    Conv2D(32, (3, 3), activation='relu', kernel_regularizer=l2(0.001), input_shape=(img_size[0], img_size[1], 3)),\n", "    MaxPooling2D(pool_size=(2, 2)),\n", "\n", "    Conv2D(64, (3, 3), activation='relu', kernel_regularizer=l2(0.001)),\n", "    MaxPooling2D(pool_size=(2, 2)),\n", "\n", "    Conv2D(128, (3, 3), activation='relu', kernel_regularizer=l2(0.001)),\n", "    MaxPooling2D(pool_size=(2, 2)),\n", "\n", "    Conv2D(256, (3, 3), activation='relu', kernel_regularizer=l2(0.001)),\n", "    MaxPooling2D(pool_size=(2, 2)),\n", "\n", "    Flatten(),\n", "    Dense(512, activation='relu', kernel_regularizer=l2(0.001)),\n", "    Dropout(0.5),\n", "    Dense(5, activation='softmax')\n", "])\n", "\n", "# Compile the model\n", "model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n", "\n", "# Define callbacks\n", "# Early stopping also counters overfitting\n", "early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)\n", "model_checkpoint = ModelCheckpoint('best_model.keras', monitor='val_accuracy', save_best_only=True)\n", "\n", "# Train the model (more epochs; early stopping may end the run sooner)\n", "history = model.fit(\n", "    train_generator,\n", "    epochs=50,\n", "    validation_data=validation_generator,\n", "    callbacks=[early_stopping, model_checkpoint]\n", ")\n", "\n", "# Evaluate the model on the test data\n", "test_loss, test_accuracy = model.evaluate(test_generator)\n", "print(\"Test loss:\", test_loss)\n", "print(\"Test accuracy:\", test_accuracy)\n", "\n", "# Save the final model in Keras format; EarlyStopping has already restored the\n", "# best val_loss weights. Saving under a separate name keeps the val_accuracy\n", "# checkpoint in 'best_model.keras' from being overwritten.\n", "model.save('final_model.keras')" ] },
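{ "cell_type": "markdown", "metadata": {}, "source": [ "Training and test accuracy stay near chance level for 5 classes, and `val_accuracy` is frozen at 0.2339, which suggests the network collapses to predicting a single class. Below is a minimal inspection sketch, assuming `matplotlib` is installed; it relies on the `history` object, the `test_generator` and the `best_model.keras` checkpoint created by the cell above:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "\n", "# Plot the accuracy and loss curves recorded in the History object\n", "fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))\n", "ax1.plot(history.history['accuracy'], label='train')\n", "ax1.plot(history.history['val_accuracy'], label='validation')\n", "ax1.set_title('Accuracy')\n", "ax1.legend()\n", "ax2.plot(history.history['loss'], label='train')\n", "ax2.plot(history.history['val_loss'], label='validation')\n", "ax2.set_title('Loss')\n", "ax2.legend()\n", "plt.show()\n", "\n", "# Reload the checkpoint written by ModelCheckpoint and evaluate it on the test set\n", "best_model = tf.keras.models.load_model('best_model.keras')\n", "best_loss, best_acc = best_model.evaluate(test_generator)\n", "print('Best-checkpoint test loss:', best_loss)\n", "print('Best-checkpoint test accuracy:', best_acc)" ] } ], "metadata": { "kernelspec": { "display_name": "kia", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.2" } }, "nbformat": 4, "nbformat_minor": 2 }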