Kuwegjer committed
Commit d8c5c5c • 1 Parent(s): 1fdae34

Upload 3 files

Files changed (3)
  1. Model_Training_onepiece.ipynb +272 -0
  2. app.py +31 -0
  3. requirements.txt +22 -0
Model_Training_onepiece.ipynb ADDED
@@ -0,0 +1,272 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Import the required libraries\n",
+ "import numpy as np\n",
+ "import tensorflow as tf\n",
+ "from tensorflow.keras.applications import ResNet50\n",
+ "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
+ "from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Dropout\n",
+ "from tensorflow.keras.models import Model\n",
+ "from tensorflow.keras.optimizers import Adam\n",
+ "from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Data preparation\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Found 78 images belonging to 6 classes.\n",
+ "Found 16 images belonging to 6 classes.\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Data preparation\n",
+ "base_dir = r'C:\\Daten\\Studium Wirtschaftsinformatik\\Semester 6 TZ\\KI-Anwendungen\\Übungen\\Übung2\\Abschluss\\Datensätze\\Strohhüte' # path to the parent folder that contains the class folders (raw string, so the backslashes are not treated as escapes)\n",
+ "datagen = ImageDataGenerator(\n",
+ " rescale=1./255,\n",
+ " rotation_range=40,\n",
+ " width_shift_range=0.2,\n",
+ " height_shift_range=0.2,\n",
+ " shear_range=0.2,\n",
+ " zoom_range=0.2,\n",
+ " horizontal_flip=True,\n",
+ " fill_mode='nearest',\n",
+ " validation_split=0.2 # keep the training/validation split\n",
+ ")\n",
+ "train_generator = datagen.flow_from_directory(\n",
+ " base_dir,\n",
+ " target_size=(224, 224), # ResNet50 input dimensions\n",
+ " batch_size=32, # adjust to your system's capacity\n",
+ " class_mode='categorical',\n",
+ " subset='training' # use the 'subset' argument for the split\n",
+ ")\n",
+ "\n",
+ "validation_generator = datagen.flow_from_directory(\n",
+ " base_dir,\n",
+ " target_size=(224, 224),\n",
+ " batch_size=32,\n",
+ " class_mode='categorical',\n",
+ " subset='validation'\n",
+ ")"
+ ]
+ },
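+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sanity check: flow_from_directory assigns class indices alphabetically by\n",
+ "# folder name, so this mapping must match the classes list used in app.py.\n",
+ "print(train_generator.class_indices)"
+ ]
+ },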
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Model setup"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Model setup\n",
+ "base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))\n",
+ "base_model.trainable = False # freeze the base model first\n",
+ "\n",
+ "x = GlobalAveragePooling2D()(base_model.output)\n",
+ "x = Dense(1024, activation='relu')(x)\n",
+ "x = Dropout(0.5)(x) # dropout added to reduce overfitting\n",
+ "predictions = Dense(6, activation='softmax')(x) # one output unit per class; the dataset has 6 classes\n",
+ "\n",
+ "model = Model(inputs=base_model.input, outputs=predictions)\n",
+ "model.compile(optimizer=Adam(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Model training"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Train the model\n",
+ "history = model.fit(\n",
+ " train_generator,\n",
+ " steps_per_epoch=train_generator.samples // train_generator.batch_size,\n",
+ " validation_data=validation_generator,\n",
+ " validation_steps=validation_generator.samples // validation_generator.batch_size,\n",
+ " epochs=20,\n",
+ " callbacks=[\n",
+ " ModelCheckpoint('best_model.keras', save_best_only=True),\n",
+ " EarlyStopping(monitor='val_loss', patience=5),\n",
+ " ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=2)\n",
+ " ]\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Fine-tuning the model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Epoch 1/10\n",
+ "\u001b[1m9/9\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m148s\u001b[0m 12s/step - accuracy: 0.7070 - loss: 1.0460 - val_accuracy: 0.6094 - val_loss: 0.9729\n",
+ "Epoch 2/10\n",
+ "\u001b[1m9/9\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 135ms/step - accuracy: 1.0000 - loss: 0.0769 - val_accuracy: 0.5714 - val_loss: 1.0434\n",
+ "Epoch 3/10\n",
+ "\u001b[1m9/9\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m100s\u001b[0m 10s/step - accuracy: 0.9677 - loss: 0.1108 - val_accuracy: 0.5469 - val_loss: 0.9639\n",
+ "Epoch 4/10\n",
+ "\u001b[1m9/9\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m10s\u001b[0m 111ms/step - accuracy: 1.0000 - loss: 0.0381 - val_accuracy: 0.7143 - val_loss: 0.9019\n",
+ "Epoch 5/10\n",
+ "\u001b[1m9/9\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m96s\u001b[0m 10s/step - accuracy: 0.9992 - loss: 0.0220 - val_accuracy: 0.2969 - val_loss: 1.1206\n",
+ "Epoch 6/10\n",
+ "\u001b[1m9/9\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m12s\u001b[0m 164ms/step - accuracy: 1.0000 - loss: 0.0226 - val_accuracy: 0.1429 - val_loss: 1.1233\n",
+ "Epoch 7/10\n",
+ "\u001b[1m9/9\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m97s\u001b[0m 10s/step - accuracy: 1.0000 - loss: 0.0062 - val_accuracy: 0.1719 - val_loss: 1.4363\n",
+ "Epoch 8/10\n",
+ "\u001b[1m9/9\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m11s\u001b[0m 103ms/step - accuracy: 0.9688 - loss: 0.0287 - val_accuracy: 0.1429 - val_loss: 1.4406\n",
+ "Epoch 9/10\n",
+ "\u001b[1m9/9\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m98s\u001b[0m 10s/step - accuracy: 0.9907 - loss: 0.0160 - val_accuracy: 0.2344 - val_loss: 1.4151\n",
+ "Epoch 10/10\n",
+ "\u001b[1m9/9\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m11s\u001b[0m 101ms/step - accuracy: 1.0000 - loss: 0.0038 - val_accuracy: 0.0000e+00 - val_loss: 1.6847\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Fine-tune the model: unfreeze the base model and retrain with a lower learning rate\n",
+ "for layer in base_model.layers:\n",
+ " layer.trainable = True\n",
+ "\n",
+ "model.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])\n",
+ "history_fine = model.fit(\n",
+ " train_generator,\n",
+ " steps_per_epoch=train_generator.samples // train_generator.batch_size,\n",
+ " validation_data=validation_generator,\n",
+ " validation_steps=validation_generator.samples // validation_generator.batch_size,\n",
+ " epochs=10\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Evaluation and results"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m7s\u001b[0m 3s/step - accuracy: 0.2708 - loss: 1.4508\n",
+ "Performance before fine-tuning: 0.640625\n",
+ "Performance after fine-tuning: 0.0\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Evaluate the results\n",
+ "eval_result = model.evaluate(validation_generator, steps=validation_generator.samples // validation_generator.batch_size)\n",
+ "print(f'Performance before fine-tuning: {history.history[\"val_accuracy\"][-1]}')\n",
+ "print(f'Performance after fine-tuning: {history_fine.history[\"val_accuracy\"][-1]}')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "WARNING:absl:You are saving your model as an HDF5 file via `model.save()` or `keras.saving.save_model(model)`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')` or `keras.saving.save_model(model, 'my_model.keras')`. \n"
+ ]
+ }
+ ],
+ "source": [
+ "model.save('mein_modell.h5')"
+ ]
+ },
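+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Smoke test: reload the saved model and confirm the output layer has one\n",
+ "# unit per character class.\n",
+ "from tensorflow.keras.models import load_model\n",
+ "reloaded = load_model('mein_modell.h5')\n",
+ "print(reloaded.output_shape) # expected: (None, 6)"
+ ]
+ }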
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "kia",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.19"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
app.py ADDED
@@ -0,0 +1,31 @@
+ import gradio as gr
+ from PIL import Image
+ import numpy as np
+ from tensorflow.keras.preprocessing import image as keras_image
+ from tensorflow.keras.models import load_model
+
+ # Load the trained model
+ model = load_model(r'C:\Daten\Studium Wirtschaftsinformatik\Semester 6 TZ\KI-Anwendungen\Übungen\Übung2\Abschluss\mein_modell.h5')
+
+ def predict_character(img):
+     img = Image.fromarray(img.astype('uint8'), 'RGB')  # ensure the image is in RGB
+     img = img.resize((224, 224))  # resize to the model's input size
+     img_array = keras_image.img_to_array(img)  # convert the image to an array
+     img_array = np.expand_dims(img_array, axis=0)  # add a batch dimension
+     img_array = img_array / 255.0  # match the 1./255 rescaling used by the training ImageDataGenerator
+
+     prediction = model.predict(img_array)  # run the model
+     classes = ['Chopper', 'Nami', 'Ruffy', 'Sanji', 'Usopp', 'Zoro']  # class names in the alphabetical folder order used by flow_from_directory
+     return {classes[i]: float(prediction[0][i]) for i in range(len(classes))}  # map each class to its predicted probability
+
+ # Define the Gradio interface
+ interface = gr.Interface(
+     fn=predict_character,
+     inputs=gr.Image(),  # the upload arrives as a numpy array; resizing happens in predict_character
+     outputs=gr.Label(num_top_classes=6),  # show all six predictions
+     title="One Piece Character Classifier",
+     description="Upload an image of a One Piece character and the classifier will predict which character it is."
+ )
+
+ # Launch the interface
+ interface.launch()
requirements.txt ADDED
@@ -0,0 +1,22 @@
+ blinker==1.7.0
+ click==8.1.7
+ Flask==3.0.2
+ Flask-Cors==4.0.0
+ itsdangerous==2.1.2
+ Jinja2==3.1.3
+ joblib==1.3.2
+ MarkupSafe==2.1.5
+ numpy==1.26.4
+ pandas==2.2.1
+ python-dateutil==2.8.2
+ pytz==2024.1
+ scikit-learn==1.4.1.post1
+ scipy==1.12.0
+ six==1.16.0
+ threadpoolctl==3.3.0
+ tzdata==2024.1
+ Werkzeug==3.0.1
+ # runtime dependencies imported by app.py (left unpinned; pin versions as needed)
+ gradio
+ Pillow
+ tensorflow