besar00 commited on
Commit
a9edbc9
β€’
1 Parent(s): 8a192eb

Upload 12 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Pokemon_transfer_learning.keras filter=lfs diff=lfs merge=lfs -text
Pokemon_transfer_learning.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:66eb1773d3990704727c86b7204651478e43773a8365609627cc5b1aaf0dfff6
3
+ size 250560147
app.ipynb ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 10,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "import gradio as gr\n",
10
+ "import tensorflow as tf\n",
11
+ "import numpy as np\n",
12
+ "from PIL import Image"
13
+ ]
14
+ },
15
+ {
16
+ "cell_type": "code",
17
+ "execution_count": 11,
18
+ "metadata": {},
19
+ "outputs": [],
20
+ "source": [
21
+ "model_path = \"Pokemon_transfer_learning.keras\"\n",
22
+ "model = tf.keras.models.load_model(model_path)"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "code",
27
+ "execution_count": 12,
28
+ "metadata": {},
29
+ "outputs": [],
30
+ "source": [
31
+ "# Define the core prediction function\n",
32
+ "def predict_pokemon(image):\n",
33
+ " # Preprocess image\n",
34
+ " print(type(image))\n",
35
+ " image = Image.fromarray(image.astype('uint8')) # Convert numpy array to PIL image\n",
36
+ "    image = image.resize((150, 150)) # resize to the 150x150 input size the model expects\n",
37
+ " image = np.array(image)\n",
38
+ " image = np.expand_dims(image, axis=0) # same as image[None, ...]\n",
39
+ " \n",
40
+ " # Predict\n",
41
+ " prediction = model.predict(image)\n",
42
+ " \n",
43
+ "    # NOTE: no activation is applied to the raw model output here; the values below\n",
44
+ "    # are probabilities only if the model's output layer already uses softmax/sigmoid\n",
45
+ " prediction = np.round(prediction, 2)\n",
46
+ " # Separate the probabilities for each class\n",
47
+ " p_abra = prediction[0][0] # Probability for class 'abra'\n",
48
+ "    p_beedrill = prediction[0][1] # Probability for class 'beedrill'\n",
49
+ "    p_sandshrew = prediction[0][2] # Probability for class 'sandshrew'\n",
50
+ " return {'abra': p_abra, 'beedrill': p_beedrill, 'sandshrew': p_sandshrew}"
51
+ ]
52
+ },
53
+ {
54
+ "cell_type": "code",
55
+ "execution_count": 13,
56
+ "metadata": {},
57
+ "outputs": [
58
+ {
59
+ "name": "stdout",
60
+ "output_type": "stream",
61
+ "text": [
62
+ "Running on local URL: http://127.0.0.1:7862\n",
63
+ "\n",
64
+ "To create a public link, set `share=True` in `launch()`.\n"
65
+ ]
66
+ },
67
+ {
68
+ "data": {
69
+ "text/html": [
70
+ "<div><iframe src=\"http://127.0.0.1:7862/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
71
+ ],
72
+ "text/plain": [
73
+ "<IPython.core.display.HTML object>"
74
+ ]
75
+ },
76
+ "metadata": {},
77
+ "output_type": "display_data"
78
+ },
79
+ {
80
+ "data": {
81
+ "text/plain": []
82
+ },
83
+ "execution_count": 13,
84
+ "metadata": {},
85
+ "output_type": "execute_result"
86
+ },
87
+ {
88
+ "name": "stdout",
89
+ "output_type": "stream",
90
+ "text": [
91
+ "<class 'numpy.ndarray'>\n",
92
+ "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 1s/step\n"
93
+ ]
94
+ }
95
+ ],
96
+ "source": [
97
+ "# Create the Gradio interface\n",
98
+ "input_image = gr.Image()\n",
99
+ "iface = gr.Interface(\n",
100
+ " fn=predict_pokemon,\n",
101
+ " inputs=input_image, \n",
102
+ " outputs=gr.Label(),\n",
103
+ " examples=[\"images/abra1.png\", \"images/abra2.jpg\", \"images/abra3.png\", \"images/beedrill1.png\", \"images/beedrill2.png\", \"images/beedrill3.jpg\", \"images/sandshrew1.png\", \"images/sandshrew2.jpg\", \"images/sandshrew3.png\"], \n",
104
+ "    description=\"A transfer-learning image classifier for the Pokemon Abra, Beedrill and Sandshrew.\")\n",
105
+ "iface.launch(share=True)"
106
+ ]
107
+ }
108
+ ],
109
+ "metadata": {
110
+ "kernelspec": {
111
+ "display_name": "venv_new",
112
+ "language": "python",
113
+ "name": "python3"
114
+ },
115
+ "language_info": {
116
+ "codemirror_mode": {
117
+ "name": "ipython",
118
+ "version": 3
119
+ },
120
+ "file_extension": ".py",
121
+ "mimetype": "text/x-python",
122
+ "name": "python",
123
+ "nbconvert_exporter": "python",
124
+ "pygments_lexer": "ipython3",
125
+ "version": "3.11.3"
126
+ }
127
+ },
128
+ "nbformat": 4,
129
+ "nbformat_minor": 2
130
+ }
images/abra1.png ADDED
images/abra2.jpg ADDED
images/abra3.png ADDED
images/beedrill1.png ADDED
images/beedrill2.png ADDED
images/beedrill3.jpg ADDED
images/sandshrew1.png ADDED
images/sandshrew2.jpg ADDED
images/sandshrew3.png ADDED
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ tensorflow==2.16.1