better model keras_digit_test_include.h5
- app.py (+76 -103)
- keras_digit_temp.h5 (+0 -3)
app.py
CHANGED
@@ -2,27 +2,85 @@ import tensorflow as tf
 from tensorflow import keras
 from tensorflow.keras import Sequential
 from tensorflow.keras.layers import Dense, Flatten
+from tensorflow.keras import layers
 import matplotlib.pyplot as plt
 import gradio as gr
 import numpy as np
 import pandas as pd
+from PIL import Image as im
+import PIL
 #%matplotlib inline
-
+num_classes = 10
+input_shape = (28, 28, 1)

 objt=tf.keras.datasets.mnist
 (X_train, y_train), (X_test,y_test)=objt.load_data()

-print(X_train.shape)

-
+# X_train = X_train.astype("float32") / 255
+# X_test = X_test.astype("float32") / 255
+# # Make sure images have shape (28, 28, 1)
+# X_train = np.expand_dims(X_train, -1)
+# X_test = np.expand_dims(X_test, -1)
+# print("x_train shape:", X_train.shape)
+# print(X_train.shape[0], "train samples")
+# print(X_test.shape[0], "test samples")
+
+
+# # convert class vectors to binary class matrices
+# y_train = keras.utils.to_categorical(y_train, num_classes)
+# y_test = keras.utils.to_categorical(y_test, num_classes)
+
+# X_new=np.concatenate((X_train, X_test))
+# y_new=np.concatenate((y_train, y_test))
+# print(X_train.shape)
+# print(X_new.shape)
+# print(y_new.shape)
+
+# print(y_train)
+
+# model = keras.Sequential(
+#     [
+#         keras.Input(shape=input_shape),
+#         layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
+#         layers.MaxPooling2D(pool_size=(2, 2)),
+#         layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
+#         layers.MaxPooling2D(pool_size=(2, 2)),
+#         layers.Flatten(),
+#         layers.Dropout(0.5),
+#         layers.Dense(num_classes, activation="softmax"),
+#     ]
+# )
+
+# model.summary()
+
+# batch_size = 128
+# epochs = 15
+
+# model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
+
+# model.fit(X_new, y_new, batch_size=batch_size, epochs=epochs, validation_split=0.1)
+# model.save("keras_digit_test_include.h5")
+
+# score = model.evaluate(X_test, y_test, verbose=0)
+# print("Test loss:", score[0])
+# print("Test accuracy:", score[1])
+
+# loaded_model = keras.models.load_model('keras_digit_accurate.h5')
+# score = loaded_model.evaluate(X_test, y_test, verbose=0)
+# print("Test loss:", score[0])
+# print("Test accuracy:", score[1])
+
+#................................................................................................
+

 # for i in range(9):
 #     plt.subplot(330+1+i)
 #     plt.imshow(X_train[i])
 #     plt.show()

-X_train=X_train/255.0
-X_test=X_test/255.0
+# X_train=X_train/255.0
+# X_test=X_test/255.0

 # model=tf.keras.models.Sequential([Flatten(input_shape=(28,28)),

@@ -45,36 +103,30 @@ X_test=X_test/255.0
 # predicted=model.predict(test)
 # print(predicted)

+#count=0
+
 def predict_digit(img):
     if img is not None:

-        loaded_model = keras.models.load_model('keras_digit_temp.h5')
-
+        loaded_model = keras.models.load_model('keras_digit_test_include.h5')
+

+        #img_data = im.fromarray(img)
+        #img_data.save(f"image1.jpg")
+        #count=count+1
         img_3d=img.reshape(-1,28,28)
         img_resized=img_3d/255.0
         pred_prob=loaded_model.predict(img_resized)
-
+
         pred_prob=pred_prob*100

         print((pred_prob))
-
-        # prob1= 100*pred_prob[1]
-        # prob2= 100*pred_prob[2]
-        # prob3= 100*pred_prob[3]
-        # prob4= 100*pred_prob[4]
-        # prob5= 100*pred_prob[5]
-        # prob6= 100*pred_prob[6]
-        # prob7= 100*pred_prob[7]
-        # prob8= 100*pred_prob[8]
-        # prob9= 100*pred_prob[9]
-
-        # print(prob2)
+

         simple = pd.DataFrame(
             {
                 "a": ["0", "1", "2", "3", "4", "5", "6", "7", "8","9"],
-                "b": pred_prob[0],
+                "b": pred_prob[0],
             }
         )

@@ -95,7 +147,7 @@ def predict_digit(img):
         simple_empty = pd.DataFrame(
             {
                 "a": ["0", "1", "2", "3", "4", "5", "6", "7", "8","9"],
-                "b": [0,0,0,0,0,0,0,0,0,0],
+                "b": [0,0,0,0,0,0,0,0,0,0],
             }
         )

@@ -109,6 +161,7 @@ def predict_digit(img):
            tooltip=["a", "b"],
            vertical=False,
            y_lim=[0, 100],
+
        )


@@ -131,7 +184,7 @@ with gr.Blocks(css=css) as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown("<h1>Digit Identifier</h1>", elem_id='title_head')
-           gr.Markdown("<h2>By Alok</h2>",elem_id=
+           gr.Markdown("<h2>By Alok</h2>", elem_id="name_head")
    with gr.Row():
        with gr.Column():
            with gr.Row():
@@ -156,83 +209,3 @@ demo.launch()



-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-# import tensorflow as tf
-# from tensorflow import keras
-# from tensorflow.keras import Sequential
-# from tensorflow.keras.layers import Dense, Flatten
-# import matplotlib.pyplot as plt
-# import gradio as gr
-# import numpy as np
-# %matplotlib inline
-
-
-# objt=tf.keras.datasets.mnist
-# (X_train, y_train), (X_test,y_test)=objt.load_data()
-
-# print(X_train.shape)
-
-# print(y_train)
-
-# for i in range(9):
-#     plt.subplot(330+1+i)
-#     plt.imshow(X_train[i])
-#     plt.show()
-
-# X_train=X_train/255.0
-# X_test=X_test/255.0
-
-# model=tf.keras.models.Sequential([Flatten(input_shape=(28,28)),
-
-#                Dense(650,activation='relu'),
-
-#                Dense(450,activation='relu'),
-
-#                Dense(250,activation='relu'),
-
-#                Dense(150,activation='relu'),
-
-#                Dense(10,activation=tf.nn.softmax)])
-
-# model.compile(optimizer='adam',
-#               loss='sparse_categorical_crossentropy',
-#               metrics=['accuracy'])
-# model.fit(X_train,y_train, epochs=10)
-# model.save("keras_digit_temp.h5")
-# test=X_test[0].reshape(-1,28,28)
-# predicted=model.predict(test)
-# print(predicted)
-
-# def prdict_digit(img):
-#     loaded_model = keras.models.load_model('keras_digit_temp.h5')
-#     img_3d=img.reshape(-1,28,28)
-#     img_resized=img_3d/255.0
-#     pred_prob=loaded_model.predict(img_resized)
-#     predicted_val=np.argmax(pred_prob)
-#     return int(predicted_val)
-
-# iface=gr.Interface(prdict_digit, inputs='sketchpad', outputs='label').launch()
-
-# iface.launch(debug='true')
-
-
-
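Note: the commented-out block added at the top of app.py appears to be the training script that produced keras_digit_test_include.h5. Below is a minimal, runnable sketch of that pipeline, reconstructed from those comments; the Conv2D architecture, batch size 128, 15 epochs, the train+test concatenation, and the output filename all come from the diff, and nothing here has been re-run to verify the resulting accuracy.

# Sketch of the training pipeline implied by the commented-out code in app.py.
# Assumes TensorFlow 2.x; hyperparameters are taken from the comments, not from a verified run.
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

num_classes = 10
input_shape = (28, 28, 1)

(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()

# Scale pixels to [0, 1] and add the channel dimension expected by Conv2D.
X_train = np.expand_dims(X_train.astype("float32") / 255, -1)
X_test = np.expand_dims(X_test.astype("float32") / 255, -1)

# One-hot labels, since the loss below is categorical_crossentropy.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# The comments train on train+test combined; that raises apparent accuracy
# but leaves no untouched test set for the evaluate() calls that follow.
X_new = np.concatenate((X_train, X_test))
y_new = np.concatenate((y_train, y_test))

model = keras.Sequential([
    keras.Input(shape=input_shape),
    layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Flatten(),
    layers.Dropout(0.5),
    layers.Dense(num_classes, activation="softmax"),
])

model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(X_new, y_new, batch_size=128, epochs=15, validation_split=0.1)
model.save("keras_digit_test_include.h5")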
keras_digit_temp.h5
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:381032914c5af60afcaa0aaf305475fd6d9228ef6496bb56c3153913b1974d5c
-size 11511128
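Note: on the inference side, predict_digit in app.py loads keras_digit_test_include.h5 on every call, scales the sketchpad image, and feeds the resulting probabilities (as percentages) into the pandas DataFrame that gr.BarPlot displays. The sketch below shows that path under one assumption: the saved model is the Conv2D network sketched above, which expects a trailing channel dimension (app.py itself reshapes to (-1, 28, 28)). The helper name digit_probabilities is illustrative and not part of app.py.

import numpy as np
from tensorflow import keras

# Illustrative helper, not in app.py: the same steps predict_digit performs,
# assuming a 28x28 grayscale numpy array from the Gradio sketchpad.
def digit_probabilities(img, model_path="keras_digit_test_include.h5"):
    loaded_model = keras.models.load_model(model_path)
    # Add batch and channel dimensions, scale pixel values to [0, 1].
    img_4d = img.reshape(-1, 28, 28, 1) / 255.0
    pred_prob = loaded_model.predict(img_4d) * 100  # shape (1, 10), in percent
    # One value per digit 0-9; app.py puts these in column "b" of the
    # DataFrame plotted by gr.BarPlot with y_lim=[0, 100].
    return pred_prob[0]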