Sreeja123 committed on
Commit
52e296b
1 Parent(s): 8fa9632
Files changed (1)
  1. main.py +1175 -0
main.py ADDED
@@ -0,0 +1,1175 @@
+ import streamlit as st
+ import os
+ import tensorflow as tf
+ import keras
+ from tensorflow.keras.utils import to_categorical  # public API; the private tensorflow.python... path breaks on newer TF
+ from keras.models import Sequential
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import pandas as pd
+ import cv2
+
+ from sklearn.model_selection import train_test_split
+ # from keras.layers import TimeDistributed as TD
+ from Time_Distributed import TimeDistributed as TD
+ import Memristor as mem
+ from SCNN import Integrator_layer, Reduce_sum, sparse_data_generator_non_spiking
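+ # NOTE: Time_Distributed, Memristor and SCNN are imported bare, so they are
+ # presumably local project modules that sit alongside main.py (not PyPI
+ # packages): a custom TimeDistributed wrapper, the memristor device models,
+ # and the spiking-CNN layers plus data generator used further down.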
+
+ from sklearn.metrics import precision_score
+ from sklearn.metrics import recall_score
+ from sklearn.metrics import f1_score
+
+ print('Num GPUs Available: ', tf.config.list_physical_devices('GPU'))
+ st.success('This is a success message!', icon="✅")
+
+ if 'nn_type' not in st.session_state:
+     st.session_state.nn_type = None
+ if 'snn' not in st.session_state:
+     st.session_state.snn = False
+ if 'load' not in st.session_state:
+     st.session_state.load = False
+ if 'upld' not in st.session_state:
+     st.session_state.upld = False
+ if 'custom' not in st.session_state:
+     st.session_state.custom = False
+ # Initialize session_state for added layers
+ if 'submittedLayers' not in st.session_state:
+     st.session_state.submittedLayers = []
+
+ if 'descr' not in st.session_state:
+     st.session_state.descr = {}
+ if 'x_train' not in st.session_state:
+     st.session_state.x_train = None
+ if 'y_train' not in st.session_state:
+     st.session_state.y_train = None
+ if 'x_test' not in st.session_state:
+     st.session_state.x_test = None
+ if 'y_test' not in st.session_state:
+     st.session_state.y_test = None
+ if 'ip_shape' not in st.session_state:
+     st.session_state.ip_shape = None
+ if 'model' not in st.session_state:
+     st.session_state.model = None
+
+
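+ # Streamlit reruns this whole script on every widget interaction, so anything
+ # that has to survive a rerun (the dataset, the growing model, the list of
+ # submitted layers) is kept in st.session_state rather than in plain globals.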
+ st.title("Build your Neural Network")
+
+ # Select box for the neural network type
+ nn_type = st.selectbox("Please specify the type of Neural Network", ("Hardware", "Software"))
+ makeIt = st.button('Make It')
+
+ c1, c2, c3 = st.columns((8, 1, 1))
+ with c1:
+     st.write('Are you going to build a SCNN?', st.session_state.snn)
+
+ with c2:
+     snn = st.button('Yes')
+ with c3:
+     No_snn = st.button('No')
+
+ if snn:
+     st.session_state.snn = True
+ if No_snn:
+     st.session_state.snn = False
+
+ if makeIt:
+     st.session_state.nn_type = nn_type
+     st.session_state.load = False
+
+
+ # Select box for selecting the dataset
+ st.session_state.dataset = st.sidebar.selectbox("Select and Load dataset", ("mnist", "cifar10", "cifar100", "Iris"))
+
+ # uploaded_file = st.sidebar.file_uploader("Choose a csv file")
+
+ # if uploaded_file is not None:
+
+ #     # Can be used wherever a "file-like" object is accepted:
+ #     dataframe = pd.read_csv(uploaded_file)
+ #     st.write(dataframe)
+
+
+ c1, c2 = st.sidebar.columns((1, 2))
+ with c1:
+     load = st.button('Load')
+ with c2:
+     upld = st.button('Upload image dataset')
+
+ if load:
+     st.session_state.load = True
+     st.session_state.submittedLayers = []
+
+ # 'Upload image dataset' toggles the custom-upload form in the sidebar
+ if upld:
+     st.session_state.upld = not st.session_state.upld
+
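+ # Builds a dataset from a directory of images laid out as
+ #     root/<class_name_1>/*.jpg, root/<class_name_2>/*.jpg, ...
+ # Each subfolder becomes one integer label. Note that `shape` arrives as the
+ # raw string typed into the sidebar and is parsed with eval(), so it trusts
+ # the user's input (fine for a local tool, unsafe for a public deployment).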
+ def custom_dataset(path, shape, test_size):
+     shape = eval(shape)  # e.g. "(32,32)" -> (32, 32)
+     classes = []
+     for p in os.listdir(path):
+         if os.path.isdir(os.path.join(path, p)):
+             classes.append(p)
+     images = []
+     label = []
+     label_count = 0
+     for clss in classes:
+         trg_path = os.path.join(path, clss)
+         for img in os.listdir(trg_path):
+             img = cv2.imread(trg_path + '/' + img)
+             img = cv2.resize(img, shape)
+             img_array = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+             images.append(img_array)
+             label.append(label_count)
+         label_count += 1  # next folder gets the next integer label
+     images = np.array(images)
+     label = np.array(label)
+     n_classes = len(classes)
+     x_train, x_test, y_train, y_test = train_test_split(images, label, test_size=test_size, random_state=42)
+     return x_train, x_test, y_train, y_test, n_classes
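+ # A hypothetical call, assuming ./flowers holds one sub-folder per class:
+ #     x_tr, x_te, y_tr, y_te, n = custom_dataset('./flowers', '(32,32)', 0.2)
+ # would return an 80/20 split of (N, 32, 32, 3) RGB arrays plus integer labels.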
+
+
+ if st.session_state.upld:
+     st.sidebar.warning('The image folder should have the format "Root folder --> class1 folder --> (images), class2 folder --> (images), etc."')
+     # st.sidebar.caption('Root folder--> class1 folder-->(images), class2 folder-->(images), etc')
+     rpath = st.sidebar.text_input('Give path of the Root folder')
+
+     shape = st.sidebar.text_input('Target shape in tuple format')
+     st.sidebar.caption('Target shape is the shape to which all your images will be resized, e.g. (32,32)')
+
+     test_size = st.sidebar.number_input('Test_size for splitting dataset', min_value=0.0, max_value=1.0, value=0.2)
+
+     done = st.sidebar.button('Done')
+     if done:
+         st.session_state.x_train, st.session_state.x_test, st.session_state.y_train, st.session_state.y_test, n_classes = custom_dataset(rpath, shape, test_size)
+         st.sidebar.success('Successfully uploaded')
+         st.session_state.y_train = np.asarray(st.session_state.y_train).astype('float32').reshape((-1, 1))
+         st.session_state.y_test = np.asarray(st.session_state.y_test).astype('float32').reshape((-1, 1))
+         st.session_state.custom = True
+         st.session_state.descr = {'Number of classes': n_classes,
+                                   'x_train shape ': st.session_state.x_train.shape,
+                                   'x_test shape ': st.session_state.x_test.shape,
+                                   'y_train shape ': st.session_state.y_train.shape,
+                                   'y_test shape ': st.session_state.y_test.shape}
+         st.session_state.ip_shape = st.session_state.x_train.shape[1:]
+         st.session_state.model = Sequential()
+         st.session_state.model.add(tf.keras.layers.InputLayer(input_shape=st.session_state.ip_shape))
+
+
+ # show the hint only when neither a built-in nor a custom dataset is ready
+ if not st.session_state.load and not st.session_state.custom:
+     st.write('Load or upload the dataset from the sidebar')
+
+ # function for loading the selected built-in dataset; returns a description
+ # dict, the input shape, and the train/test splits
+ def get_dataset(dataset):
+     if dataset == "mnist":
+         descr = {
+             "Dataset": "MNIST digits classification dataset",
+             "About": "This is a dataset of 60,000 28x28 grayscale images of the 10 digits, along with a test set of 10,000 images.",
+             "xTrain": "uint8 NumPy array of grayscale image data with shape (60000, 28, 28), containing the training data. Pixel values range from 0 to 255.",
+             "yTrain": "uint8 NumPy array of digit labels (integers in range 0-9) with shape (60000,) for the training data.",
+             "xTest": "uint8 NumPy array of grayscale image data with shape (10000, 28, 28), containing the test data. Pixel values range from 0 to 255.",
+             "yTest": "uint8 NumPy array of digit labels (integers in range 0-9) with shape (10000,) for the test data."
+         }
+         (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
+
+         # Model / data parameters
+         num_classes = 10
+         ip_shape = (28, 28, 1)
+
+         # Scale images to the [0, 1] range
+         x_train = x_train.astype("float32") / 255
+         x_test = x_test.astype("float32") / 255
+
+         # Make sure images have shape (28, 28, 1)
+         x_train = np.expand_dims(x_train, -1)
+         x_test = np.expand_dims(x_test, -1)
+
+         # convert class vectors to binary class matrices
+         y_train = to_categorical(y_train, num_classes)
+         y_test = to_categorical(y_test, num_classes)
+         st.sidebar.success("Dataset loaded", icon='🤩')
+
+     elif dataset == "cifar10":
+         descr = {
+             "Dataset": "CIFAR10 small images classification dataset",
+             "About": "This is a dataset of 50,000 32x32 color training images and 10,000 test images, labeled over 10 categories.",
+             "xTrain": "uint8 NumPy array of color image data with shape (50000, 32, 32, 3), containing the training data. Pixel values range from 0 to 255.",
+             "yTrain": "uint8 NumPy array of labels (integers in range 0-9) with shape (50000, 1) for the training data.",
+             "xTest": "uint8 NumPy array of color image data with shape (10000, 32, 32, 3), containing the test data. Pixel values range from 0 to 255.",
+             "yTest": "uint8 NumPy array of labels (integers in range 0-9) with shape (10000, 1) for the test data."
+         }
+         (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
+         num_classes = 10
+         ip_shape = (32, 32, 3)
+
+         # Scale images to the [0, 1] range
+         x_train = x_train.astype("float32") / 255.0
+         x_test = x_test.astype("float32") / 255.0
+
+         # convert class vectors to binary class matrices
+         y_train = to_categorical(y_train, num_classes)
+         y_test = to_categorical(y_test, num_classes)
+         st.sidebar.success("Dataset loaded", icon='🤩')
+
+     elif dataset == "cifar100":
+         descr = {
+             "Dataset": "CIFAR100 small images classification dataset",
+             "About": "This is a dataset of 50,000 32x32 color training images and 10,000 test images, labeled over 100 fine-grained classes that are grouped into 20 coarse-grained classes.",
+             "xTrain": "uint8 NumPy array of color image data with shape (50000, 32, 32, 3), containing the training data. Pixel values range from 0 to 255.",
+             "yTrain": "uint8 NumPy array of labels (integers in range 0-99) with shape (50000, 1) for the training data.",
+             "xTest": "uint8 NumPy array of color image data with shape (10000, 32, 32, 3), containing the test data. Pixel values range from 0 to 255.",
+             "yTest": "uint8 NumPy array of labels (integers in range 0-99) with shape (10000, 1) for the test data."
+         }
+         (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar100.load_data()
+         num_classes = 100
+         ip_shape = (32, 32, 3)
+
+         # Scale images to the [0, 1] range
+         x_train = x_train.astype("float32") / 255.0
+         x_test = x_test.astype("float32") / 255.0
+
+         # convert class vectors to binary class matrices
+         y_train = to_categorical(y_train, num_classes)
+         y_test = to_categorical(y_test, num_classes)
+         st.sidebar.success("Dataset loaded", icon='🤩')
+
+     elif dataset == 'Iris':
+         from sklearn.datasets import load_iris
+         from sklearn.preprocessing import OneHotEncoder
+         from sklearn.model_selection import train_test_split
+
+         iris_data = load_iris()
+         x = iris_data.data
+         y_ = iris_data.target.reshape(-1, 1)
+
+         encoder = OneHotEncoder(sparse=False)  # use sparse_output=False on scikit-learn >= 1.2
+         y = encoder.fit_transform(y_)
+
+         x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20)
+         ip_shape = (4,)
+         descr = {'Dataset': 'Iris dataset',
+                  'About': 'This dataset consists of petal and sepal measurements for 3 different types of irises (Setosa, Versicolour, and Virginica), stored in a 150x4 numpy.ndarray. The rows are the samples and the columns are: Sepal Length, Sepal Width, Petal Length and Petal Width.',
+                  'x_train': 'x_train shape is (120, 4)',
+                  'x_test': 'x_test shape is (30, 4)',
+                  'y_train': 'y_train shape is (120, 3) after one-hot encoding',
+                  'y_test': 'y_test shape is (30, 3) after one-hot encoding'
+                  }
+         st.sidebar.success("Dataset loaded", icon='🤩')
+     else:
+         # unreachable with the current selectbox options
+         st.write("Please select a dataset")
+
+     return descr, ip_shape, x_train, y_train, x_test, y_test
+
+ # loading the dataset
+ if load:
+     descr, ip_shape, x_train, y_train, x_test, y_test = get_dataset(st.session_state.dataset)
+     st.session_state.x_train = x_train
+     st.session_state.y_train = y_train
+     st.session_state.x_test = x_test
+     st.session_state.y_test = y_test
+     st.session_state.descr = descr
+     st.session_state.ip_shape = ip_shape
+     st.session_state.model = Sequential()
+     if st.session_state.snn:
+         st.session_state.model.add(TD(tf.keras.layers.InputLayer(input_shape=st.session_state.ip_shape)))
+     else:
+         st.session_state.model.add(tf.keras.layers.InputLayer(input_shape=st.session_state.ip_shape))
+
+ if (st.session_state.load or st.session_state.custom) and st.session_state.nn_type:
+     if st.session_state.model is None:
+         st.session_state.model = Sequential()
+         st.session_state.model.add(tf.keras.layers.InputLayer(input_shape=st.session_state.ip_shape))
+     # st.write(st.session_state.ip_shape)
+     # if st.session_state.nn_type == 'Hardware':
+     #     st.session_state.Hmodel = Sequential()
+     #     st.session_state.Hmodel.add(tf.keras.layers.InputLayer(input_shape=ip_shape))
+     if st.session_state.dataset == 'mnist' and st.session_state.load:
+         st.sidebar.caption('The loaded dataset has shape (28,28,1). If you want to reshape it to (784,), please click the button below')
+         reshape = st.sidebar.button('Reshape')
+         if reshape:
+             num_pixels = 784
+             st.session_state.x_train = st.session_state.x_train.reshape(st.session_state.x_train.shape[0], num_pixels)
+             st.session_state.x_test = st.session_state.x_test.reshape(st.session_state.x_test.shape[0], num_pixels)
+             st.session_state.ip_shape = (784,)
+             st.session_state.model = Sequential()
+             st.session_state.model.add(tf.keras.layers.InputLayer(input_shape=st.session_state.ip_shape))
+             st.session_state.submittedLayers = []
+             st.sidebar.success('Successfully reshaped')
+             # st.sidebar.write(st.session_state.x_train.shape)
+
+ if load and not st.session_state.nn_type:
+     st.sidebar.error("Are you sure you selected the type of your Neural Network? If not, select it with 'Make It' and try loading again.")
+
+ # container showing the loaded dataset description
+ with st.container():
+     if st.session_state.descr == {}:
+         pass
+     else:
+         st.subheader('Loaded dataset')
+         for i in st.session_state.descr.keys():
+             st.write(i, " : ", st.session_state.descr[i])
+
+ if st.session_state.custom:
+     Norm = st.button('Normalize the dataset')
+     st.caption('If normalization shows an error, try changing the target shape to a smaller pixel size like (32,32) and upload again. Or you can skip the normalization step and move on, but remember that skipping it will affect the accuracy of your model.')
+     if Norm:
+         st.session_state.x_train = st.session_state.x_train.astype("float32") / 255
+         st.session_state.x_test = st.session_state.x_test.astype("float32") / 255
+         st.success('Successfully Normalized')
+
+ if st.session_state.snn:
+     c1, c2 = st.columns(2)
+     with c1:
+         b_size = st.number_input('batch_size', value=32)
+         n_steps = st.number_input('number of steps', value=100)
+     with c2:
+         sh = st.selectbox('shuffle', (True, False))
+         fl = st.selectbox('flatten', (False, True))
+         timesteps = st.number_input('timesteps', value=100)
+     c1, c2, c3 = st.columns((1, 1, 1))
+     with c2:
+         spike = st.button('Generate spiking dataset')
+
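+     # sparse_data_generator_non_spiking (from the local SCNN module) is assumed
+     # to rate-code each static image into a (timesteps, H, W, C) spike-train
+     # tensor; the hard-coded (None, 10) label shape means this path currently
+     # expects a 10-class one-hot dataset such as MNIST or CIFAR-10.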
+     if spike:
+         x_train_for_spiking = st.session_state.x_train
+         x_test_for_spiking = st.session_state.x_test
+         y_train_for_spiking = st.session_state.y_train
+         y_test_for_spiking = st.session_state.y_test
+         ip_shape_for_spiking = [st.session_state.ip_shape[0], st.session_state.ip_shape[1], st.session_state.ip_shape[2]]
+         st.session_state.dataset_generator = tf.data.Dataset.from_generator(
+             lambda: sparse_data_generator_non_spiking(input_images=x_train_for_spiking,
+                                                       input_labels=y_train_for_spiking,
+                                                       batch_size=b_size,
+                                                       nb_steps=n_steps, shuffle=True,
+                                                       flatten=fl),
+             output_shapes=((None, timesteps, ip_shape_for_spiking[0], ip_shape_for_spiking[1], ip_shape_for_spiking[2]), (None, 10)),
+             output_types=(tf.float64, tf.uint8))
+         st.session_state.dataset_generator_test = tf.data.Dataset.from_generator(
+             lambda: sparse_data_generator_non_spiking(input_images=x_test_for_spiking,
+                                                       input_labels=y_test_for_spiking,
+                                                       batch_size=b_size,
+                                                       nb_steps=n_steps, shuffle=sh,
+                                                       flatten=fl),
+             output_shapes=((None, timesteps, ip_shape_for_spiking[0], ip_shape_for_spiking[1], ip_shape_for_spiking[2]), (None, 10)),
+             output_types=(tf.float64, tf.uint8))
+
+         st.success('Successfully generated')
+
+ # dict storing each layer and its default parameters
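+ # Convention used by the forms below: a tuple value renders as a selectbox
+ # (the first entry is the default), a scalar renders as a number/text input
+ # with that default, and "name" must be unique per added layer because Keras
+ # rejects duplicate layer names.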
+ LAYERSandPARAMS = {
+     "Reshape": {
+         "target_shape": '(28, 28, 1)',
+         "name": "Reshape_1"
+     },
+     "Dense": {
+         "units": 10,
+         "activation": ("relu", "sigmoid", "softmax", "softplus", "softsign", "tanh", "selu", "elu", "exponential", None),
+         "kernel_initializer": ("RandomUniform", "RandomNormal", "TruncatedNormal", "Zeros", "Ones", "GlorotNormal", "GlorotUniform", "HeNormal", "HeUniform", "Identity", "Orthogonal", "Constant", "VarianceScaling"),
+         "bias_initializer": ("zeros", "RandomNormal", "RandomUniform", "TruncatedNormal", "Ones", "GlorotNormal", "GlorotUniform", "HeNormal", "HeUniform", "Identity", "Orthogonal", "Constant", "VarianceScaling"),
+         "name": "dense_1"
+     },
+     "Conv2D": {
+         "filters": 32,
+         "kernel_size": 3,
+         "strides": 1,
+         "activation": ("relu", "sigmoid", "softmax", "softplus", "softsign", "tanh", "selu", "elu", "exponential", None),
+         "padding": ("valid", "same"),  # "causal" removed: it is only valid for Conv1D
+         "kernel_initializer": ("RandomUniform", "RandomNormal", "TruncatedNormal", "Zeros", "Ones", "GlorotNormal", "GlorotUniform", "HeNormal", "HeUniform", "Identity", "Orthogonal", "Constant", "VarianceScaling"),
+         "bias_initializer": ("zeros", "RandomNormal", "RandomUniform", "TruncatedNormal", "Ones", "GlorotNormal", "GlorotUniform", "HeNormal", "HeUniform", "Identity", "Orthogonal", "Constant", "VarianceScaling"),
+         "name": "Conv2D_1"
+     },
+     "DepthwiseConv2D": {
+         "kernel_size": 3,
+         "depth_multiplier": 1,
+         "depthwise_initializer": ("glorot_uniform", "RandomNormal", "RandomUniform", "TruncatedNormal", "Zeros", "Ones", "GlorotNormal", "HeNormal", "HeUniform", "Identity", "Orthogonal", "Constant", "VarianceScaling"),
+         "depthwise_constraint": (None, "MaxNorm", "MinMaxNorm", "NonNeg", "UnitNorm", "RadialConstraint"),
+         "depthwise_regularizer": (None, "L1", "L2", "L1L2", "OrthogonalRegularizer"),
+         "name": "DepthwiseConv2D_1"
+     },
+     "MaxPooling1D": {
+         "pool_size": 2,
+         "strides": 1,
+         "padding": ("valid", "same"),
+         "data_format": ("channels_last", "channels_first"),
+         "name": "MaxPooling1D_1"
+     },
+     "MaxPooling2D": {
+         "pool_size": 2,
+         "strides": 1,
+         "padding": ("valid", "same"),
+         "data_format": ("channels_last", "channels_first"),
+         "name": "MaxPooling2D_1"
+     },
+     "AveragePooling1D": {
+         "pool_size": 2,
+         "strides": 1,
+         "padding": ("valid", "same"),
+         "data_format": ("channels_last", "channels_first"),
+         "name": "AveragePooling1D_1"
+     },
+     "AveragePooling2D": {
+         "pool_size": 2,
+         "strides": 1,
+         "padding": ("valid", "same"),
+         "data_format": ("channels_last", "channels_first"),
+         "name": "AveragePooling2D_1"
+     },
+     "Dropout": {
+         "rate": 0.5,
+         "name": "Dropout_1"
+     },
+     "GaussianNoise": {
+         "stddev": 0.2
+     },
+     "GaussianDropout": {
+         "rate": 0.5
+     },
+     "AlphaDropout": {
+         "rate": 0.5,
+         # "noise_shape": 2,
+         "seed": 1
+     },
+     "LSTM": {
+         "units": 5,
+         "return_sequences": True,
+         "activation": ("tanh", "sigmoid", "relu", "softmax", "softplus", "softsign", "selu", "elu", "exponential", None),
+         "recurrent_activation": ("sigmoid", "relu", "softmax", "softplus", "softsign", "tanh", "selu", "elu", "exponential", None),
+         "use_bias": True,
+         "kernel_initializer": ("glorot_uniform", "RandomNormal", "RandomUniform", "TruncatedNormal", "Zeros", "Ones", "GlorotNormal", "HeNormal", "HeUniform", "Identity", "Orthogonal", "Constant", "VarianceScaling"),
+         "recurrent_initializer": ("Orthogonal", "glorot_uniform", "RandomNormal", "RandomUniform", "TruncatedNormal", "Zeros", "Ones", "GlorotNormal", "HeNormal", "HeUniform", "Identity", "Constant", "VarianceScaling"),
+         "bias_initializer": ("zeros", "RandomNormal", "RandomUniform", "TruncatedNormal", "Ones", "GlorotNormal", "GlorotUniform", "HeNormal", "HeUniform", "Identity", "Orthogonal", "Constant", "VarianceScaling"),
+         "name": "LSTM_1"
+     },
+     "Flatten": {"name": "Flatten_1"},
+     "Integrator_layer": {"name": "Integrator_layer_1"},
+     "Reduce_sum": {"name": "Reduce_sum_1"},
+ }
+
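+ # Both sidebar forms below follow the same pattern: widgets inside st.form
+ # collect the layer hyper-parameters without triggering a rerun, and a
+ # successful "Submit" appends one layer to the persistent Sequential model
+ # held in session_state.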
+ # form for setting the parameters of the selected layer and submitting (SNN)
+ if st.session_state.snn:
+     with st.sidebar:
+         layer = st.selectbox("Select a layer", ('Conv2D', 'Integrator_layer', 'Flatten', 'Dense', 'Reduce_sum'))
+         with st.form("SNNParams"):
+             params = dict()
+             if layer in LAYERSandPARAMS.keys():
+                 st.caption('Set the parameters below')
+                 for i in LAYERSandPARAMS[layer].keys():
+                     if i == 'units':
+                         val = st.number_input(i, min_value=0, max_value=None, value=LAYERSandPARAMS[layer][i])
+                         params[i] = val
+                     if i == 'filters':
+                         val = st.number_input(i, min_value=0, max_value=None, value=LAYERSandPARAMS[layer][i])
+                         params[i] = val
+                     if i == 'kernel_size':
+                         val = st.number_input(i, min_value=0, max_value=None, value=LAYERSandPARAMS[layer][i])
+                         params[i] = val
+                     if i == 'name':
+                         val = st.text_input(i, value=LAYERSandPARAMS[layer][i])
+                         st.caption('Please update the name each time a layer is added')
+                         params[i] = val
+
+             submitted = st.form_submit_button("Submit")
+             st.caption('Submitted layers will be displayed on the main page under Added Layers.')
+             if submitted:
+                 if st.session_state.descr == {}:
+                     st.error("Please load a dataset first, then start adding layers", icon='💁‍♀️')
+                 else:
+                     try:
+                         if layer == 'Dense':
+                             st.session_state.model.add(TD(tf.keras.layers.Dense(
+                                 units=params['units'],
+                                 activation=None
+                             ), name=params['name']))
+                         if layer == 'Conv2D':
+                             st.session_state.model.add(TD(tf.keras.layers.Conv2D(
+                                 filters=params['filters'],
+                                 kernel_size=params['kernel_size'],
+                                 activation=None
+                             ), name=params['name']))
+                         if layer == 'Flatten':
+                             st.session_state.model.add(TD(tf.keras.layers.Flatten(), name=params['name']))
+                         if layer == 'Integrator_layer':
+                             st.session_state.model.add(Integrator_layer(name=params['name']))
+                         if layer == 'Reduce_sum':
+                             st.session_state.model.add(Reduce_sum(name=params['name']))
+
+                         st.session_state.submittedLayers.append([layer, params])
+                         st.success('Submitted Successfully', icon='🎉')
+                         st.write("Layer :", layer)
+                         st.write("Parameters", params)
+                     except Exception as ex:
+                         st.error(ex, icon="🥺")
+
+ # form for setting the parameters of the selected layer and submitting (Software)
+ else:
+     with st.sidebar:
+         layer = st.selectbox("Select a layer", ("Dense", "Conv2D", "DepthwiseConv2D", "MaxPooling2D", "Reshape", "Flatten", "Dropout", "GaussianNoise", "GaussianDropout", "AlphaDropout"))
+         with st.form("Params"):
+             params = dict()
+             if layer in LAYERSandPARAMS.keys():
+                 st.caption('Set the parameters below')
+                 for i in LAYERSandPARAMS[layer].keys():
+                     if isinstance(LAYERSandPARAMS[layer][i], tuple) and i != 'target_shape':
+                         val = st.selectbox(i, LAYERSandPARAMS[layer][i])
+                         params[i] = val
+                     elif i == 'target_shape':
+                         val = st.text_input(i, value=LAYERSandPARAMS[layer][i])
+                         st.caption('Please enter in a tuple format, e.g. (28, 28, 1)')
+                         params[i] = val
+                     elif i == 'rate' or i == 'stddev':
+                         val = st.number_input(i, min_value=0.0, max_value=1.0, value=LAYERSandPARAMS[layer][i])
+                         params[i] = val
+                     elif i == 'name':
+                         val = st.text_input(i, value=LAYERSandPARAMS[layer][i])
+                         st.caption('Please update the name when the same layer is added again')
+                         params[i] = val
+                     elif (i == "return_sequences") or (i == 'use_bias'):
+                         val = st.selectbox(i, (True, False))
+                         params[i] = val
+                     else:
+                         val = st.number_input(i, min_value=0, max_value=None, value=LAYERSandPARAMS[layer][i])
+                         params[i] = val
+             submitted = st.form_submit_button("Submit")
+             st.caption('Submitted layers will be displayed on the main page under Added Layers.')
+             if submitted:
+                 if st.session_state.descr == {}:
+                     st.error("Please load a dataset first, then start adding layers", icon='💁‍♀️')
+                 else:
+                     try:
+                         if layer == 'Dense':
+                             st.session_state.model.add(tf.keras.layers.Dense(
+                                 units=params['units'],
+                                 activation=params['activation'],
+                                 kernel_initializer=params['kernel_initializer'],
+                                 bias_initializer=params['bias_initializer'],
+                                 name=params['name']
+                             ))
+                         if layer == 'Conv2D':
+                             st.session_state.model.add(tf.keras.layers.Conv2D(
+                                 filters=params['filters'],
+                                 kernel_size=params['kernel_size'],
+                                 activation=params['activation'],
+                                 strides=params['strides'],
+                                 padding=params['padding'],
+                                 kernel_initializer=params['kernel_initializer'],
+                                 bias_initializer=params['bias_initializer'],
+                                 name=params['name']
+                             ))
+                         if layer == 'DepthwiseConv2D':
+                             st.session_state.model.add(tf.keras.layers.DepthwiseConv2D(
+                                 kernel_size=params['kernel_size'],
+                                 depth_multiplier=params['depth_multiplier'],
+                                 depthwise_initializer=params['depthwise_initializer'],
+                                 depthwise_constraint=params['depthwise_constraint'],
+                                 depthwise_regularizer=params['depthwise_regularizer'],
+                                 name=params['name']
+                             ))
+                         if layer == 'MaxPooling1D':
+                             st.session_state.model.add(tf.keras.layers.MaxPooling1D(
+                                 pool_size=params['pool_size'],
+                                 strides=params['strides'],
+                                 padding=params['padding'],
+                                 data_format=params['data_format'],
+                                 name=params['name']
+                             ))
+                         if layer == 'MaxPooling2D':
+                             st.session_state.model.add(tf.keras.layers.MaxPooling2D(
+                                 pool_size=params['pool_size'],
+                                 strides=params['strides'],
+                                 padding=params['padding'],
+                                 data_format=params['data_format'],
+                                 name=params['name']
+                             ))
+                         if layer == 'AveragePooling1D':
+                             st.session_state.model.add(tf.keras.layers.AveragePooling1D(
+                                 pool_size=params['pool_size'],
+                                 strides=params['strides'],
+                                 padding=params['padding'],
+                                 data_format=params['data_format'],
+                                 name=params['name']
+                             ))
+                         if layer == 'AveragePooling2D':
+                             st.session_state.model.add(tf.keras.layers.AveragePooling2D(
+                                 pool_size=params['pool_size'],
+                                 strides=params['strides'],
+                                 padding=params['padding'],
+                                 data_format=params['data_format'],
+                                 name=params['name']
+                             ))
+                         if layer == 'Reshape':
+                             ts = eval(params['target_shape'])
+                             st.session_state.model.add(tf.keras.layers.Reshape(
+                                 ts, name=params['name']
+                             ))
+                         if layer == 'Dropout':
+                             rate = params['rate']
+                             st.session_state.model.add(tf.keras.layers.Dropout(
+                                 rate, name=params['name']
+                             ))
+                         if layer == 'GaussianNoise':
+                             st.session_state.model.add(tf.keras.layers.GaussianNoise(
+                                 stddev=params['stddev']
+                             ))
+                         if layer == 'GaussianDropout':
+                             st.session_state.model.add(tf.keras.layers.GaussianDropout(
+                                 rate=params['rate']
+                             ))
+                         if layer == 'AlphaDropout':
+                             st.session_state.model.add(tf.keras.layers.AlphaDropout(
+                                 rate=params['rate'],
+                                 # noise_shape=params['noise_shape'],
+                                 seed=params['seed']
+                             ))
+                         if layer == 'LSTM' and st.session_state.ip_shape != (4,):
+                             if st.session_state.model.layers == []:
+                                 st.session_state.model = Sequential()
+                                 st.session_state.model.add(tf.keras.layers.InputLayer(input_shape=st.session_state.ip_shape[:-1]))
+
+                             # if the images are RGB, collapse them to grayscale so each
+                             # sample becomes a (rows, columns) sequence the LSTM accepts
+                             if st.session_state.ip_shape[-1] == 3:
+                                 st.session_state.x_train = np.array([cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) for image in st.session_state.x_train])
+                                 st.session_state.x_test = np.array([cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) for image in st.session_state.x_test])
+
+                             st.session_state.model.add(tf.keras.layers.LSTM(
+                                 units=params['units'],
+                                 name=params['name'],
+                                 return_sequences=params['return_sequences']
+                             ))
+                         if layer == 'Flatten':
+                             st.session_state.model.add(tf.keras.layers.Flatten())
+
+                         st.session_state.submittedLayers.append([layer, params])
+                         st.success('Submitted Successfully', icon='🎉')
+                         st.write("Layer :", layer)
+                         st.write("Parameters", params)
+                     except Exception as ex:
+                         st.error(ex, icon="🥺")
+
646
+ # if 'HardwareLayers' not in st.session_state:
647
+ # st.session_state.HardwareLayers = []
648
+
649
+ # HardwareLayers = {
650
+ # "Dense":{
651
+ # "units":3,
652
+ # "name":"Dense_1"
653
+ # },
654
+ # "LSTM":{
655
+ # "units":5,
656
+ # "return_sequences":True,
657
+ # "name":"LSTM_1"
658
+ # },
659
+ # "Conv2D":{
660
+ # "filters":3,
661
+ # "kernel_size":3,
662
+ # "name":"Conv2D_1"
663
+ # },
664
+ # "MaxPooling2D":{
665
+ # "pool_size":2,
666
+ # "name":"MaxPooling2D_1"
667
+ # }
668
+ # }
669
+
670
+ # if st.session_state.nn_type == 'Hardware':
671
+ # with st.sidebar:
672
+ # layer = st.selectbox("Select a layer",("Dense","Conv2D","MaxPooling2D","Flatten","LSTM"))
673
+ # with st.form("HParams"):
674
+ # params={}
675
+ # if layer in HardwareLayers.keys():
676
+ # for i in HardwareLayers[layer].keys():
677
+ # if i=="name":
678
+ # val = st.text_input(i, value=HardwareLayers[layer][i])
679
+ # st.caption('Please update name when same layer is added')
680
+ # params[i] = val
681
+ # elif i=="return_sequences":
682
+ # val = st.selectbox(i, (True,False))
683
+ # params[i] = val
684
+ # else:
685
+ # val = st.number_input(i,min_value=0, max_value=None, value=HardwareLayers[layer][i])
686
+ # params[i] = val
687
+
688
+ # submitted = st.form_submit_button("Submit")
689
+ # if submitted:
690
+ # if st.session_state.descr =={}:
691
+ # st.error("Please load a dataset first, then start adding layers",icon='💁‍♀️')
692
+ # else:
693
+ # try:
694
+ # if layer=='Dense':
695
+ # st.session_state.Hmodel.add(tf.keras.layers.Dense(
696
+ # units=params['units'],
697
+ # name = params['name']
698
+ # ))
699
+ # if layer=='Conv2D':
700
+ # st.session_state.Hmodel.add(tf.keras.layers.Conv2D(
701
+ # filters=params['filters'],
702
+ # kernel_size=params['kernel_size'],
703
+ # name = params['name']
704
+ # ))
705
+ # if layer == 'Flatten':
706
+ # st.session_state.Hmodel.add(tf.keras.layers.Flatten())
707
+
708
+ # if layer == 'MaxPooling2D':
709
+ # st.session_state.Hmodel.add(tf.keras.layers.MaxPooling2D(
710
+ # pool_size=params['pool_size'],
711
+ # name = params['name']
712
+ # ))
713
+ # if layer == 'LSTM' and st.session_state.ip_shape != (4,):
714
+ # if st.session_state.Hmodel.layers == []:
715
+ # st.session_state.Hmodel = Sequential()
716
+ # st.session_state.Hmodel.add(tf.keras.layers.InputLayer(input_shape=st.session_state.ip_shape[:-1]))
717
+
718
+ # if st.session_state.ip_shape == (32,32,3):
719
+ # st.session_state.x_train = np.array([cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) for image in st.session_state.x_train])
720
+ # st.session_state.x_test = np.array([cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) for image in st.session_state.x_test])
721
+
722
+ # st.session_state.Hmodel.add(tf.keras.layers.LSTM(
723
+ # units=params['units'],
724
+ # name = params['name'],
725
+ # return_sequences=params['return_sequences']
726
+ # ))
727
+
728
+ # if layer == 'LSTM' and st.session_state.ip_shape == (4,):
729
+ # st.error('Please choose an appropriate dataset for the LSTM')
730
+ # else:
731
+ # st.session_state.HardwareLayers.append([layer,params])
732
+ # st.success('Submitted Successfully')
733
+ # st.write("Layer :", layer)
734
+ # st.write("Parameters", params)
735
+
736
+ # except Exception as ex:
737
+ # st.error(ex,icon="🥺")
738
+
739
+
740
+ if 'Store' not in st.session_state:
741
+ st.session_state.Store = {"Dataset":[],"loss":[], "accuracy":[],"precision":[],"recall":[],"f1 score":[],"Neural network config":[]}
742
+
+
+ def show_layers(layer_list):
+     # enumerate avoids list.index(), which repeats the first index for duplicate entries
+     for idx, i in enumerate(layer_list):
+         layer_with_idx = str(idx + 1) + ' ' + i[0]
+         with st.expander(layer_with_idx):
+             st.write(i[1])
+
+ def show_compile_fit():
+     with st.container():
+         col1, col2 = st.columns(2)
+         with col1:
+             st.subheader('Compile')
+             optimizer = st.selectbox('optimizer', ('adam', 'sgd', 'rmsprop', 'nadam', 'adadelta', 'adagrad', 'adamax', 'ftrl'))
+             loss = st.selectbox('loss', ('categorical_crossentropy', 'binary_crossentropy', 'sparse_categorical_crossentropy', 'poisson'))
+         with col2:
+             st.subheader('Fit')
+             epochs = st.number_input('epochs', max_value=None, min_value=1, value=2)
+             if st.session_state.snn:
+                 # batch_size = 0
+                 # count = st.number_input('repeat count',max_value=None, min_value=0, value=1)
+                 txt = 'repeat count'
+             else:
+                 txt = 'batch_size'
+                 # count = 0
+             batch_size = st.number_input(txt, max_value=None, min_value=0, value=10)
+             # validation_split = st.number_input('validation_split',max_value=None, min_value=0.0, value=0.1)
+     return optimizer, loss, epochs, batch_size
+
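+ # The SNN layers emit raw accumulated sums rather than probabilities, so the
+ # spiking path compiles each cross-entropy loss with from_logits=True and lets
+ # the loss apply the softmax/sigmoid internally.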
+ def run_model(model, loss, optimizer, epochs, batch_size):
+     # print(model.summary())
+     print("Initialize epochs:", epochs)
+     try:
+         if st.session_state.snn:
+             if loss == 'categorical_crossentropy':
+                 model.compile(loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
+                               optimizer=optimizer,
+                               metrics=['accuracy'])
+             if loss == 'binary_crossentropy':
+                 model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
+                               optimizer=optimizer,
+                               metrics=['accuracy'])
+             if loss == 'sparse_categorical_crossentropy':
+                 model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+                               optimizer=optimizer,
+                               metrics=['sparse_categorical_accuracy'])
+             if loss == 'poisson':
+                 # tf.keras.losses.Poisson takes no from_logits argument
+                 model.compile(loss=tf.keras.losses.Poisson(),
+                               optimizer=optimizer,
+                               metrics=['accuracy'])
+
+             model_fit = model.fit(st.session_state.dataset_generator.repeat(count=1),
+                                   epochs=epochs,
+                                   validation_data=st.session_state.dataset_generator_test.repeat(count=1))
+         else:
+             model.compile(loss=loss,
+                           optimizer=optimizer,
+                           metrics=['accuracy'])
+
+             model_fit = model.fit(st.session_state.x_train, st.session_state.y_train,
+                                   epochs=epochs,
+                                   batch_size=batch_size,
+                                   validation_data=(st.session_state.x_test, st.session_state.y_test))
+
+         # if st.session_state.snn:
+         #     print("Hey hey People!!!", len(st.session_state.x_train))
+         #     print("I am at the sesion state")
+         #     print("1122", max(st.session_state["x_train"]))
+         #     print('SNN training epochs:', epochs)
+         #     print(epochs)
+         #     model_fit = model.fit(st.session_state.dataset_generator.repeat(count=1),
+         #                           epochs=epochs,
+         #                           validation_data=st.session_state.dataset_generator_test.repeat(count=1))
+         # else:
+         #     print("Initialize epochs non spike:", epochs)
+         #     model_fit = model.fit(st.session_state.x_train, st.session_state.y_train,
+         #                           epochs=epochs,
+         #                           batch_size=batch_size,
+         #                           validation_data=(st.session_state.x_test, st.session_state.y_test))
+
+         # st.snow()
+         model.save_weights('Model_Weights.h5')
+         return model_fit
+     except Exception as ex:
+         st.error(ex)
+
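+ # Precision/recall/F1 below use average='weighted' and are restricted to the
+ # labels the model actually predicted, which avoids zero-division warnings on
+ # classes it never emits but can flatter the scores slightly.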
+ def cal_result(model):
+     if st.session_state.snn:
+         st.session_state.score = model.evaluate(st.session_state.dataset_generator_test, verbose=2)
+     else:
+         st.session_state.score = model.evaluate(st.session_state.x_test, st.session_state.y_test, verbose=0)
+     y_test_class = np.argmax(st.session_state.y_test, axis=1)
+     y_pred = np.argmax(model.predict(st.session_state.x_test, verbose=0), axis=1)
+
+     # precision: tp / (tp + fp)
+     precision = precision_score(y_test_class, y_pred, average='weighted', labels=np.unique(y_pred))
+     # recall: tp / (tp + fn)
+     recall = recall_score(y_test_class, y_pred, average='weighted', labels=np.unique(y_pred))
+     # f1: 2*tp / (2*tp + fp + fn)
+     f1 = f1_score(y_test_class, y_pred, average='weighted', labels=np.unique(y_pred))
+     config = model.get_config()
+     st.session_state.Store["Neural network config"].append(config)
+     st.session_state.Store["loss"].append(st.session_state.score[0])
+     st.session_state.Store["precision"].append(precision)
+     st.session_state.Store["accuracy"].append(st.session_state.score[1])
+     st.session_state.Store["recall"].append(recall)
+     st.session_state.Store["f1 score"].append(f1)
+     st.session_state.Store["Dataset"].append(st.session_state.dataset)
+
+ def show_results(model_fit):
+     st.subheader('Results')
+     st.write("Test loss:", st.session_state.score[0])
+     st.write("Test accuracy:", st.session_state.score[1])
+
+     col1, col2 = st.columns([1, 1])
+     with col1:
+         fig = plt.figure()
+         plt.plot(model_fit.history['loss'], label='train')
+         plt.plot(model_fit.history['val_loss'], label='val')
+         plt.ylabel('loss')
+         plt.xlabel('epoch')
+         plt.legend()
+         st.pyplot(fig)
+
+     with col2:
+         fig = plt.figure()
+         plt.plot(model_fit.history['accuracy'], label='train')
+         plt.plot(model_fit.history['val_accuracy'], label='val')
+         plt.ylabel('accuracy')
+         plt.xlabel('epoch')
+         plt.legend()
+         st.pyplot(fig)
+
+ if 'nn_submit' not in st.session_state:
+     st.session_state.nn_submit = False
+
+ # if st.session_state.submittedLayers != [] and st.session_state.nn_type == 'Software':
+ #     # container for showing added layers
+ #     with st.container():
+ #         st.subheader("Added Layers")
+ #         show_layers(st.session_state.submittedLayers)
+ #         reset = st.button('Reset')
+
+ #     # resetting the submittedLayers and so the model too
+ #     if reset:
+ #         st.session_state.Smodel = Sequential(tf.keras.layers.InputLayer(input_shape=st.session_state.ip_shape))
+ #         st.session_state.submittedLayers = []
+
+ #     optimizer, loss, epochs, batch_size = show_compile_fit()
+
+ #     col1, col2, col3 = st.columns([2, 1, 2])
+ #     with col2:
+ #         submitAll = st.button('Submit all')
+
+ #     # if submitAll:
+ #     #     show_results(st.session_state.Smodel)
+
+ #     if submitAll:
+ #         st.session_state.model_fit = run_model(st.session_state.Smodel, loss, optimizer, epochs, batch_size)
+ #         cal_result(st.session_state.Smodel)
+ #         st.session_state.nn_submit = True
+
+ #     if st.session_state.nn_submit:
+ #         show_results(st.session_state.model_fit)
+
+ #     if st.session_state.Store != {}:
+ #         df = pd.DataFrame(st.session_state.Store)
+ #         st.table(df)
+
+ if 'setup' not in st.session_state:
+     st.session_state.setup = False
+ if 'csv' not in st.session_state:
+     st.session_state.csv = None
+
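+ # Maps the trained float weights onto memristor states: judging by its
+ # parameters, the local Memristor module rescales each weight into the
+ # [Ron, Roff] resistance range, quantized to `partition` levels and optionally
+ # perturbed by Ron/Roff aging, and the result is written back into the layer
+ # with set_weights(). Exact device behaviour depends on the selected model
+ # (Joglekar / Prodromakis / Biolek / Zha).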
+ def set_hardware_weights(model):
+     st.text("")
+     st.text("")
+     col1, col2 = st.columns(2)
+     with col1:
+         mem_txt = "Select the memristor "  # +str(mem)
+         memristor_model = st.radio(mem_txt, ('Joglekar', 'Prodromakis', 'Biolek', 'Zha'), key=mem_txt)
+         if memristor_model == 'Joglekar' or memristor_model == 'Biolek':
+             p = st.number_input('Enter p value', value=1)
+             j = 1
+         if memristor_model == 'Prodromakis' or memristor_model == 'Zha':
+             p = st.number_input('Enter p value', value=7)
+             j = st.number_input('Enter j value', value=1)
+         Amplitude = st.number_input('Amplitude', value=1)
+         freq = st.number_input('Frequency', value=1)
+     with col2:
+         Ron_txt = "Ron"  # +str(mem)
+         Ron = st.number_input('Set Ron value', min_value=100, max_value=16000, value=100, key=Ron_txt)
+         Roff_txt = "Roff"  # +str(mem)
+         Roff = st.number_input('Set Roff value', min_value=100, max_value=16000, value=16000, key=Roff_txt)
+         part_txt = "part"  # +str(mem)
+         Rint = st.number_input('Set Rint value', min_value=100, max_value=16000, value=11000)
+         partition = st.slider('Define the Quantization value here', 2, 64, key=part_txt)
+         sample_rate = st.number_input('Sample Rate', value=500)
+
+     # st.write('Would you like to add some variabilities? Add them below...')
+     # Ron_Roff_txt = "Ron_Roff"#+str(mem)
+     Ron_Roff_aging = st.checkbox("Ron-Roff Aging")
+     c1, c2, c3 = st.columns((1, 2, 1))
+     if Ron_Roff_aging:
+         with c2:
+             st.caption('Aging value can be positive or negative')
+             Ron_aging = st.number_input('Enter aging % (b/w 0-20)', key='ronAge', value=0)
+             Roff_aging = st.number_input('Enter aging % (b/w 0-20)', key='roffAge', value=0)
+     else:
+         Ron_aging = 0
+         Roff_aging = 0
+
+     c1, c2, c3 = st.columns((1, 1, 1))
+     with c2:
+         setup = st.button('Set up Memristor')
+         if setup:
+             st.session_state.setup = True
+
+     if setup:
+         st.text("")
+         st.text("")
+
+         # Get the current weights of the neural network
+         old_weights = model.get_weights()
+
+         old_weight_array = np.concatenate([arr.flatten() for arr in old_weights])
+
+         # Calculate the minimum and maximum magnitudes of the old weights
+         old_weight_min = np.amin(np.abs(old_weight_array))
+         old_weight_max = np.amax(np.abs(old_weight_array))
+
+         lyr = 0
+         for layer in model.layers:
+             lyr += 1
+             if layer.__class__.__name__ == 'Dense' or layer.__class__.__name__ == 'Conv2D' or layer.__class__.__name__ == 'LSTM':
+                 try:
+                     shape = layer.get_weights()[0].shape
+                     txt = "Weights for the layer " + layer.name + " of shape " + str(shape)
+                     st.subheader(txt)
+
+                     old_weights = list(layer.get_weights()[0])
+                     st.session_state.old_weights = []
+                     st.session_state.old_bias = []
+                     idx = 0
+
+                     if layer.__class__.__name__ == 'LSTM':
+                         # old_weights = layer.trainable_weights[0]
+                         # old_weights = old_weights.numpy()
+                         # shape = layer.trainable_weights[0].shape
+                         # old_bias = layer.trainable_weights[1]
+                         st.session_state.old_weights = old_weights
+                         st.session_state.new_weights = []
+                         st.session_state.new_u = []
+                         st.session_state.old_u = layer.get_weights()[1]
+                         shape_u = st.session_state.old_u.shape
+                         old_bias = layer.get_weights()[2]
+
+                         for weight in list(old_weights):
+                             Mem = mem.memristor_models(Roff, Ron, Rint, Amplitude, freq, 1, sample_rate, p, j, memristor_model)
+                             Mem.variability(partition, Ron_aging, Roff_aging)
+                             weight = (list(weight))
+                             Mem.neural_weight([weight], old_weight_max, old_weight_min)
+                             st.session_state.new_weights.append(Mem.new_weights())
+
+                         for weight in list(st.session_state.old_u):
+                             Mem = mem.memristor_models(Roff, Ron, Rint, Amplitude, freq, 1, sample_rate, p, j, memristor_model)
+                             Mem.variability(partition, Ron_aging, Roff_aging)
+                             weight = (list(weight))
+                             Mem.neural_weight([weight], old_weight_max, old_weight_min)
+                             st.session_state.new_u.append(Mem.new_weights())
+                     else:
+                         old_bias = layer.get_weights()[1]
+
+                     if layer.__class__.__name__ == 'Conv2D':
+                         st.session_state.old_weights = old_weights
+                         st.session_state.new_weights = []
+                         for row in old_weights:
+                             # st.session_state.old_weights.append([])
+                             st.session_state.new_weights.append([])
+                             for weights in row:
+                                 for weight in weights:
+                                     # st.session_state.old_weights[idx].append([weight])
+                                     Mem = mem.memristor_models(Roff, Ron, Rint, Amplitude, freq, 1, sample_rate, p, j, memristor_model)
+                                     Mem.variability(partition, Ron_aging, Roff_aging)
+                                     weight = (list(weight))
+                                     Mem.neural_weight([weight], old_weight_max, old_weight_min)
+                                     st.session_state.new_weights[idx].append(Mem.new_weights())
+                             idx += 1
+                     if layer.__class__.__name__ == 'Dense':
+                         for row in old_weights:
+                             st.session_state.old_weights.append([])
+                             for weight in row:
+                                 # new_w_txt = "Set new weight "+str(memW)+' for '+layer.__class__.__name__+' '+layer.name
+                                 # new_w = st.number_input(new_w_txt, key=new_w_txt)
+                                 # set_txt = "set"+str(memW)
+                                 # memW += 1
+
+                                 st.session_state.old_weights[idx].append(weight)
+                             idx += 1
+                             # st.write('***')
+
+                         Mem = mem.memristor_models(Roff, Ron, Rint, Amplitude, freq, 1, sample_rate, p, j, memristor_model)
+                         Mem.variability(partition, Ron_aging, Roff_aging)
+
+                         Mem.neural_weight(st.session_state.old_weights, old_weight_max, old_weight_min)
+                         st.session_state.new_weights = Mem.new_weights()
+
+                     for bias in old_bias:
+                         # new_b_txt = "Set new bias "+str(memB)+' for '+layer.__class__.__name__+' '+layer.name
+                         # new_b = st.number_input(new_b_txt, key=new_b_txt)
+                         # set_txt = "setb"+str(memB)
+                         # memB += 1
+                         # st.write(":heavy_minus_sign:" * 30)
+
+                         st.session_state.old_bias.append(bias)
+
+                     Mem = mem.memristor_models(Roff, Ron, Rint, Amplitude, freq, 1, sample_rate, p, j, memristor_model)
+                     Mem.variability(partition, Ron_aging, Roff_aging)
+
+                     Mem.neural_weight([st.session_state.old_bias], old_weight_max, old_weight_min)
+                     st.session_state.new_bias = Mem.new_weights()[0]
+
+                     C1, C2 = st.columns(2)
+                     with C1:
+                         st.write(layer.name, ": Weights", np.array(st.session_state.old_weights))
+                         if layer.__class__.__name__ == 'LSTM':
+                             st.write(layer.name, ": hidden Weights", np.array(st.session_state.old_u))
+                         st.write(layer.name, ": Biases", np.array(st.session_state.old_bias))
+
+                     with C2:
+                         st.session_state.new_weights = np.array(st.session_state.new_weights).reshape(shape)
+                         st.write(layer.name, ": mapped Weights", st.session_state.new_weights)
+                         if layer.__class__.__name__ == 'LSTM':
+                             st.session_state.new_u = np.array(st.session_state.new_u).reshape(shape_u)
+                             st.write(layer.name, ": mapped hidden Weights", st.session_state.new_u)
+                         st.write(layer.name, ": mapped Biases", np.array(st.session_state.new_bias))
+
+                     # apply = st.button("Apply mapped values", key=lyr)
+                     # if apply:
+                     st.session_state.new_weights = np.array(st.session_state.new_weights).reshape(shape)
+                     if layer.__class__.__name__ == 'LSTM':
+                         layer.set_weights([st.session_state.new_weights, st.session_state.new_u, np.array(st.session_state.new_bias)])
+                     else:
+                         layer.set_weights([st.session_state.new_weights, np.array(st.session_state.new_bias)])
+                     # st.success('Successfully applied new mapped weights and biases')
+
+                 except Exception as ex:
+                     st.error(ex)
+                     print(ex)
+
+
+ def get_weights_and_biases(model):
+
+     # Get the current weights of the neural network; dtype=object because the
+     # per-layer arrays have different shapes
+     old_weights = np.array(model.get_weights(), dtype=object)
+     # print(len(old_weights))
+     # print(old_weights)
+     # for i in old_weights:
+     #     print(len(i))
+     df = pd.DataFrame(old_weights)
+
+     return df
+
+
+ @st.cache
+ def convert_df(df):
+     # IMPORTANT: Cache the conversion to prevent computation on every rerun
+     return df.to_csv().encode('utf-8')
+
+
+ if st.session_state.submittedLayers != []:
+     st.subheader('Added Layers')
+     show_layers(st.session_state.submittedLayers)
+     reset = st.button('Reset')
+
+     # resetting the submittedLayers, and with them the model
+     if reset:
+         if st.session_state.snn:
+             st.session_state.model = Sequential(TD(tf.keras.layers.InputLayer(input_shape=st.session_state.ip_shape)))
+             st.session_state.submittedLayers = []
+         else:
+             st.session_state.model = Sequential(tf.keras.layers.InputLayer(input_shape=st.session_state.ip_shape))
+             st.session_state.submittedLayers = []
+
+     optimizer, loss, epochs, batch_size = show_compile_fit()
+
+     col1, col2, col3 = st.columns([2, 1, 2])
+     with col2:
+         submitAll = st.button('Submit all')
+
+     if submitAll:
+         st.session_state.model_fit = run_model(st.session_state.model, loss, optimizer, epochs, batch_size)
+         cal_result(st.session_state.model)
+         st.session_state.nn_submit = True
+         df = get_weights_and_biases(st.session_state.model)
+         st.session_state.csv = convert_df(df)
+
+     col1, col2, col3 = st.columns([2, 2, 2])
+     with col2:
+         if st.session_state.csv:
+             st.download_button(
+                 label="Download weights as CSV",
+                 data=st.session_state.csv,
+                 file_name='weights_df.csv',
+                 mime='text/csv',
+             )
+
+     if st.session_state.nn_submit:
+         show_results(st.session_state.model_fit)
+         restore = st.button('Restore trained weights')
+         if restore:
+             st.session_state.model.load_weights('Model_Weights.h5')
+
+         if st.session_state.nn_type == 'Hardware':
+             set_hardware_weights(st.session_state.model)
+
+             c1, c2, c3 = st.columns(3)
+             with c2:
+                 evaluate = st.button("Evaluate")
+             if evaluate:
+                 cal_result(st.session_state.model)
+
+     if st.session_state.Store != {}:
+         df = pd.DataFrame(st.session_state.Store)
+         st.table(df)