panahi committed on
Commit
3c3c2fc
·
1 Parent(s): a75e03c

Upload mainmodel (1).py

Browse files
Files changed (1) hide show
  1. mainmodel (1).py +332 -0
mainmodel (1).py ADDED
@@ -0,0 +1,332 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
"""MainModel.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1-4IJzCALsOTWJG0nqR1_thQSGLh-Zfm9

## Adversarial AutoEncoder

Generative model that learns to reproduce its input while its latent space
is shaped adversarially: a combination of GAN (Generative Adversarial
Networks) and AE (AutoEncoder) ideas to obtain a stronger generative model.

## Initialization

Mount Google Drive so the dataset stored there is reachable.
"""

from google.colab import drive
drive.mount('/content/drive')

"""Import the libraries used throughout the notebook."""

import numpy as np
import tensorflow.keras as ke
import tensorflow as tf
from pathlib import Path
import time

"""Check GPU availability."""

print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))

"""## Preparing Data For Train

Read the training images from Google Drive into `dataset` and batch them
for training.
"""

batchsize = 64

directory = '/content/drive/MyDrive/MainModel/train'

dataset = tf.keras.utils.image_dataset_from_directory(
    directory,
    labels=None,
    batch_size=batchsize,
    image_size=(224, 224)
)

# Re-batch with drop_remainder=True so every batch holds exactly `batchsize`
# images; later code builds constant-size label arrays per batch.
dataset = dataset.unbatch()
dataset = dataset.batch(batchsize, drop_remainder=True)

"""## Make Model

Size of the latent vector produced by the encoder.
"""

latent_vector_size = 256
58
+
59
+ """Make the builder function of encoder part of model."""
60
+
61
+ def build_model_enc():
62
+ model = ke.models.Sequential()
63
+ model.add(ke.layers.Conv2D(64, (5, 5), strides=(2, 2), activation="relu", padding='same', input_shape=(224, 224, 3)))
64
+ model.add(ke.layers.Conv2D(128, (5, 5), strides=(2, 2), activation="relu", padding='same'))
65
+ model.add(ke.layers.Conv2D(256, (5, 5), strides=(2, 2), activation="relu", padding='same'))
66
+ model.add(ke.layers.Conv2D(512, (5, 5), strides=(2, 2), activation="relu", padding='same'))
67
+ model.add(ke.layers.Conv2D(512, (5, 5), strides=(2, 2), activation="relu", padding='same'))
68
+ model.add(ke.layers.Flatten())
69
+ model.add(ke.layers.Dense(512, activation="relu"))
70
+ model.add(ke.layers.Dense(latent_vector_size, activation="relu"))
71
+ return model
72
+
73
+ """Make the builder function of decoder part of model."""
74
+
75
+ def build_model_dec():
76
+ model = ke.models.Sequential()
77
+ model.add(ke.layers.Dense(512, input_shape=(latent_vector_size,)))
78
+ model.add(ke.layers.Dense(25088))
79
+ model.add(ke.layers.Reshape((7, 7, 512)))
80
+ model.add(ke.layers.Conv2D(512, (5, 5), activation="relu", padding="same"))
81
+ model.add(ke.layers.UpSampling2D())
82
+ model.add(ke.layers.Conv2D(512, (5, 5), activation="relu", padding="same"))
83
+ model.add(ke.layers.UpSampling2D())
84
+ model.add(ke.layers.Conv2D(256, (5, 5), activation="relu", padding="same"))
85
+ model.add(ke.layers.UpSampling2D())
86
+ model.add(ke.layers.Conv2D(128, (5, 5), activation="relu", padding="same"))
87
+ model.add(ke.layers.UpSampling2D())
88
+ model.add(ke.layers.Conv2D(64, (5, 5), activation="relu", padding="same"))
89
+ model.add(ke.layers.UpSampling2D())
90
+ model.add(ke.layers.Conv2D(3, (5, 5), activation="relu", padding="same"))
91
+ return model
92
+
93
+ """Make the builder function of discriminator part of model."""
94
+
95
+ def build_model_disc():
96
+ model = ke.models.Sequential()
97
+ model.add(ke.layers.Dense(512, activation="relu", input_shape=(latent_vector_size,)))
98
+ model.add(ke.layers.Dense(256, activation="relu"))
99
+ model.add(ke.layers.Dense(1, activation="sigmoid"))
100
+ return model
101
+
102
+ """Make the builder function of the all parts of model."""
103
+
104
+ def build_model_aae():
105
+ model_encoder = build_model_enc()
106
+ model_decoder = build_model_dec()
107
+ model_discriminator = build_model_disc()
108
+ model_autoencoder = ke.models.Sequential()
109
+ model_autoencoder.add(model_encoder)
110
+ model_autoencoder.add(model_decoder)
111
+ model_encoder__generator__discriminator = ke.models.Sequential()
112
+ model_encoder__generator__discriminator.add(model_encoder)
113
+ model_encoder__generator__discriminator.add(model_discriminator)
114
+ return model_encoder, model_decoder, model_discriminator, model_autoencoder, model_encoder__generator__discriminator
115
+
116
+ """Build the model, compile it and then check parts of model.
117
+
118
+ Then show the model structure.
119
+ """
120
+
121
+ model_enc, model_dec, model_disc, model_ae, model_enc_disc = build_model_aae()
122
+ model_enc.summary()
123
+ model_dec.summary()
124
+ model_disc.summary()
125
+ model_ae.summary()
126
+ model_enc_disc.summary()
127
+ model_disc.compile(optimizer=ke.optimizers.Adam(lr=1e-4), loss="binary_crossentropy", metrics=[tf.keras.metrics.BinaryAccuracy()])
128
+ model_enc_disc.compile(optimizer=ke.optimizers.Adam(lr=1e-4), loss="binary_crossentropy", metrics=[tf.keras.metrics.BinaryAccuracy()])
129
+ model_ae.compile(optimizer=ke.optimizers.Adam(lr=1e-3), loss=ke.losses.MeanSquaredError())
130
+
131
+ """## Trainable function
132
+
133
+ Because our model has some complexity to train and has 2 seperate parts (decoder and discriminator) connected to same part (encoder) which makes 2 paths to train the model and optimize the model (autoencoder and adversarial), we should lock some parts of model for traing other parts and change this option a lot. So we defined a function to set a layer and all it's inside layers trainable or untrainable.
134
+ """
135
+
136
+ def settrainable(model, toset):
137
+ for layer in model.layers:
138
+ layer.trainable = toset
139
+ model.trainable = toset
140
+
141
+ """## Inception Score"""
142
+
143
+ import math
144
+ from keras.applications.inception_v3 import InceptionV3
145
+ from keras.applications.inception_v3 import preprocess_input
146
+
147
+ inception_model = InceptionV3()
148
+
149
+
150
def inception_score(images, n_split=10, eps=1E-16):
    """Compute the Inception Score of a batch of generated images.

    Args:
        images: batch of images (any HxW); resized to 299x299 for InceptionV3.
        n_split: number of equal splits the score is averaged over.
        eps: numerical floor inside the logs to avoid log(0).

    Returns:
        (mean, std) of the per-split exp(KL(p(y|x) || p(y))) scores.
    """
    images = tf.image.resize(images, (299, 299))
    # Enable NumPy-style methods (e.g. `.astype`) on TF tensors.
    from tensorflow.python.ops.numpy_ops import np_config
    np_config.enable_numpy_behavior()
    # BUG FIX: the original cast `images.astype('float32')` was immediately
    # discarded because preprocess_input was called on `images`; feed the
    # cast tensor through preprocessing instead.
    processed = preprocess_input(images.astype('float32'))
    yhat = inception_model.predict(processed)
    scores = list()
    n_part = math.floor(images.shape[0] / n_split)
    for i in range(n_split):
        # Class-probability slice for this split.
        ix_start, ix_end = i * n_part, i * n_part + n_part
        p_yx = yhat[ix_start:ix_end]
        # Marginal class distribution over the split.
        p_y = np.expand_dims(p_yx.mean(axis=0), 0)
        kl_d = p_yx * (np.log(p_yx + eps) - np.log(p_y + eps))
        sum_kl_d = kl_d.sum(axis=1)
        avg_kl_d = np.mean(sum_kl_d)
        is_score = np.exp(avg_kl_d)
        scores.append(is_score)
    is_avg, is_std = np.mean(scores), np.std(scores)
    return is_avg, is_std
170
+
171
+ """## Train the model
172
+
173
+ In this part model is trained based on the model defined and inputs loaded.
174
+
175
+ The default batch size is seted to 64 and epoch numbers to 20. In training each batch of each epoch at the first we train the autoencoder, then discriminator and then encoder-discriminator (adversarial part).
176
+ """
177
+
178
+ epochs = 20
179
+
180
+ datasetnumpy = dataset.map(lambda x : (x, x))
181
+ datasetnumpy2 = dataset.map(lambda x : (x, np.ones((batchsize, 1))))
182
+
183
+ enc_path = '/content/drive/MyDrive/MainModel/model_enc'
184
+ dec_path = '/content/drive/MyDrive/MainModel/model_dec'
185
+ disc_path = '/content/drive/MyDrive/MainModel/model_disc'
186
+ ae_path = '/content/drive/MyDrive/MainModel/model_ae'
187
+ enc_disc_path = '/content/drive/MyDrive/MainModel/model_enc_disc'
188
+ log_path = '/content/drive/MyDrive/MainModel/log.txt'
189
+
190
+ initial_epoch = -1
191
+
192
+ my_file = Path(log_path)
193
+ if my_file.is_file():
194
+ with open(log_path, 'r') as file:
195
+ initial_epoch = int(file.readline())
196
+ print(f'log found. epoch: {initial_epoch}')
197
+ model_enc = ke.models.load_model(enc_path)
198
+ model_dec = ke.models.load_model(dec_path)
199
+ model_disc = ke.models.load_model(disc_path)
200
+ model_ae = ke.models.load_model(ae_path)
201
+ model_enc_disc = ke.models.load_model(enc_disc_path)
202
+ else:
203
+ print('log not found.')
204
+
205
+ for epochnumber in range(epochs):
206
+ if epochnumber < initial_epoch:
207
+ continue
208
+ batchcounter = 0
209
+ batch_size = 0
210
+ for batchtensor in dataset:
211
+ batchcounter = batchcounter + 1;
212
+ settrainable(model_ae, True)
213
+ settrainable(model_enc, True)
214
+ settrainable(model_dec, True)
215
+ batch = batchtensor.numpy()
216
+ batch_size = batch.shape[0]
217
+ model_ae.train_on_batch(batch, batch)
218
+ settrainable(model_disc, True)
219
+ batchpred = model_enc.predict(batch)
220
+ fakepred = np.random.standard_normal((batch_size, latent_vector_size))
221
+ discbatch_x = np.concatenate([batchpred, fakepred])
222
+ discbatch_y = np.concatenate([np.zeros(batch_size), np.ones(batch_size)])
223
+ model_disc.train_on_batch(discbatch_x, discbatch_y)
224
+ settrainable(model_enc_disc, True)
225
+ settrainable(model_enc, True)
226
+ settrainable(model_disc, False)
227
+ model_enc_disc.train_on_batch(batch, np.ones(batch_size))
228
+ if batchcounter % 100 == 0:
229
+ model_enc.save(enc_path)
230
+ model_dec.save(dec_path)
231
+ model_disc.save(disc_path)
232
+ model_ae.save(ae_path)
233
+ model_enc_disc.save(enc_disc_path)
234
+ with open(log_path, 'w') as file:
235
+ file.write(f'{epochnumber}\n{batchcounter}')
236
+ elif batchcounter % 20 == 0:
237
+ print(batchcounter)
238
+ with open(log_path, 'w') as file:
239
+ file.write(f'{epochnumber}')
240
+ ae_evaluation = model_ae.evaluate(datasetnumpy, batch_size=batchsize)
241
+ adversarial_evaluation = model_enc_disc.evaluate(datasetnumpy2, batch_size=batchsize)
242
+ print('Reconstruction Loss:', ae_evaluation)
243
+ print('Adverserial Loss:', adversarial_evaluation[0])
244
+ print('Adverserial Accuracy:', adversarial_evaluation[1])
245
+ if batchcounter >= 1000:
246
+ num_batch = 100
247
+ else:
248
+ num_batch = 10
249
+ fakepred = np.random.standard_normal((batch_size * num_batch, latent_vector_size))
250
+ generated_data = model_dec.predict(fakepred)
251
+ inc_score = inception_score(generated_data)
252
+ print(f'Inception score for decoder is: {inc_score}')
253
+
254
+ import os
255
+ try:
256
+ my_file = Path(log_path)
257
+ if my_file.is_file():
258
+ os.remove(log_path)
259
+ except:
260
+ print('Exception occured.')
261
+
262
+ ae_evaluation = model_ae.evaluate(datasetnumpy, batch_size=batchsize)
263
+ adversarial_evaluation = model_enc_disc.evaluate(datasetnumpy2, batch_size=batchsize)
264
+ print('Reconstruction Loss:', ae_evaluation)
265
+ print('Adverserial Loss:', adversarial_evaluation[0])
266
+ print('Adverserial Accuracy:', adversarial_evaluation[1])
267
+ if batchcounter >= 1000:
268
+ num_batch = 100
269
+ else:
270
+ num_batch = 10
271
+ fakepred = np.random.standard_normal((batch_size * num_batch, latent_vector_size))
272
+ generated_data = model_dec.predict(fakepred)
273
+ inc_score = inception_score(generated_data)
274
+ print(f'Inception score for decoder is: {inc_score}')
275
+ model_enc.save(enc_path)
276
+ model_dec.save(dec_path)
277
+ model_disc.save(disc_path)
278
+ model_ae.save(ae_path)
279
+ model_enc_disc.save(enc_disc_path)
280
+
281
+ """## Preparing Data For Evaluation | Test
282
+
283
+ Read dataset from google drive and load it to `dataset` variable and preprocessing data to prepare them to learn the model with.
284
+ """
285
+
286
+ batchsize = 64
287
+
288
+ directory = '/content/drive/MyDrive/MainModel/test'
289
+
290
+ dataset = tf.keras.utils.image_dataset_from_directory(
291
+ directory,
292
+ labels=None,
293
+ batch_size=batchsize,
294
+ image_size=(224, 224)
295
+ )
296
+
297
+ dataset = dataset.unbatch()
298
+ dataset = dataset.batch(batchsize, drop_remainder=True)
299
+
300
+ """## Evaluation | Test"""
301
+
302
+ enc_path = '/content/drive/MyDrive/MainModel/model_enc'
303
+ dec_path = '/content/drive/MyDrive/MainModel/model_dec'
304
+ disc_path = '/content/drive/MyDrive/MainModel/model_disc'
305
+ ae_path = '/content/drive/MyDrive/MainModel/model_ae'
306
+ enc_disc_path = '/content/drive/MyDrive/MainModel/model_enc_disc'
307
+ log_path = '/content/drive/MyDrive/MainModel/log.txt'
308
+
309
+ model_enc = ke.models.load_model(enc_path)
310
+ model_dec = ke.models.load_model(dec_path)
311
+ model_disc = ke.models.load_model(disc_path)
312
+ model_ae = ke.models.load_model(ae_path)
313
+ model_enc_disc = ke.models.load_model(enc_disc_path)
314
+
315
+ with open(log_path, 'r') as file:
316
+ initial_epoch = int(file.readline())
317
+ print(f'log found. Epochs model trained: {initial_epoch}')
318
+
319
+ datasetnumpy = dataset.map(lambda x : (x, x))
320
+ datasetnumpy2 = dataset.map(lambda x : (x, np.ones((batchsize, 1))))
321
+ ae_evaluation = model_ae.evaluate(datasetnumpy, batch_size=batchsize)
322
+ adversarial_evaluation = model_enc_disc.evaluate(datasetnumpy2, batch_size=batchsize)
323
+ print('Reconstruction Loss:', ae_evaluation)
324
+ print('Adverserial Accuracy(positive):', adversarial_evaluation[1])
325
+ fakepred = np.random.standard_normal((batchsize, latent_vector_size))
326
+ fake_pred = model_disc.predict(fakepred)
327
+ fake_disc_score = fake_pred.round() == 0
328
+ fake_disc_score = fake_disc_score.sum()
329
+ print('Adverserial Accuracy(negative):', fake_disc_score / batchsize)
330
+ generated_data = model_dec.predict(fakepred)
331
+ inc_score = inception_score(generated_data)
332
+ print(f'Inception score for decoder is: {inc_score}')