| from tensorflow import keras | |
| import numpy as np | |
| import tensorflow as tf | |
| import matplotlib.pyplot as plt | |
| from tensorflow.keras import regularizers | |
| from tensorflow.keras import metrics | |
| import scipy.misc | |
| import os | |
| import numpy as np | |
| from tensorflow.keras.models import * | |
| from tensorflow.keras.layers import * | |
| from tensorflow.keras.optimizers import * | |
def get_optimizer(learning_rate=1e-4):
    """Return the Adam optimizer shared by the discriminator and the GAN.

    Args:
        learning_rate: Adam step size. Defaults to 1e-4, the value the
            original hard-coded.

    Returns:
        A ``tensorflow.keras.optimizers.Adam`` instance.
    """
    # Fix: the `lr` keyword is deprecated and removed in recent Keras
    # releases; `learning_rate` is the supported, identical-behavior name.
    return Adam(learning_rate=learning_rate)
def generator_model(pretrained_weights=None, input_size=(256, 256, 1), biggest_layer=512):
    """Build the U-Net generator.

    Encoder: four down-sampling stages (64 -> 128 -> 256 -> biggest_layer//2
    filters) into a ``biggest_layer``-filter bottleneck, with Dropout(0.5) at
    the two deepest stages. Decoder: four up-sampling stages, each
    concatenated with the matching encoder feature map (skip connections).
    A final 1x1 sigmoid convolution emits one channel in [0, 1].

    Args:
        pretrained_weights: optional path to a weights file; when provided the
            weights are loaded into the model before returning.
        input_size: input tensor shape (H, W, C). H and W are assumed to be
            divisible by 16 so the four pool/upsample stages align — TODO
            confirm against callers.
        biggest_layer: bottleneck filter count; the stage above it uses
            ``biggest_layer // 2``. NOTE(review): the decoder widths below are
            hard-coded (512/256/128/64) and do not scale with this argument.

    Returns:
        An uncompiled keras ``Model`` mapping ``input_size`` -> (H, W, 1).
    """
    inputs = Input(input_size)

    # --- Encoder -----------------------------------------------------------
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(biggest_layer // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = Conv2D(biggest_layer // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    # --- Bottleneck --------------------------------------------------------
    conv5 = Conv2D(biggest_layer, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
    conv5 = Conv2D(biggest_layer, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)

    # --- Decoder with skip connections -------------------------------------
    up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
    merge6 = concatenate([drop4, up6])
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)

    up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
    merge7 = concatenate([conv3, up7])
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)

    up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))
    merge8 = concatenate([conv2, up8])
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)

    up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
    merge9 = concatenate([conv1, up9])
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)

    # 1x1 conv + sigmoid: single-channel output in [0, 1].
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    # Bug fix: the original accepted `pretrained_weights` but never used it,
    # silently returning an untrained model. Honour it now.
    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model
def discriminator_model(input_size=(256, 256, 1)):
    """Build and compile the PatchGAN discriminator.

    The two input images (generated/target and its conditioning image) are
    concatenated on the channel axis and passed through four strided
    convolution blocks, ending in a 1-filter sigmoid convolution that scores
    each receptive-field patch for realism.

    Args:
        input_size: shape of each input image, (H, W, C).

    Returns:
        A compiled keras ``Model``: [img_A, img_B] -> patch validity map,
        trained with MSE loss (LSGAN-style) and an accuracy metric.
    """
    base_filters = 64

    def conv_block(x, filters, kernel=4, batch_norm=True):
        # Strided conv halves the spatial resolution, then LeakyReLU and
        # (optionally) BatchNormalization.
        x = Conv2D(filters, kernel_size=kernel, strides=2, padding='same')(x)
        x = LeakyReLU(alpha=0.2)(x)
        if batch_norm:
            x = BatchNormalization(momentum=0.8)(x)
        return x

    img_A = Input(input_size)
    img_B = Input(input_size)

    features = Concatenate(axis=-1)([img_A, img_B])

    # First block skips batch-norm; the rest widen to 128, 256, 256 filters.
    features = conv_block(features, base_filters, batch_norm=False)
    for width in (base_filters * 2, base_filters * 4, base_filters * 4):
        features = conv_block(features, width)

    validity = Conv2D(1, kernel_size=4, strides=1, padding='same', activation='sigmoid')(features)

    discriminator = Model([img_A, img_B], validity)
    discriminator.compile(loss='mse', optimizer=get_optimizer(), metrics=['accuracy'])
    return discriminator
def get_gan_network(discriminator, generator, input_size=(256, 256, 1)):
    """Assemble and compile the stacked model that trains the generator.

    The discriminator is frozen inside this combined model, so fitting it
    updates only the generator. The model maps a conditioning image to
    (validity, generated image); validity uses MSE loss (weight 1) and the
    image uses binary cross-entropy (weight 100), so reconstruction dominates.

    Args:
        discriminator: compiled discriminator taking [image, condition].
        generator: generator mapping condition -> image.
        input_size: shape of the conditioning image, (H, W, C).

    Returns:
        A compiled keras ``Model``: condition -> [validity, generated image].
    """
    # Freeze the discriminator within this stack only; the standalone,
    # already-compiled discriminator continues to train normally.
    discriminator.trainable = False

    condition = Input(input_size)
    generated = generator(condition)
    validity = discriminator([generated, condition])

    gan = Model(inputs=[condition], outputs=[validity, generated])
    gan.compile(
        loss=['mse', 'binary_crossentropy'],
        loss_weights=[1, 100],
        optimizer=get_optimizer(),
        metrics=['accuracy'],
    )
    return gan