# example of face detection with mtcnn from __future__ import print_function, division from matplotlib import pyplot from PIL import Image from numpy import asarray from mtcnn.mtcnn import MTCNN import cv2 from mask_the_face import * import numpy as np import cv2 from tensorflow.keras.regularizers import l2 import pathlib import tensorflow from tensorflow import keras from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense,Dropout,BatchNormalization import tensorflow.keras import pathlib import tensorflow as tf from tensorflow import keras from tensorflow.keras.preprocessing.image import ImageDataGenerator import tensorflow.keras.utils as utils from tensorflow.keras.optimizers import Adam as adam from tensorflow.keras.optimizers import SGD from tensorflow.keras.optimizers import RMSprop from tensorflow.keras.optimizers import Adagrad from tensorflow.keras.callbacks import EarlyStopping ,ModelCheckpoint import tensorflow as tf from tensorflow.keras import Model import matplotlib.pyplot as plt import numpy as np from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, GlobalAveragePooling2D, Dropout, Input # import keras_tuner as kt from tensorflow.keras.applications import InceptionResNetV2 from tensorflow.keras import layers from tensorflow.keras.applications.inception_resnet_v2 import preprocess_input from matplotlib import pyplot from numpy import asarray import copy import random # from mtcnn.mtcnn import MTCNN import glob import gradio as gr from tensorflow.keras.regularizers import l2 from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise from tensorflow.keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D from tensorflow.keras.layers import MaxPooling2D from tensorflow.keras.layers import LeakyReLU from tensorflow.keras.layers import UpSampling2D, Conv2D from tensorflow.keras.models import Sequential, Model from tensorflow.keras.optimizers import Adam from 
tensorflow.keras import losses from tensorflow.keras.utils import to_categorical import tensorflow.keras.backend as K from tensorflow.keras.utils import plot_model import matplotlib.pyplot as plt import shutil import numpy as np from tensorflow.keras.applications import EfficientNetB0 from tensorflow.keras.applications import VGG16 def ssim_l1_loss(gt, y_pred, max_val=2.0, l1_weight=1.0): """ Computes SSIM loss with L1 normalization @param gt: Ground truth image @param y_pred: Predicted image @param max_val: Maximal SSIM value @param l1_weight: Weight of L1 normalization @return: SSIM L1 loss """ ssim_loss = 1 - tf.reduce_mean(tf.image.ssim(gt, y_pred, max_val=max_val)) l1 = tf.keras.metrics.mean_absolute_error(gt, y_pred) return ssim_loss + tf.cast(l1 * l1_weight, tf.float32) class GAN(): def __init__(self,Xpointers,Ypointers,valX,valY,BigBatchSize,BinaryEnabled=False,BigBatchEnable=False,loading=True,printModel=False): self.Xpoint= Xpointers self.Ypoint= Ypointers self.X='' self.Y='' self.Binary='' self.DataSize=BigBatchSize self.genEnable=BigBatchEnable self.loading=loading self.PrintOut=printModel if self.loading: self.valX=self.get_all_images(valX) self.valY=self.get_all_images(valY) self.BestValLoss=1000 self.BinaryEnabled=BinaryEnabled if self.loading: if self.BinaryEnabled: self.Binary=self.GetBinary(self.valY,self.valX) self.ChangeToGreen('val') optimizer = Adam(0.0010,) # # Build and compile the discriminator self.discriminator_glo = self.build_discriminator() self.discriminator_glo.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) self.discriminator_loc = self.build_local_discriminator() self.discriminator_loc.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) self.generator,self.predictor = self.build_generator() GenOut = self.generator.output valid = self.discriminator_glo(GenOut[0]) self.discriminator_glo.trainable = False valid2 = self.discriminator_loc(GenOut) self.discriminator_loc.trainable 
= False self.combined = Model(self.generator.input , [self.generator.output[0], valid,valid2]) self.combined.compile(loss=[ssim_l1_loss, 'binary_crossentropy','binary_crossentropy'], loss_weights=[0.35, 0.50,1], optimizer=optimizer) if self.PrintOut: self.generator.summary() self.discriminator_loc.summary() self.discriminator_glo.summary() self.combined.summary() if self.loading: self.getBigBatch() def GetBinary(self,Org,Masked): allBinary=[] for i,x in enumerate(Masked): diff = cv2.absdiff(Org[i], Masked[i]) gray=cv2.cvtColor(diff,cv2.COLOR_BGR2GRAY) _, diff2 = cv2.threshold(gray, 9, 255, cv2.THRESH_BINARY) img_median = cv2.medianBlur(diff2, 3) img_median = img_median/255 allBinary.append(img_median) return np.array(allBinary) def get_all_images(self,classes): allImages=[] for i,sample in enumerate(classes[:]): org_img = cv2.imread(sample) #org_img = org_img.astype('float32') org_img = cv2.resize(org_img, (256, 256)) org_img=cv2.cvtColor(org_img,cv2.COLOR_BGR2RGB) # org_img= org_img/127.5 - 1 # np.append(allImages, org_img) allImages.append(org_img) return np.array(allImages) def ChangeToGreen(self,data='train'): if data=='train': for i,x in enumerate(self.X): self.X[i][self.Binary[i]!=0]=(1,255,1) else: for i,x in enumerate(self.valX): self.valX[i][self.Binary[i]!=0]=(1,255,1) def getBigBatch(self): del self.X del self.Y del self.Binary if self.genEnable: idx = np.random.randint(0, self.Xpoint.shape[0], self.DataSize) currentX=self.Xpoint[idx] currentY=self.Ypoint[idx] self.X=self.get_all_images(currentX) self.Y=self.get_all_images(currentY) else: self.X=self.get_all_images(self.Xpoint) self.Y=self.get_all_images(self.Ypoint) if self.BinaryEnabled: self.Binary=self.GetBinary(self.Y,self.X) self.ChangeToGreen('train') self.Binary=self.Binary.reshape(self.Binary.shape[0],256,256,1) def downsample(self,filters, size, apply_batchnorm=True): result = tf.keras.Sequential() result.add( tf.keras.layers.Conv2D(filters, size, strides=2, padding='same',)) 
result.add(tf.keras.layers.ReLU()) result.add( tf.keras.layers.Conv2D(filters, size, padding='same',)) result.add(tf.keras.layers.ReLU()) if apply_batchnorm: result.add(tf.keras.layers.BatchNormalization()) return result def upsample(self,filters, size, apply_dropout=False): result = tf.keras.Sequential() result.add( tf.keras.layers.Conv2DTranspose(filters, size, strides=2, padding='same')) result.add(tf.keras.layers.ReLU()) result.add( tf.keras.layers.Conv2DTranspose(filters, size, padding='same')) result.add(tf.keras.layers.ReLU()) result.add(tf.keras.layers.BatchNormalization()) if apply_dropout: result.add(tf.keras.layers.Dropout(0.2)) return result def build_generator(self): inputs = tf.keras.layers.Input(shape=[256, 256, 3]) binary= tf.keras.layers.Input(shape=[256, 256, 1]) down_stack = [ self.downsample(128, 3, apply_batchnorm=False), # (batch_size, 128, 128, 64) self.downsample(256, 3), # (batch_size, 64, 64, 128) self.downsample(256, 3), # (batch_size, 64, 64, 128) self.downsample(256, 3), # (batch_size, 64, 64, 128) self.downsample(256, 3), # (batch_size, 32, 32, 256) self.downsample(512, 3), # (batch_size, 32, 32, 256) self.downsample(512, 3), # (batch_size, 8, 8, 512) ] up_stack = [ self.upsample(512, 3, apply_dropout=True), # (batch_size, 8, 8, 1024) self.upsample(512, 3), # (batch_size, 64, 64, 256) self.upsample(256, 3,apply_dropout=True), # (batch_size, 64, 64, 256) self.upsample(256, 3), # (batch_size, 64, 64, 256) self.upsample(256, 3,), # (batch_size, 64, 64, 256) self.upsample(256, 3), # (batch_size, 64, 64, 256) self.upsample(128, 3,), # (batch_size, 128, 128, 128) ] down_stack2 = [ self.downsample(128, 5, apply_batchnorm=False), # (batch_size, 128, 128, 64) self.downsample(128, 5), # (batch_size, 64, 64, 128) self.downsample(256, 5), # (batch_size, 32, 32, 256) self.downsample(256, 5), # (batch_size, 32, 32, 256) self.downsample(256, 5), # (batch_size, 32, 32, 256) self.downsample(512, 5), # (batch_size, 8, 8, 512) ] up_stack2 = [ 
self.upsample(512, 5, apply_dropout=True), # (batch_size, 8, 8, 1024) self.upsample(256, 5), # (batch_size, 64, 64, 256) self.upsample(256, 5,apply_dropout=True), # (batch_size, 64, 64, 256) self.upsample(256, 5), # (batch_size, 64, 64, 256) self.upsample(128, 5,), # (batch_size, 64, 64, 256) self.upsample(128, 5), # (batch_size, 128, 128, 128) ] initializer = tf.random_normal_initializer(0., 0.02) last = tf.keras.layers.Conv2DTranspose(3, 3, strides=2, padding='same', name='GenOut', activation='tanh') # (batch_size, 256, 256, 3) last2 = tf.keras.layers.Conv2DTranspose(3, 3, strides=2, padding='same', name='GenOut2', activation='tanh') # (batch_size, 256, 256, 3) x = inputs # Downsampling through the model skips = [] for down in down_stack: x = down(x) skips.append(x) skips = reversed(skips[:-1]) # Upsampling and establishing the skip connections for up, skip in zip(up_stack, skips): x = up(x) x = tf.keras.layers.Concatenate()([x, skip]) x = last(x) y = inputs # Downsampling through the model skips = [] for down in down_stack2: y = down(y) skips.append(y) skips = reversed(skips[:-1]) # Upsampling and establishing the skip connections for up, skip in zip(up_stack2, skips): y= up(y) y = tf.keras.layers.Concatenate()([y, skip]) y = last2(y) z= tf.keras.layers.Average()([x,y]) model1=tf.keras.Model(inputs=[inputs,binary], outputs=[z,binary]) model2=tf.keras.Model(inputs=inputs, outputs=z) return model1,model2 def build_discriminator(self): inputs = Input(shape=[256, 256, 3]) facenetmodel = Flatten() # facenetmodel.load_weights('/content/drive/MyDrive/facenet_keras_weights.h5') # for layer in facenetmodel.layers[:-50]: # layer.trainable = False # Augment data. augmented = keras.Sequential([layers.Resizing(160, 160),],name="data_augmentation",)(inputs) # This is 'bootstrapping' a new top_model onto the pretrained layers. 
top_model = facenetmodel(augmented) top_model = Dropout(0.5)(top_model) top_model = BatchNormalization()(top_model) # top_model = Flatten(name="flatten")(top_model) output_layer = Dense(1, activation='sigmoid')(top_model) return Model(inputs=inputs, outputs=output_layer,name='Discriminator') def build_local_discriminator(self): img = Input(shape=[256, 256, 3]) binary = Input(shape=[256, 256, 1]) bitAND=tf.keras.layers.Lambda(lambda x: tf.math.multiply(x[0], x[1]))([img,binary]) facenetmodel = Flatten() # facenetmodel.load_weights('/content/drive/MyDrive/facenet_keras_weights.h5') # for layer in facenetmodel.layers[:-50]: # layer.trainable = False # Augment data. augmented = keras.Sequential([layers.Resizing(160, 160),],name="data_augmentation",)(bitAND) # This is 'bootstrapping' a new top_model onto the pretrained layers. top_model = facenetmodel(augmented) top_model = Dropout(0.5)(top_model) top_model = BatchNormalization()(top_model) # top_model = Flatten(name="flatten")(top_model) output_layer = Dense(1, activation='sigmoid')(top_model) return Model(inputs=[img,binary], outputs=output_layer,name='Discriminator_local') def train(self,epochs,batch_size,imagesSavePath,modelPath, sample_interval=50,BigBatchInterval=1000,modelInterval=50): xVal=self.valX/127.5 - 1 yVal=self.valY/127.5 - 1 # Adversarial ground truths valid = np.ones((batch_size, 1)) fake = np.zeros((batch_size, 1)) valid = np.ones((batch_size, 1)) for epoch in range(epochs): # Select a random batch of images idx = np.random.randint(0, self.X.shape[0], batch_size) masked_imgs = self.X[idx] org_imgs= self.Y[idx] masked_imgs = masked_imgs /127.5 - 1. org_imgs= org_imgs /127.5 - 1. 
if self.BinaryEnabled: binary= self.Binary[idx] org_local= tf.math.multiply(org_imgs, binary) gen_missing = self.generator.predict([masked_imgs,binary]) # Train the discriminator d_loss_real_glo = self.discriminator_glo.train_on_batch(org_imgs, valid) d_loss_fake_glo = self.discriminator_glo.train_on_batch(gen_missing[0], fake) d_loss_glo = 0.5 * np.add(d_loss_real_glo, d_loss_fake_glo) d_loss_real_loc = self.discriminator_loc.train_on_batch([org_imgs,binary], valid) d_loss_fake_loc = self.discriminator_loc.train_on_batch(gen_missing, fake) d_loss_loc = 0.5 * np.add(d_loss_real_loc, d_loss_fake_loc) # --------------------- # Train Generator # --------------------- # self.combined.layers[-1].trainable = False g_loss = self.combined.train_on_batch([masked_imgs,binary], [org_imgs, valid,valid]) validx = np.random.randint(0, 500, 3) val_pred = self.predictor.predict(xVal[validx]) val_loss=ssim_l1_loss(yVal[validx].astype('float32'),val_pred) val_loss=np.average(val_loss) # Plot the progress print ("%d [G loss: %f,mse:%f] [val_loss:%f]" % (epoch, g_loss[0], g_loss[1],val_loss)) # Plot the progress if epoch!=0: if epoch % 100 == 0: self.combined.save_weights('/content/drive/MyDrive/combinedModel_loc12.h5') # If at save interval => save generator weights # if epoch!=0: # if epoch % modelInterval == 0: # if val_loss save generated image samples if epoch % sample_interval == 0: idx = np.random.randint(0, self.X.shape[0], 6) val_idx = np.random.randint(0, 499, 2) val_reals= self.valY[val_idx] val_imgs = self.valX[val_idx] reals= self.Y[idx] imgs = self.X[idx] self.sample_images(epoch, imgs,reals,imagesSavePath,val_reals,val_imgs) #Big Batch Gen if self.genEnable: if epoch!=0: if epoch % BigBatchInterval == 0: self.getBigBatch() def sample_images(self, epoch, imgs,reals,savepath,val_reals,val_imgs): r, c = 3, 8 imgs=imgs/127.5 -1. val_imgs=val_imgs/127.5 -1. 
gen_missing = self.predictor.predict(imgs) val_missing = self.predictor.predict(val_imgs) imgs = 0.5 * imgs + 0.5 val_imgs = 0.5 * val_imgs + 0.5 # reals= 0.5* reals +0.5 gen_missing=0.5*gen_missing+0.5 val_missing=0.5*val_missing+0.5 imgs=np.concatenate((imgs,val_imgs), axis=0) gen_missing=np.concatenate((gen_missing,val_missing), axis=0) reals=np.concatenate((reals,val_reals), axis=0) fig, axs = plt.subplots(r, c,figsize=(50,50)) for i in range(c): axs[0,i].imshow(imgs[i, :,:]) axs[0,i].axis('off') axs[1,i].imshow(reals[i, :,:]) axs[1,i].axis('off') axs[2,i].imshow(gen_missing[i, :,:]) axs[2,i].axis('off') fig.savefig(savepath+"%d.png" % epoch) plt.close() GAN_Model = GAN(Xpointers=None,Ypointers=None,valX=None,valY=None, BigBatchSize=50,BigBatchEnable=True,BinaryEnabled=True,loading=False) GAN_Model.predictor.load_weights('DemoPredictor2.h5') def extract_face(photo, required_size=(256, 256),incr=110): # load image from file pixels = photo print(pixels.shape) maxH=(pixels.shape[0]) maxW=(pixels.shape[1]) if (pixels.shape[-1])>3 or (pixels.shape[-1])<3: image = Image.fromarray(pixels) return image # create the detector, using default weights detector = MTCNN() # detect faces in the image results = detector.detect_faces(pixels) if not results: image = Image.fromarray(pixels) image = image.resize(required_size) return image # extract the bounding box from the first face x1, y1, width, height = results[0]['box'] x2, y2 = x1 + width, y1 + height if y1-incr<=0: y1=0 else : y1=y1-incr if x1-incr<=0: x1=0 else : x1=x1-incr if y2+incr>=maxH: y2=maxH else : y2=y2+incr if x2+incr>=maxW: x2=maxW else : x2=x2+incr # extract the face face = pixels[y1:int(y2), int(x1):int(x2)] # resize pixels to the model size image = Image.fromarray(face) image = image.resize(required_size) return image def GetBinary_test(Org,Masked): allBinary=[] for i,x in enumerate(Masked): diff = cv2.absdiff(Org, Masked) gray=cv2.cvtColor(diff,cv2.COLOR_RGB2GRAY) _, diff2 = cv2.threshold(gray, 9, 255, 
cv2.THRESH_BINARY) img_median = cv2.medianBlur(diff2, 3) img_median = img_median/255 allBinary.append(img_median) return np.array(allBinary) def ChangeToGreen_test(X,Binary): X[Binary[0]!=0]=(1,255,1) def predictImage_masked(GANmodel,groundTruth,masked): TestX=masked.copy() Testy=groundTruth.copy() Binary=GetBinary_test(Testy,TestX) ChangeToGreen_test(TestX,Binary) imgs=TestX/127.5 -1. Testy=Testy/255 gen_missing = GANmodel.predictor.predict(imgs[None,...]) gen_missing=0.5*gen_missing+0.5 psnr2 = tf.image.psnr(Testy.astype('float32'),gen_missing, max_val=1.0) ssim=tf.image.ssim(Testy.astype('float32'), gen_missing, max_val=1) Mssim=np.average(ssim) Mpsnr=np.average(psnr2) I = gen_missing*255 # or any coefficient I = I.astype(np.uint8) I = cv2.normalize(I, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U) return (I,Mpsnr,Mssim) def grid_display(list_of_images, list_of_titles=[], no_of_columns=2, figsize=(10,10)): fig = plt.figure(figsize=figsize) column = 0 for i in range(len(list_of_images)): column += 1 # check for end of column and create a new figure if column == no_of_columns+1: fig = plt.figure(figsize=figsize) column = 1 fig.add_subplot(1, no_of_columns, column) plt.imshow(list_of_images[i]) plt.axis('off') if len(list_of_titles) >= len(list_of_images): plt.title(list_of_titles[i]) # paths = r"C:\Users\MrSin\Downloads\images\*.jpg" # import glob # for filepath in glob.iglob(paths): # print(filepath) # org_img = cv2.imread(filepath) def ExecutePipline(img): im = Image.fromarray(img.astype('uint8'), 'RGB') org_img=np.array(im) # plt.imshow(org_img) errorPNG=cv2.imread('error.jpg',) errorPNG=errorPNG[...,::-1] img2=extract_face(org_img,incr=150) cropped = np.array(img2) open_cv_image = cropped[:, :, ::-1].copy() masked1=maskThisImages(open_cv_image) if len(masked1)==0: img2=extract_face(org_img,incr=165) cropped = np.array(img2) open_cv_image = cropped[:, :, ::-1].copy() masked1=maskThisImages(open_cv_image) if len(masked1)==0: img2=extract_face(org_img,incr=180) 
cropped = np.array(img2) open_cv_image = cropped[:, :, ::-1].copy() masked1=maskThisImages(open_cv_image) if len(masked1)==0: img2=extract_face(org_img,incr=200) cropped = np.array(img2) open_cv_image = cropped[:, :, ::-1].copy() masked1=maskThisImages(open_cv_image) if len(masked1)==0: img2=extract_face(org_img,incr=500) cropped = np.array(img2) open_cv_image = cropped[:, :, ::-1].copy() masked1=maskThisImages(open_cv_image) if len(masked1)==0: return np.zeros((256,256,3)),errorPNG,np.zeros((256,256,3)) masked2=cv2.cvtColor(masked1,cv2.COLOR_BGR2RGB) # output1 = masked2*255 # or any coefficient # output1 = output1.astype(np.uint8) # output1 = cv2.normalize(output1, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U) # plt.imshow(output1) # output1= Image.fromarray(output1) results,psnr,ssim=predictImage_masked(GAN_Model,cropped,masked2) return cropped,masked2,results[0] #, # paths = r"C:\Users\MrSin\Downloads\images\*.jpg" # import glob # for filepath in glob.iglob(paths): # print(filepath) # org_img = cv2.imread(filepath) # org_img=cv2.cvtColor(org_img,cv2.COLOR_BGR2RGB) # img=extract_face(org_img) # cropped = np.array(img) # #output 1^ # open_cv_image = cropped[:, :, ::-1].copy() # masked=maskThisImages(open_cv_image) # cv2.imwrite('mytestmasked.jpg',masked) # masked=cv2.cvtColor(masked,cv2.COLOR_BGR2RGB) # #output 2^ # print(masked.shape) # results,psnr,ssim=predictImage_masked_model2(GAN_Model,cropped,masked) # displayResult = np.array(results[0]) # #output 2 results[0]^ # titles = ["groundtruth", # "Masked", # "Generated", ] # images = [cropped,masked,results[0]] # grid_display(images, titles, 3, (15,15)) # titles = ["groundtruth", # "Masked", # "Generated", ] # org_img = cv2.imread('mytestmasked.jpg') # org_img=cv2.cvtColor(org_img,cv2.COLOR_BGR2RGB) # results=predictImageOnly(GAN_Model,org_img) # images = [cropped,masked,results[0]] # grid_display(images, titles, 3, (15,15)) # imagein = gr.Image() # maskedOut = gr.Image(type='numpy',label='Masked (Model-input)') # 
# --- Gradio demo UI ---
# (The dead commented-out gr.Interface alternative was removed; the Blocks
# UI below is the live code path. Typo fix in the description: "predect"
# -> "predict".)
with gr.Blocks() as demo:
    gr.HTML(
        """
        Face Un-Masking

        AI Model that generate area under masks! simply upload your face image without a mask, then click submit, the model will apply digital mask then send it to the Double Context GAN to predict area under the mask.
        """
    )
    with gr.Row():
        with gr.Column():
            imagein = gr.Image(label='Input', interactive=True)
        with gr.Column():
            # Bundled sample images for one-click demos.
            gr.Examples(['40868.jpg', '08227.jpg', '59028.jpg',
                         '31735.jpg', '49936.jpg', '21565.jpg'],
                        inputs=imagein)
    with gr.Row():
        image_button = gr.Button("Submit")
    with gr.Row():
        with gr.Column():
            crop = gr.Image(type='numpy', label='Groundtruth(cropped)')
        with gr.Column():
            maskedOut = gr.Image(type='numpy', label='Masked (Model-input)')
        with gr.Column():
            genOut = gr.Image(type='numpy', label='Unmasked Output')
    gr.Markdown(
        "Made with 🖤 by Mohammed:Me.MohammedAlsinan@gmail.com & Aseel:A9eel.7neef@gmail.com"
    )
    # Wire the pipeline: input image -> (cropped, masked, generated).
    image_button.click(fn=ExecutePipline, inputs=imagein,
                       outputs=[crop, maskedOut, genOut])

demo.launch()