{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "Face_sub_system.ipynb",
      "provenance": [],
      "collapsed_sections": [],
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/EriProject/Multimodal_Biometrics/blob/master/Face_sub_system.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "oi3tGraDi2v8",
        "colab_type": "code",
        "outputId": "42d2dd4d-2fe4-4121-8329-2055f645b661",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 141
        }
      },
      "source": [
        "# Import libraries.\n",
        "# NOTE(review): this notebook mixes standalone `keras` and `tensorflow.keras`\n",
        "# imports (e.g. keras Sequential compiled with a tf.keras Adam). It ran in the\n",
        "# original Colab environment, but is fragile across version upgrades -- prefer\n",
        "# a single namespace if this breaks.\n",
        "import os\n",
        "import tempfile\n",
        "\n",
        "import numpy as np\n",
        "import pandas as pd\n",
        "import matplotlib.pyplot as plt\n",
        "%matplotlib inline\n",
        "import cv2\n",
        "import tensorflow as tf\n",
        "from keras.applications import ResNet50\n",
        "from keras.models import Sequential\n",
        "from keras.applications import imagenet_utils\n",
        "from keras.layers.core import Dense, Flatten, Dropout\n",
        "from keras import backend as K\n",
        "from keras.applications.resnet50 import preprocess_input\n",
        "from keras.preprocessing.image import ImageDataGenerator\n",
        "from keras.layers.convolutional import Conv2D, MaxPooling2D, SeparableConv2D\n",
        "from tensorflow.keras import optimizers\n",
        "from tensorflow.keras.optimizers import Adam, SGD\n",
        "from tensorflow.keras.models import model_from_json\n",
        "from sklearn.metrics import confusion_matrix\n",
        "\n",
        "# Mount Google Drive so the dataset directories used below are reachable.\n",
        "from google.colab import drive\n",
        "drive.mount('/content/drive')"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Using TensorFlow backend.\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n",
            "\n",
            "Enter your authorization code:\n",
            "··········\n",
            "Mounted at /content/drive\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "75ybkLI2we6s",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# add regularizer to the model\n",
        "def add_regularization(reg_model, regularizer=tf.keras.regularizers.l2(0.0001)):\n",
        "    \"\"\"Return `reg_model` rebuilt with `regularizer` attached to every layer\n",
        "    that exposes a `kernel_regularizer` attribute.\n",
        "\n",
        "    Setting the attribute on an already-built layer only updates the model\n",
        "    config, so the model is round-tripped through its JSON config (weights\n",
        "    are saved to a temp file first and restored afterwards).\n",
        "\n",
        "    NOTE(review): the loop walks only `reg_model.layers` (no recursion), so\n",
        "    layers inside a nested sub-model (e.g. the ResNet50 wrapped in the\n",
        "    Sequential below) are presumably left untouched -- confirm whether that\n",
        "    is intended (the ResNet50 layer is frozen here, so likely harmless).\n",
        "    \"\"\"\n",
        "\n",
        "    # Guard: if the argument is not a Keras regularizer, warn and return the\n",
        "    # model unchanged rather than raising.\n",
        "    if not isinstance(regularizer, tf.keras.regularizers.Regularizer):\n",
        "      print(\"Regularizer must be a subclass of tf.keras.regularizers.Regularizer\")\n",
        "      return reg_model\n",
        "\n",
        "    # Attach the regularizer to every top-level layer that supports it.\n",
        "    for layer in reg_model.layers:\n",
        "        for attr in ['kernel_regularizer']:\n",
        "            if hasattr(layer, attr):\n",
        "              setattr(layer, attr, regularizer)\n",
        "\n",
        "    # When we change the layers attributes, the change only happens in the model config file\n",
        "    model_json = reg_model.to_json()\n",
        "\n",
        "    # Save the weights before reloading the model.\n",
        "    tmp_weights_path = os.path.join(tempfile.gettempdir(), 'tmp_weights_Final_face.h5')\n",
        "    reg_model.save_weights(tmp_weights_path)\n",
        "\n",
        "    # load the model from the config\n",
        "    reg_model = tf.keras.models.model_from_json(model_json)\n",
        "    \n",
        "    # Reload the model weights\n",
        "    reg_model.load_weights(tmp_weights_path, by_name=True)\n",
        "    return reg_model\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "c8_CTiz-nD55",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Hyper-parameters / constants shared by the cells below.\n",
        "NUM_CLASSES = 40\n",
        "CHANNELS = 3\n",
        "IMAGE_RESIZE = 224\n",
        "RESNET50_POOLING_AVERAGE = 'avg'\n",
        "DENSE_LAYER_ACTIVATION = 'softmax'\n",
        "OBJECTIVE_FUNCTION = 'categorical_crossentropy'\n",
        "# epochs without val_loss improvement before EarlyStopping would fire\n",
        "EARLY_STOP_PATIENCE = 3\n",
        "# Using 1 to easily manage mapping between test_generator & prediction for submission preparation\n",
        "BATCH_SIZE_TESTING = 1\n",
        "# Unused: the model cell below downloads 'imagenet' weights directly instead.\n",
        "resnet_weights_path = '../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5' # not necessary coz already used imagenet\n",
        " "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "kIXqu48yp68m",
        "colab_type": "text"
      },
      "source": [
        "STEPS_PER_EPOCH_TRAINING = number of training images / batch size"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "EchmfQcFp2K6",
        "colab_type": "code",
        "outputId": "89231b88-44a6-4bed-e146-a85dd3c1c61e",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 260
        }
      },
      "source": [
        "# Transfer learning: frozen ResNet50 feature extractor + trainable softmax head.\n",
        "model = Sequential()\n",
        "\n",
        "# Base model: ImageNet weights, no classification top, global average pooling,\n",
        "# so its output is a flat feature vector per image.\n",
        "Resnet_Model = ResNet50(include_top = False, pooling = RESNET50_POOLING_AVERAGE, weights = 'imagenet')\n",
        "model.add(Resnet_Model)\n",
        "\n",
        "# Classification head: a single Dense softmax layer over the NUM_CLASSES (40) identities.\n",
        "model.add(Dense(NUM_CLASSES, activation = DENSE_LAYER_ACTIVATION))\n",
        "\n",
        "# Freeze the pretrained ResNet50 layer; only the Dense head is trained.\n",
        "model.layers[0].trainable = False\n",
        "model.summary()\n",
        "\n",
        "# Rebuild the model with L2 kernel regularization applied (see add_regularization).\n",
        "model = add_regularization(model)\n",
        "\n",
        "model.compile(Adam(lr= 0.001), loss='categorical_crossentropy', metrics=['accuracy'])\n"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5\n",
            "94658560/94653016 [==============================] - 3s 0us/step\n",
            "Model: \"sequential_1\"\n",
            "_________________________________________________________________\n",
            "Layer (type)                 Output Shape              Param #   \n",
            "=================================================================\n",
            "resnet50 (Model)             (None, 2048)              23587712  \n",
            "_________________________________________________________________\n",
            "dense_1 (Dense)              (None, 40)                81960     \n",
            "=================================================================\n",
            "Total params: 23,669,672\n",
            "Trainable params: 81,960\n",
            "Non-trainable params: 23,587,712\n",
            "_________________________________________________________________\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "JL7Li38n4Vnx",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Image dimensions fed to ResNet50; the position of the channel axis\n",
        "# depends on the backend's image data format.\n",
        "img_width, img_height = 224, 224\n",
        "channels_first = K.image_data_format() == 'channels_first'\n",
        "input_shape = (3, img_width, img_height) if channels_first else (img_width, img_height, 3)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "DhqJVw7e5tP-",
        "colab_type": "code",
        "outputId": "776aeab1-b1bc-4ee8-cd7b-e1a3efff4279",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 69
        }
      },
      "source": [
        "# These steps value should be proper FACTOR of no.-of-images in train & valid folders respectively\n",
        "# Training images processed in each step would be no.-of-train-images / STEPS_PER_EPOCH_TRAINING\n",
        "BATCH_SIZE_TRAINING = 15 #100\n",
        "BATCH_SIZE_VALIDATION =15 #100\n",
        "image_size = IMAGE_RESIZE\n",
        "# NOTE: 682 // 15 = 45 steps covers 675 images, so a few images per epoch are skipped.\n",
        "STEPS_PER_EPOCH_TRAINING =  682 // BATCH_SIZE_TRAINING #10\n",
        "STEPS_PER_EPOCH_VALIDATION = 80 // BATCH_SIZE_VALIDATION\n",
        "NUM_EPOCHS = 64\n",
        "\n",
        "\n",
        "# preprocessing_function is applied to each image after re-sizing & augmentation\n",
        "# (resize => augment => pre-process). The ResNet50 preprocess_input normalizes\n",
        "# images the way the pretrained ImageNet weights expect (channel mean subtraction),\n",
        "# NOT batch normalization.\n",
        "   \n",
        "# Train data generator: light geometric augmentation, training set only.\n",
        "train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,\n",
        "    width_shift_range=0.1,\n",
        "    shear_range=0.2,\n",
        "    zoom_range=0.1,\n",
        "    horizontal_flip=True)\n",
        "\n",
        "\n",
        "# Validation data generator: no augmentation, only the ResNet50 preprocessing.\n",
        "validate_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)\n",
        "\n",
        "\n",
        "# read the images from respective directories\n",
        "# read training images\n",
        "\n",
        "\n",
        "train_data_dir = '/content/drive/My Drive/Final_Fusion/Face_Data/Train'\n",
        "validation_data_dir = '/content/drive/My Drive/Final_Fusion/Face_Data/Validation'\n",
        "test_data_dir = '/content/drive/My Drive/Final_Fusion/Face_Data/Test'\n",
        "\n",
        "\n",
        "train_generator = train_datagen.flow_from_directory(\n",
        "    train_data_dir,\n",
        "    target_size=(img_width, img_height),\n",
        "    batch_size=BATCH_SIZE_TRAINING,\n",
        "    shuffle =True,\n",
        "    class_mode='categorical')\n",
        "\n",
        "\n",
        "# read validation images\n",
        "validation_generator = validate_datagen.flow_from_directory(\n",
        "    validation_data_dir,\n",
        "    target_size=(img_width, img_height),\n",
        "    shuffle=True,\n",
        "    batch_size=BATCH_SIZE_VALIDATION,\n",
        "    class_mode='categorical')\n",
        "\n",
        "# Display: (train batch size, #train batches, val batch size, #val batches)\n",
        "(BATCH_SIZE_TRAINING, len(train_generator), BATCH_SIZE_VALIDATION, len(validation_generator))\n"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Found 682 images belonging to 40 classes.\n",
            "Found 80 images belonging to 40 classes.\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(15, 46, 15, 6)"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 6
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "-DpN0rF5APDx",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Early stopping & checkpointing the best model in ../working dir & restoring that as our model for prediction\n",
        "from keras.callbacks import EarlyStopping, ModelCheckpoint\n",
        "# Stop when val_loss has not improved for EARLY_STOP_PATIENCE epochs.\n",
        "cb_early_stopper = EarlyStopping(monitor = 'val_loss', patience = EARLY_STOP_PATIENCE)\n",
        "# Keep only the weights with the best val_loss seen so far.\n",
        "# NOTE: these callbacks take effect only when passed to model.fit(callbacks=[...]).\n",
        "cb_checkpointer = ModelCheckpoint(filepath = 'BesttChoiceAgain.hdf5', monitor = 'val_loss', save_best_only = True, mode = 'auto')"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "kzypWRurAtCp",
        "colab_type": "code",
        "outputId": "330c5641-fbc1-45d1-f69a-865889fcfd77",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "# Train the softmax head.\n",
        "# FIX(review): the checkpoint/early-stop callbacks defined above were created\n",
        "# but never passed to fit(), so training always ran the full NUM_EPOCHS (see\n",
        "# the recorded output: 64/64 epochs despite a long val_loss plateau). They are\n",
        "# wired in here. `shuffle=True` was dropped: it is ignored for generator input\n",
        "# (shuffling is done by the generator itself).\n",
        "fit_history = model.fit(\n",
        "    train_generator,\n",
        "    steps_per_epoch=STEPS_PER_EPOCH_TRAINING,\n",
        "    epochs=NUM_EPOCHS,\n",
        "    validation_data=validation_generator,\n",
        "    validation_steps=STEPS_PER_EPOCH_VALIDATION,\n",
        "    callbacks=[cb_checkpointer, cb_early_stopper])\n"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Epoch 1/64\n",
            "45/45 [==============================] - 338s 8s/step - loss: 1.3763 - accuracy: 0.7601 - val_loss: 0.2501 - val_accuracy: 0.9467\n",
            "Epoch 2/64\n",
            "45/45 [==============================] - 11s 234ms/step - loss: 0.0454 - accuracy: 1.0000 - val_loss: 0.0282 - val_accuracy: 1.0000\n",
            "Epoch 3/64\n",
            "45/45 [==============================] - 10s 232ms/step - loss: 0.0213 - accuracy: 1.0000 - val_loss: 0.0105 - val_accuracy: 1.0000\n",
            "Epoch 4/64\n",
            "45/45 [==============================] - 11s 234ms/step - loss: 0.0156 - accuracy: 1.0000 - val_loss: 0.0090 - val_accuracy: 1.0000\n",
            "Epoch 5/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0128 - accuracy: 1.0000 - val_loss: 0.0088 - val_accuracy: 1.0000\n",
            "Epoch 6/64\n",
            "45/45 [==============================] - 11s 237ms/step - loss: 0.0171 - accuracy: 1.0000 - val_loss: 0.0091 - val_accuracy: 1.0000\n",
            "Epoch 7/64\n",
            "45/45 [==============================] - 11s 234ms/step - loss: 0.0121 - accuracy: 1.0000 - val_loss: 0.0083 - val_accuracy: 1.0000\n",
            "Epoch 8/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0116 - accuracy: 1.0000 - val_loss: 0.0084 - val_accuracy: 1.0000\n",
            "Epoch 9/64\n",
            "45/45 [==============================] - 11s 234ms/step - loss: 0.0101 - accuracy: 1.0000 - val_loss: 0.0082 - val_accuracy: 1.0000\n",
            "Epoch 10/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0097 - accuracy: 1.0000 - val_loss: 0.0081 - val_accuracy: 1.0000\n",
            "Epoch 11/64\n",
            "45/45 [==============================] - 11s 234ms/step - loss: 0.0092 - accuracy: 1.0000 - val_loss: 0.0081 - val_accuracy: 1.0000\n",
            "Epoch 12/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0093 - accuracy: 1.0000 - val_loss: 0.0080 - val_accuracy: 1.0000\n",
            "Epoch 13/64\n",
            "45/45 [==============================] - 11s 237ms/step - loss: 0.0087 - accuracy: 1.0000 - val_loss: 0.0080 - val_accuracy: 1.0000\n",
            "Epoch 14/64\n",
            "45/45 [==============================] - 11s 239ms/step - loss: 0.0087 - accuracy: 1.0000 - val_loss: 0.0080 - val_accuracy: 1.0000\n",
            "Epoch 15/64\n",
            "45/45 [==============================] - 11s 239ms/step - loss: 0.0087 - accuracy: 1.0000 - val_loss: 0.0080 - val_accuracy: 1.0000\n",
            "Epoch 16/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0086 - accuracy: 1.0000 - val_loss: 0.0080 - val_accuracy: 1.0000\n",
            "Epoch 17/64\n",
            "45/45 [==============================] - 11s 237ms/step - loss: 0.0085 - accuracy: 1.0000 - val_loss: 0.0079 - val_accuracy: 1.0000\n",
            "Epoch 18/64\n",
            "45/45 [==============================] - 11s 237ms/step - loss: 0.0084 - accuracy: 1.0000 - val_loss: 0.0079 - val_accuracy: 1.0000\n",
            "Epoch 19/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0084 - accuracy: 1.0000 - val_loss: 0.0079 - val_accuracy: 1.0000\n",
            "Epoch 20/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0083 - accuracy: 1.0000 - val_loss: 0.0079 - val_accuracy: 1.0000\n",
            "Epoch 21/64\n",
            "45/45 [==============================] - 11s 238ms/step - loss: 0.0083 - accuracy: 1.0000 - val_loss: 0.0079 - val_accuracy: 1.0000\n",
            "Epoch 22/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0083 - accuracy: 1.0000 - val_loss: 0.0079 - val_accuracy: 1.0000\n",
            "Epoch 23/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0082 - accuracy: 1.0000 - val_loss: 0.0079 - val_accuracy: 1.0000\n",
            "Epoch 24/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0082 - accuracy: 1.0000 - val_loss: 0.0079 - val_accuracy: 1.0000\n",
            "Epoch 25/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0081 - accuracy: 1.0000 - val_loss: 0.0079 - val_accuracy: 1.0000\n",
            "Epoch 26/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0082 - accuracy: 1.0000 - val_loss: 0.0078 - val_accuracy: 1.0000\n",
            "Epoch 27/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0081 - accuracy: 1.0000 - val_loss: 0.0078 - val_accuracy: 1.0000\n",
            "Epoch 28/64\n",
            "45/45 [==============================] - 11s 241ms/step - loss: 0.0082 - accuracy: 1.0000 - val_loss: 0.0078 - val_accuracy: 1.0000\n",
            "Epoch 29/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0081 - accuracy: 1.0000 - val_loss: 0.0078 - val_accuracy: 1.0000\n",
            "Epoch 30/64\n",
            "45/45 [==============================] - 11s 237ms/step - loss: 0.0081 - accuracy: 1.0000 - val_loss: 0.0078 - val_accuracy: 1.0000\n",
            "Epoch 31/64\n",
            "45/45 [==============================] - 11s 234ms/step - loss: 0.0081 - accuracy: 1.0000 - val_loss: 0.0078 - val_accuracy: 1.0000\n",
            "Epoch 32/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0082 - accuracy: 1.0000 - val_loss: 0.0078 - val_accuracy: 1.0000\n",
            "Epoch 33/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0095 - accuracy: 1.0000 - val_loss: 0.1209 - val_accuracy: 0.9600\n",
            "Epoch 34/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0145 - accuracy: 1.0000 - val_loss: 0.0094 - val_accuracy: 1.0000\n",
            "Epoch 35/64\n",
            "45/45 [==============================] - 11s 237ms/step - loss: 0.0100 - accuracy: 1.0000 - val_loss: 0.0080 - val_accuracy: 1.0000\n",
            "Epoch 36/64\n",
            "45/45 [==============================] - 11s 238ms/step - loss: 0.0085 - accuracy: 1.0000 - val_loss: 0.0078 - val_accuracy: 1.0000\n",
            "Epoch 37/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0081 - accuracy: 1.0000 - val_loss: 0.0078 - val_accuracy: 1.0000\n",
            "Epoch 38/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0081 - accuracy: 1.0000 - val_loss: 0.0078 - val_accuracy: 1.0000\n",
            "Epoch 39/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0081 - accuracy: 1.0000 - val_loss: 0.0077 - val_accuracy: 1.0000\n",
            "Epoch 40/64\n",
            "45/45 [==============================] - 11s 234ms/step - loss: 0.0080 - accuracy: 1.0000 - val_loss: 0.0077 - val_accuracy: 1.0000\n",
            "Epoch 41/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0080 - accuracy: 1.0000 - val_loss: 0.0077 - val_accuracy: 1.0000\n",
            "Epoch 42/64\n",
            "45/45 [==============================] - 11s 240ms/step - loss: 0.0080 - accuracy: 1.0000 - val_loss: 0.0077 - val_accuracy: 1.0000\n",
            "Epoch 43/64\n",
            "45/45 [==============================] - 11s 238ms/step - loss: 0.0078 - accuracy: 1.0000 - val_loss: 0.0077 - val_accuracy: 1.0000\n",
            "Epoch 44/64\n",
            "45/45 [==============================] - 11s 234ms/step - loss: 0.0079 - accuracy: 1.0000 - val_loss: 0.0077 - val_accuracy: 1.0000\n",
            "Epoch 45/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0078 - accuracy: 1.0000 - val_loss: 0.0076 - val_accuracy: 1.0000\n",
            "Epoch 46/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0079 - accuracy: 1.0000 - val_loss: 0.0076 - val_accuracy: 1.0000\n",
            "Epoch 47/64\n",
            "45/45 [==============================] - 11s 234ms/step - loss: 0.0078 - accuracy: 1.0000 - val_loss: 0.0076 - val_accuracy: 1.0000\n",
            "Epoch 48/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0077 - accuracy: 1.0000 - val_loss: 0.0076 - val_accuracy: 1.0000\n",
            "Epoch 49/64\n",
            "45/45 [==============================] - 11s 237ms/step - loss: 0.0077 - accuracy: 1.0000 - val_loss: 0.0076 - val_accuracy: 1.0000\n",
            "Epoch 50/64\n",
            "45/45 [==============================] - 11s 240ms/step - loss: 0.0077 - accuracy: 1.0000 - val_loss: 0.0076 - val_accuracy: 1.0000\n",
            "Epoch 51/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0076 - accuracy: 1.0000 - val_loss: 0.0075 - val_accuracy: 1.0000\n",
            "Epoch 52/64\n",
            "45/45 [==============================] - 11s 234ms/step - loss: 0.0076 - accuracy: 1.0000 - val_loss: 0.0075 - val_accuracy: 1.0000\n",
            "Epoch 53/64\n",
            "45/45 [==============================] - 11s 234ms/step - loss: 0.0076 - accuracy: 1.0000 - val_loss: 0.0075 - val_accuracy: 1.0000\n",
            "Epoch 54/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0076 - accuracy: 1.0000 - val_loss: 0.0075 - val_accuracy: 1.0000\n",
            "Epoch 55/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0076 - accuracy: 1.0000 - val_loss: 0.0075 - val_accuracy: 1.0000\n",
            "Epoch 56/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0075 - accuracy: 1.0000 - val_loss: 0.0074 - val_accuracy: 1.0000\n",
            "Epoch 57/64\n",
            "45/45 [==============================] - 11s 237ms/step - loss: 0.0075 - accuracy: 1.0000 - val_loss: 0.0074 - val_accuracy: 1.0000\n",
            "Epoch 58/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0075 - accuracy: 1.0000 - val_loss: 0.0074 - val_accuracy: 1.0000\n",
            "Epoch 59/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0075 - accuracy: 1.0000 - val_loss: 0.0074 - val_accuracy: 1.0000\n",
            "Epoch 60/64\n",
            "45/45 [==============================] - 11s 236ms/step - loss: 0.0074 - accuracy: 1.0000 - val_loss: 0.0074 - val_accuracy: 1.0000\n",
            "Epoch 61/64\n",
            "45/45 [==============================] - 11s 234ms/step - loss: 0.0074 - accuracy: 1.0000 - val_loss: 0.0073 - val_accuracy: 1.0000\n",
            "Epoch 62/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0075 - accuracy: 1.0000 - val_loss: 0.0073 - val_accuracy: 1.0000\n",
            "Epoch 63/64\n",
            "45/45 [==============================] - 11s 238ms/step - loss: 0.0074 - accuracy: 1.0000 - val_loss: 0.0073 - val_accuracy: 1.0000\n",
            "Epoch 64/64\n",
            "45/45 [==============================] - 11s 235ms/step - loss: 0.0073 - accuracy: 1.0000 - val_loss: 0.0073 - val_accuracy: 1.0000\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "BnIt8oBijLUU",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def save_model_and_weights(our_model):\n",
        "    \"\"\"Serialize the model architecture to JSON and its weights to HDF5 on Drive.\"\"\"\n",
        "    model_json = our_model.to_json()\n",
        "    # Architecture only (no weights) goes into the JSON file.\n",
        "    with open(\"/content/drive/My Drive/SAVED MODELS/Face Model/model2.json\", \"w\") as json_file:\n",
        "        json_file.write(model_json)\n",
        "    print(\"[INFO] the model is saved into a JSON file.\")\n",
        "    # Serialize weights to HDF5 (moved out of the `with` block -- it does not\n",
        "    # touch json_file).\n",
        "    our_model.save_weights('/content/drive/My Drive/SAVED MODELS/Face Model/second_face.h5')\n",
        "    print(\"[INFO] Model weights are saved successfully\")\n",
        "\n",
        "\n",
        "# save the model and weights\n",
        "# save_model_and_weights(model)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "-_dJQbAPNyiC",
        "colab_type": "code",
        "outputId": "79d0eb52-a430-4410-e6e9-06a9d35316b7",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 293
        }
      },
      "source": [
        "# Plot training curves: accuracy (left) and loss (right), train vs. validation.\n",
        "print(fit_history.history.keys())\n",
        "plt.figure(1, figsize = (15,8))   \n",
        "plt.subplot(221)  \n",
        "plt.plot(fit_history.history['accuracy'])  \n",
        "plt.plot(fit_history.history['val_accuracy'])  \n",
        "plt.title('model accuracy')  \n",
        "plt.ylabel('accuracy')  \n",
        "plt.xlabel('epoch')  \n",
        "plt.legend(['train', 'valid']) \n",
        "plt.subplot(222)  \n",
        "plt.plot(fit_history.history['loss'])  \n",
        "plt.plot(fit_history.history['val_loss'])  \n",
        "plt.title('model loss')  \n",
        "plt.ylabel('loss')  \n",
        "plt.xlabel('epoch')  \n",
        "plt.legend(['train', 'valid']) \n",
        "plt.show()\n"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "display_data",
          "data": {
            "image/png": "iVBORw0KGgoAAAANSUhEUgAAA34AAAEDCAYAAABqCtYiAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+j8jraAAAgAElEQVR4nOzde5zWdZ3//8dzziAgCAgKIlSmICAqHvpaRlkuWnnohKZt7bbyzez41b5rbT81t75r22lz8xAVW7aisZjJtparhVmrluMJ8YwKMSAHOQnKDHN4/f74fK6Zi+Ga4YKZa675DM/77Ta3uT7n12cY/czr836/X29FBGZmZmZmZjZwVZQ7ADMzMzMzMystJ35mZmZmZmYDnBM/MzMzMzOzAc6Jn5mZmZmZ2QDnxM/MzMzMzGyAc+JnZmZmZmY2wDnxMysxST+R9LUi910h6V2ljsnMzCzreuv5ujfnMcsyJ35mZmZmZmYDnBM/MyuKpKpyx2BmZmZm+8aJnxntXUC+KGmppNck/VjSGEm/lrRN0j2SRuTtf5akJyVtkXSvpMl5246V9Eh63M+Buk7Xeq+kx9Jj75c0vcgY3yPpUUmvSlol6apO29+anm9Luv3j6fpBkr4taaWkrZL+mK6bJamhwM/hXennqyQtkvTvkl4FPi7pREkPpNd4WdL3JdXkHX+0pLslbZK0TtKXJY2V9LqkkXn7HSdpg6TqYu7dzMyyKQvP1wIxXyRpefosWyzp0HS9JH1X0vr0WfyEpKnptjMlPZXGtlrSZfv0AzMrISd+Zh0+ALwbeDPwPuDXwJeB0ST/rXwWQNKbgVuAz6fb7gT+U1JNmgT9EvgZcBDwH+l5SY89FpgP/G9gJPADYLGk2iLiew34a2A48B7gYknnpOc9PI33X9OYZgCPpcd9Czge+F9pTP8XaCvyZ3I2sCi95s1AK/AFYBTwFuA04FNpDEOBe4DfAIcCbwJ+GxFrgXuBD+ed96PArRHRXGQcZmaWXf39+dpO0juBfyJ5Zh0CrARuTTefDpya3seB6T4b020/Bv53RAwFpgK/25vrmvUFJ35mHf41ItZFxGrgD8CfIuLRiGgEbgeOTfebA/xXRNydJi7fAgaRJFYnA9XAv0REc0QsAh7Ku8Zc4AcR8aeIaI2InwJN6XHdioh7I+KJiGiLiKUkD8e3p5s/AtwTEbek190YEY9JqgD+FvhcRKxOr3l/RDQV+TN5ICJ+mV5zR0Q8HBEPRkRLRKwgebDmYngvsDYivh0RjRGxLSL+lG77KXAhgKRK4HySh7eZmQ18/fr52skFwPyIeCR9Vn4JeIukiUAzMBQ4ClBEPB0RL6fHNQNTJA2LiM0R8cheXtes5Jz4mXVYl/d5R4HlIennQ0neAAIQEW3AKmBcum11RETesSvzPh8OXJp2Q9kiaQtwWHpctySdJGlJ2kVyK/BJkpY30nO8UOCwUSRdYQptK8aqTjG8WdKvJK1Nu3/+vyJiALiD5IE4ieSt79aI+PM+xmRmZtnSr5+vnXSOYTtJq964iPgd8H3gOmC9pHmShqW7fgA4E1gp6feS3rKX1zUrOSd+ZntvDckDBkj6/JM8XFYDLwPj0nU5E/I+rwK+HhHD874GR8QtRVx3AbAYOCwiDgRuBHLXWQW8scAxrwCNXWx7DRicdx+VJF1r8kWn5RuAZ4AjImIYSVed/BjeUCjw9K3uQpJWv4/i1j4zM9tduZ6v3cVwAEnX0dUAEXFtRBwPTCHp8vnFdP1DEXE2cDBJl9SFe3lds5Jz4me29xYC75F0Wlqc5FKS7iT3Aw8ALcBnJVVLej9wYt6xPwQ+mbbeSdIBSoq2DC3iukOBTRHRKOlEku6dOTcD75L0YUlVkkZKmpG+LZ0PfEfSoZIqJb0lHfPwHFCXXr8a+Aqwp7EQQ4FXge2SjgIuztv2K+AQSZ+XVCtpqKST8rbfBH
wcOAsnfmZmtrtyPV/z3QL8jaQZ6bPy/5F0TV0h6YT0/NUkL08bgbZ0DOIFkg5Mu6i+SvFj6c36jBM/s70UEc+StFz9K0mL2vuA90XEzojYCbyfJMHZRDJe4Rd5x9YDF5F0FdkMLE/3LcangKslbQOuIO9tYkT8haSLyaXpdR8Djkk3XwY8QTIWYhPwDaAiIram5/wRyZvM14BdqnwWcBlJwrmN5CH787wYtpF043wfsBZ4HnhH3vb/IXkQPhIR+d1zzMzMyvl8zY/hHuD/A24jaWV8I3BeunkYybNvM0l30I3AN9NtHwVWpMMgPkkyVtCsX9GuXaXNzEpH0u+ABRHxo3LHYmZmZrY/ceJnZn1C0gnA3SRjFLeVOx4zMzOz/Ym7eppZyUn6Kckcf5930mdmZmbW99ziZ2ZmZmZmNsC5xc/MzMzMzGyAc+JnZmZmZmY2wFWVO4DeMmrUqJg4cWK5wzAzsz7w8MMPvxIRo8sdR1b4GWlmtn/o7vk4YBK/iRMnUl9fX+4wzMysD0jyXJB7wc9IM7P9Q3fPR3f1NDMz6yckzZe0XtKyPex3gqQWSR/sq9jMzCzbnPiZmZn1Hz8BZne3g6RK4BvAf/dFQGZmNjA48TMzM+snIuI+YNMedvsMcBuwvvQRmZnZQFGyMX6S5gPvBdZHxNQC2wV8DzgTeB34eEQ8km77GPCVdNevRcRPSxWnmZlZVkgaB5wLvAM4oczhmJn1K83NzTQ0NNDY2FjuUEqurq6O8ePHU11dXfQxpSzu8hPg+8BNXWw/Azgi/ToJuAE4SdJBwJXATCCAhyUtjojNJYzVzMwsC/4F+PuIaEven3ZN0lxgLsCECRP6IDQzs/JqaGhg6NChTJw4kT39PzLLIoKNGzfS0NDApEmTij6uZF09i+iucjZwUyQeBIZLOgT4K+DuiNiUJnt3s4fxDmZmZvuJmcCtklYAHwSul3ROoR0jYl5EzIyImaNHe+YLMxv4GhsbGTly5IBO+gAkMXLkyL1u2SzndA7jgFV5yw3puq7Wl8ULSx9g02+/i6KtXCGY7eaRA9/F00NO3uN+J275L4547dE+iMiseENOmctRJ51e7jAyKSLaX+1K+gnwq4j4Zamv+9un17H48TV860PHUF3p8gBm1n8N9KQvZ1/uM9Pz+PVFN5Yt//Mjjt1yN+sq/LbU+oeDYgs12xv497oju98xgqt23EAFrbyqoX0TnFkR1m91TZKuSLoFmAWMktRAMvShGiAibixXXC9s2M4dj63h6+dOc+JnZtaNLVu2sGDBAj71qU/t1XFnnnkmCxYsYPjw4SWKrLyJ32rgsLzl8em61SQPvfz19xY6QUTMA+YBzJw5M0oR5LAdDTzH4Uy58rFSnN5s7/3XZUx//Fbuu+ztUNHNH2CvroHvbIMzvsnQk+b2XXxme1C2LhwZEBHn78W+Hy9hKLuoraoEoKm5lSG1mX5nbGZWUlu2bOH666/fLfFraWmhqqrr/3/eeeedpQ6trNM5LAb+WomTga0R8TJwF3C6pBGSRgCnp+vKYkRjAw0Vh5Tr8ma7GzsVdm6DLSu732/tso79zcx6oK46+XOhqcXDHszMunP55ZfzwgsvMGPGDE444QTe9ra3cdZZZzFlyhQAzjnnHI4//niOPvpo5s2b137cxIkTeeWVV1ixYgWTJ0/moosu4uijj+b0009nx44dvRJbyRK/tLvKA8CRkhokfULSJyV9Mt3lTuBFYDnwQ+BTABGxCfhH4KH06+p0Xd9rbWHEzpdZ68TP+pMx05Lv65Z1v9+6J9L9jy5tPGY24LW3+DnxMzPr1jXXXMMb3/hGHnvsMb75zW/yyCOP8L3vfY/nnnsOgPnz5/Pwww9TX1/Ptddey8aNG3c7x/PPP88ll1zCk08+yfDhw7ntttt6JbaS9dfYU3eViAjgki62zQfmlyKuvfJqA5W0sq7KiZ/1IwdPBpS06E1+X9f7rV0GwydA3YF9FpqZDUy1VbkWv9
YyR2JmVpyv/ueTPLXm1V4955RDh3Hl+/buhfqJJ564y5QL1157LbfffjsAq1at4vnnn2fkyJG7HDNp0iRmzJgBwPHHH8+KFSt6FnjKHfW7s+klADZUHVrmQMzy1AyGkW8sosVvWUfroJlZD9Tmuno2u8XPzGxvHHDAAe2f7733Xu655x4eeOABBg8ezKxZswpOyVBbW9v+ubKyste6ejrx687mJPHbWOtSBNbPjJkKa7qZpqF5B2xcDlMKTu9lZrZX3NXTzLJmb1vmesvQoUPZtm1bwW1bt25lxIgRDB48mGeeeYYHH3ywT2Nz4tedTS+xk2q2V3sqB+tnxk6Fp34Jja9C3bDdt69/CqLNhV3MrFe4q6eZWXFGjhzJKaecwtSpUxk0aBBjxoxp3zZ79mxuvPFGJk+ezJFHHsnJJ+95Tube5MSvO5teZF3lWGpq/GOyfqa9wMuTcPhbdt+eq+g5xomfmfVcx3QObvEzM9uTBQsWFFxfW1vLr3/964LbcuP4Ro0axbJlHcN5Lrvssl6Ly7OwdmfzCtZUjG1/02nWb+Ra8roa57duGdQMgRGTCm83M9sLtZ7Owcws85zRdCUCNr1Eg8a2v+k06zeGjYO64V0nfmuXwcFTup/g3cysSO7qaWaWff6rsCuvbYDm11gVY9ziZ/2PBGOndXTpzBeRdAH1+D4z6yUu7mJmln3OaLqSTuXwUtsYapz4WX80ZmpSxKWt0xv4LX+Bpq0e32dmvaa9xa/ZLX5mZlnljKYrm14E4KW2g93iZ/3TmKOh+fX2lxTt1j2ZfB/rOfzMrHd4jJ+ZWfY5o+nK5pdAFaxoGUVttcf4WT/UXuDliV3Xr1sGKBnjZ2bWC2oqnfiZmWWdE7+ubHqJGDaO7S1yi5/1T6Mngyp3H+e39gk4aBLUDilPXGY24FRVVlBVIRrd1dPMrFcNGZL8vbZmzRo++MEPFtxn1qxZ1NfX9/hazmi6svklYsQk2gInftY/VdfBqCN2r+y5bpnH95lZr6utqnCLn5lZiRx66KEsWrSopNdwRtOVTS/ROnwigKdzsP5rzNSOMX0ATduTMX8e32dmvay2utLTOZiZ7cHll1/Odddd17581VVX8bWvfY3TTjuN4447jmnTpnHHHXfsdtyKFSuYOjV5cb9jxw7OO+88Jk+ezLnnnsuOHTt6JTYnfoU0vgqvv8LOYYcDuKqn9V9jp8LWVbBjc7K8/ikgksIvZma9qLaqgqZmt/iZmXVnzpw5LFy4sH154cKFfOxjH+P222/nkUceYcmSJVx66aVERJfnuOGGGxg8eDBPP/00X/3qV3n44Yd7JbaqXjnLQLM5qZKYS/zc1dP6rTFpy966J2HiW5PxfeCunmYZJWk+8F5gfUTs9h+ypAuAvwcEbAMujojH+yI2d/U0s0z59eUdfxf1lrHT4Ixrut3l2GOPZf369axZs4YNGzYwYsQIxo4dyxe+8AXuu+8+KioqWL16NevWrWPs2LEFz3Hffffx2c9+FoDp06czffr0XgnfiV8haXn8HQdMAF5pL2Nt1u/kKnuuXZYkfuuWQe2BMHxCeeMys331E+D7wE1dbH8JeHtEbJZ0BjAPOKkvAqutcldPM7NifOhDH2LRokWsXbuWOXPmcPPNN7NhwwYefvhhqqurmThxIo2NjX0elxO/QtIWv+25xM9j/Ky/GjIGBo/qmNJh7bKkm6dU3rjMbJ9ExH2SJnaz/f68xQeB8aWOKae22i1+ZpYhe2iZK6U5c+Zw0UUX8corr/D73/+ehQsXcvDBB1NdXc2SJUtYuXJlt8efeuqpLFiwgHe+850sW7aMpUuX9kpcbsoqZNNLMHgUjRWDAHf1tH5MShK9tcugrS3p8jnW3TzN9hOfAH7dVxfzGD8zs+IcffTRbNu2jXHjxnHIIYdwwQUXUF9fz7Rp07jppps46qijuj3+4osvZvv27UyePJkrrriC448/vlficotfIZtfgoPe0P5m0y1+1q+NnQ
Z//iFsegGaX/P4PrP9gKR3kCR+b+1mn7nAXIAJE3re/bu2qpLXd7b0+DxmZvuDJ57oGF84atQoHnjggYL7bd++HYCJEyeybFkyRdegQYO49dZbez0mN2UVsuklOGgSO9PEz1U9rV8bMxVam+DJXybLbvEzG9AkTQd+BJwdERu72i8i5kXEzIiYOXr06B5f18VdzMyyzRlNZy1NsLUBRkxqH8Turp7Wr+USvcdvAVXAwVPKG4+ZlYykCcAvgI9GxHN9eW2P8TMzyzZ39exsy1+AgIMmtY9lcFVP69dGHQkV1UlXz1FvhupB5Y7IzPaRpFuAWcAoSQ3AlUA1QETcCFwBjASuV1LEqSUiZvZFbHWu6mlmlmlO/DpLp3JgxCSaNnqMn2VAVQ2MPjKZysHj+8wyLSLO38P2vwP+ro/C2UVttYu7mFn/FxFoP6hu3t0E8F1xU1Zn6VQOHOSunpYhuYTP4/vMrESSefyc+JlZ/1VXV8fGjRv3KSnKkohg48aN1NXV7dVxbvHrbNOLUDMEDhhNU0syx4aLu1i/N3YqLAXGTCt3JGY2QCXFXdzV08z6r/Hjx9PQ0MCGDRvKHUrJ1dXVMX783k3l6sSvs00vwYhJILVX9XSLn/V7R54JL90HE04qdyRmNkDlqnruL92ozCx7qqurmTRpUrnD6Lec+HW2+aVkvBR4Hj/LjpFvhAv+o9xRmNkAVltdSQQ0twY1VU78zMyyxk1Z+draYPPKpMUPaGpuRYLqSj/gzMxs/5br/eLunmZm2eTEL9+2NclE2AeliV9LG7VVFe7SYmZm+72OxM8FXszMsqikiZ+k2ZKelbRc0uUFth8u6beSlkq6V9L4vG2tkh5LvxaXMs52uakcDnoDkEv83M3TzMws9zx04mdmlk0lG+MnqRK4Dng30AA8JGlxRDyVt9u3gJsi4qeS3gn8E/DRdNuOiJhRqvgK2vRi8n1ER4ufK3qamZkl8/hBMgzCzMyyp5RZzYnA8oh4MSJ2ArcCZ3faZwrwu/TzkgLb+9bml6CiGg5MGh6bWlpd0dPMzAx39TQzy7pSZjXjgFV5yw3punyPA+9PP58LDJU0Ml2uk1Qv6UFJ55Qwzg6bXoLhE6CiozuLEz8zMzN39TQzy7pyZzWXAW+X9CjwdmA1kOtDcnhEzAQ+AvyLpDd2PljS3DQ5rO+ViRo3v9Re2AWgqdlj/MzMzKCjxa/RXT3NzDKplInfauCwvOXx6bp2EbEmIt4fEccC/5Cu25J+X51+fxG4Fzi28wUiYl5EzIyImaNHj+5ZtBGwaUX7+D5Iu3pWlzs3NjMzK7/2MX5u8TMzy6RSZjUPAUdImiSpBjgP2KU6p6RRknIxfAmYn64fIak2tw9wCpBfFKb3Nb+eTII95uj2VU0tbdRUOvEzMzNr7+rpFj8zs0wqWVXPiGiR9GngLqASmB8RT0q6GqiPiMXALOCfJAVwH3BJevhk4AeS2kiS02s6VQPtfTUHwNwlu6za2dLGsEHVJb2smZlZFri4i5lZtpUs8QOIiDuBOzutuyLv8yJgUYHj7gemlTK2Yri4i5mZWcLFXczMss1ZTTc8nYOZmVmiY4yfu3qamWWRs5puuKqnmZlZor2rZ7Nb/MzMssiJXzeaWtpc1dPMzAx39TQzyzpnNd3Y2dLqqp5mZtZnJM2XtF7Ssi62S9K1kpZLWirpuL6KrabKXT3NzLLMWU033OJnZmZ97CfA7G62nwEckX7NBW7og5gAqKwQ1ZVyi5+ZWUY5q+lCRKRVPT3Gz8zM+kZE3Ads6maXs4GbIvEgMFzSIX0TXdLd02P8zMyyyYlfF3a2Jg82V/U0M7N+ZBywKm+5IV3XJ+qqK9zV08wso5zVdCHXlcWJn5mZZZGkuZLqJdVv2LChV85ZW1Xprp5mZhnlrKYLua4stdXu6mlmZv3GauCwvOXx6brdRMS8iJgZETNHjx7dKxevrapw4mdmllFO/LrQ3tXTVT3NzKz/WAz8dVrd82
Rga0S83FcXr6mqoKnZXT3NzLKoqtwB9Fe5B5urepqZWV+RdAswCxglqQG4EqgGiIgbgTuBM4HlwOvA3/RlfLXV7uppZpZVTvy64DF+ZmbW1yLi/D1sD+CSPgpnN0lXT7f4mZllkbOaLnQkfh7jZ2ZmBh7jZ2aWZU78utDe1dMtfmZmZoDn8TMzyzJnNV3IFXepceJnZmYGJOPe3dXTzCybnNV0oX06B3f1NDMzA9zV08wsy5z4daF9jJ+repqZmQGewN3MLMuc1XQh15XFY/zMzMwStVUVNHoePzOzTHJW0wVX9TQzM9tVMsbPLX5mZlnkxK8LruppZma2q9qqSna2tJFMJ2hmZlnirKYLruppZma2q9zLULf6mZllj7OaLnRU9fSPyMzMDJz4mZllmbOaLjS1tFFZIaoq/SMyMzMDqK1Oxr17Lj8zs+xxVtOFppZWt/aZmZnlaW/xa3aLn5lZ1jiz6UJTS5sTPzMzszzu6mlmll3ObLrQ1NzmqRzMzMzy5J6L7uppZpY9Tvy6sLO1zRU9zczM8tRWu8XPzCyrispsJP1C0nsk7TeZkMf4mZmZ7cpj/MzMsqvYzOZ64CPA85KukXRkMQdJmi3pWUnLJV1eYPvhkn4raamkeyWNz9v2MUnPp18fKzLOXtPU3Nb+ZtPMzMzc1dPMLMuKymwi4p6IuAA4DlgB3CPpfkl/I6m60DGSKoHrgDOAKcD5kqZ02u1bwE0RMR24Gvin9NiDgCuBk4ATgSsljdjbm+uJpLiLx/iZmVnfKuKl6QRJSyQ9mr44PbOvYnNxFzOz7Cq6SUvSSODjwN8BjwLfI0kE7+7ikBOB5RHxYkTsBG4Fzu60zxTgd+nnJXnb/wq4OyI2RcTm9Bqzi421N7irp5mZ9bUiX5p+BVgYEccC55H0yukTde3z+DnxMzPLmmLH+N0O/AEYDLwvIs6KiJ9HxGeAIV0cNg5YlbfckK7L9zjw/vTzucDQNMEs5tiS2tni4i5mZtbninlpGsCw9POBwJq+Cq5jjJ+7epqZZU1VkftdGxFLCm2IiJk9uP5lwPclfRy4D1gNFP00kTQXmAswYcKEHoSxO8/jZ2ZmZVDoxedJnfa5CvhvSZ8BDgDe1TehuaqnmVmWFZvZTJE0PLcgaYSkT+3hmNXAYXnL49N17SJiTUS8P+2u8g/pui3FHJvuOy8iZkbEzNGjRxd5K8XxGD8zM+unzgd+EhHjgTOBnxWqui1prqR6SfUbNmzolQt3FHdx4mdmljXFJn4XpQkZAOm4u4v2cMxDwBGSJkmqIRmHsDh/B0mj8h5WXwLmp5/vAk5PE8wRwOnpuj7T1OwxfmZm1ueKefH5CWAhQEQ8ANQBozqfqBQvRzuKu7irp5lZ1hSb2VRKUm4hHXxe090BEdECfJokYXuaZCD6k5KulnRWutss4FlJzwFjgK+nx24C/pEkeXwIuDpd12eaWjydg5mZ9bk9vjQF/gKcBiBpMkni1ztNenvgefzMzLKr2DF+vwF+LukH6fL/Ttd1KyLuBO7stO6KvM+LgEVdHDufjhbAPueunmZm1tciokVS7qVpJTA/99IUqI+IxcClwA8lfYGk0MvHIyL6Ij5J1FRVuKunmVkGFZv4/T1Jsndxunw38KOSRNRPuKqnmZmVQxEvTZ8CTunruHJqqyrc1dPMLIOKSvwiog24If0a8Nragp2truppZmbWWW1VpVv8zMwyqKjET9IRwD+RTCZbl1sfEW8oUVxltbM1eaC5q6eZmdmuaqsqPMbPzCyDim3S+jeS1r4W4B3ATcC/lyqocss90NziZ2Zm+0rS5yQNU+LHkh6RdHq54+qp2uoKGt3V08wsc4rNbAZFxG8BRcTKiLgKeE/pwiqv3NgFV/U0M7Me+NuIeJVkSqIRwEeBa8obUs/VVlW6xc/MLIOKLe7SlM6393xabWw1MKR0YZVXbuyCu3qamVkP5KZBOhP4WVqdU90dkAUu7mJmlk3FNml9DhgMfB
Y4HrgQ+Fipgiq3XOLnqp5mZtYDD0v6b5LE7y5JQ4HMN5XVejoHM7NM2mOLXzpZ+5yIuAzYDvxNyaMqs/aunk78zMxs330CmAG8GBGvSzqIAfAMra2uZOuO5nKHYWZme2mPmU1EtAJv7YNY+o2Orp5O/MzMbJ+9BXg2IrZIuhD4CrC1zDH1WFLV0109zcyyptgxfo9KWgz8B/BabmVE/KIkUZVZR1VPj/EzM7N9dgNwjKRjgEuBH5FUxX57WaPqodqqCna6q6eZWeYUm/jVARuBd+atC2BgJn6u6mlmZj3XEhEh6Wzg+xHxY0mfKHdQPeUJ3M3MsqmoxC8iMj8mYW/k3mTWVDrxMzOzfbZN0pdIpnF4W1odu7rMMfVYbbWrepqZZVFRiZ+kfyNp4dtFRPxtr0fUD+TeZNa5xc/MzPbdHOAjJPP5rZU0AfhmmWPqsWSMn1v8zMyyptiunr/K+1wHnAus6f1w+gfP42dmZj2VJns3AydIei/w54i4qdxx9ZS7epqZZVOxXT1vy1+WdAvwx5JE1A94OgczM+spSR8maeG7l2Qy93+V9MWIWFTWwHqotqqCna1ttLUFFRWZn4/ezGy/UWyLX2dHAAf3ZiD9iat6mplZL/gH4ISIWA8gaTRwD5DtxC8dBrGztY26Cj8nzcyyotgxftvYdYzfWuDvSxJRP7CzNU38PMbPzMz2XUUu6UttpIj5c/u73EvRpuY26qqd+JmZZUWxXT2HljqQ/iTX4ueqnmZm1gO/kXQXcEu6PAe4s4zx9Ipc4bNkWETmi5Same03ispsJJ0r6cC85eGSzildWOXV1NJKdaU8dsHMzPZZRHwRmAdMT7/mRcQee8tImi3pWUnLJV3exT4flvSUpCclLejdyLvX3uLnAi9mZplS7Bi/KyPi9txCRGyRdCXwy9KEVV5NLW0e32dmZj2WFke7bY87piRVAtcB7wYagIckLY6Ip/L2OQL4EnBKRGyW1Kdj7nOFzzyXn5lZthSb+BVqGdzXwjD9XlNLqyt6mpnZPikwLr59ExARMaybw08ElkfEi+m5bgXOBp7K2+ci4LqI2ExywvW7naWEcs/HRs/lZ2aWKcUmb/WSvkPyFhLgEuDh0oRUfk3NbU78zMxsn/RwXPw4YC7XrAwAAB7gSURBVFXecgNwUqd93gwg6X+ASuCqiPhND665V2qr3dXTzCyLis1uPgPsBH4O3Ao0kiR/A9LO1rb2B5uZmVk/U0UyrdIs4Hzgh5KGd95J0lxJ9ZLqN2zY0GsXd1dPM7NsKraq52tAwQHmA1FTc5srepqZWTmsBg7LWx6frsvXAPwpIpqBlyQ9R5IIPpS/U0TMIykuw8yZMwt1Pd0nHYmfW/zMzLKk2Kqed+e/TZQ0Ii1RPSA1tbR6Dj8zMyuHh4AjJE2SVAOcByzutM8vSVr7kDSKpOvni30VYP48fmZmlh3FZjejImJLbiEdUN6nVcT6UlLV04mfmZn1rYhoAT4N3AU8DSyMiCclXS3prHS3u4CNkp4ClgBfjIiNfRVjbbW7epqZZVGxxV3aJE2IiL8ASJpI4YplA0JTSxuDPMbPzMzKICLupNNE7xFxRd7nAP5P+tXn2rt6usXPzCxTik38/gH4o6Tfk5Sjfhswt2RRldnOljaGD6oudxhmZmb9TscE7m7xMzPLkmKLu/xG0kySZO9RkvEFO0oZWDk1tbRS466eZmZmu+no6ukWPzOzLCm2uMvfAb8FLgUuA34GXFXEcbMlPStpuaTdqoJKmiBpiaRHJS2VdGa6fqKkHZIeS79u3Jub6imP8TMzMyvMVT3NzLKp2Ozmc8AJwMqIeAdwLLCluwMkVZJM+H4GMAU4X9KUTrt9hWTg+rEklcuuz9v2QkTMSL8+WWScvSKZwN1j/MzMzDrLTXfU1OyunmZmWVJs4tcYEY0Akmoj4hngyD0ccyKwPCJejIidJBO/n91pnwCGpZ8PBNYUGU9JeToHMzOzwiRRW1XhFj8zs4
wpNrtpSOfx+yVwt6Q7gJV7OGYcsCr/HOm6fFcBF0pqIKlg9pm8bZPSLqC/l/S2QheQNFdSvaT6DRs2FHkre+aunmZmZl1z4mdmlj3FFnc5N/14laQlJK1zv+mF658P/CQivi3pLcDPJE0FXgYmRMRGSccDv5R0dES82imuecA8gJkzZ/ba9BI7W9zV08zMrCu11ZWu6mlmljHFTufQLiJ+X+Suq4HD8pbHp+vyfQKYnZ73AUl1JJPFrwea0vUPS3oBeDNQv7fx7q2W1jZa2sJVPc3MzLpQW1XhefzMzDKmlNnNQ8ARkiZJqiEp3rK40z5/AU4DkDQZqAM2SBqdFodB0huAI4AXSxhru52tyYPMXT3NzMwKc1dPM7Ps2esWv2JFRIukTwN3AZXA/Ih4UtLVQH1ELCaZHuKHkr5AUujl4xERkk4FrpbUDLQBn4yITaWKNV/uDaYTPzMzs8Jqq9zV08wsa0qW+AFExJ0kRVvy112R9/kp4JQCx90G3FbK2LqSe4NZW+0xfmZmZoXUVrvFz8wsa9ys1cnOFrf4mZmZdcdj/MzMssfZTSe5riuu6mlmZlaYu3qamWWPE79Ocl1XXNXTzMysMBd3MTPLHmc3nXS0+PlHY2ZmVkhddaUTPzOzjHF204mrepqZmXUvGePnrp5mZlni7KYTV/U0MzPrnqt6mplljxO/Tppc1dPMzMpI0mxJz0paLunybvb7gKSQNLMv44NccRcnfmZmWeLsppPcGD8XdzEzs74mqRK4DjgDmAKcL2lKgf2GAp8D/tS3ESaS4i7u6mlmliXObjpxi5+ZmZXRicDyiHgxInYCtwJnF9jvH4FvAI19GVxObVUlza1Ba1uU4/JmZrYPnN100pH4eYyfmZn1uXHAqrzlhnRdO0nHAYdFxH/1ZWD5aquTPx92urunmVlmOPHrJFelLPdQMzMz6y8kVQDfAS4tYt+5kuol1W/YsKFX48j1iml0ZU8zs8xwdtPJzlZ39TQzs7JZDRyWtzw+XZczFJgK3CtpBXAysLhQgZeImBcRMyNi5ujRo3s1yFyvGBd4MTPLDmc3neTm8aup9I/GzMz63EPAEZImSaoBzgMW5zZGxNaIGBUREyNiIvAgcFZE1PdlkLmXoy7wYmaWHc5uOmlqaaOmqgJJ5Q7FzMz2MxHRAnwauAt4GlgYEU9KulrSWeWNrkNuOIRb/MzMsqOq3AH0N00tre7maWZmZRMRdwJ3dlp3RRf7zuqLmDpr7+rZ7MTPzCwrnOF00tTS5oqeZmZm3XBXTzOz7HHi10lTc5tb/MzMzLrRkfi5xc/MLCuc4XSys7XNUzmYmZl1o7Y6V9XTLX5mZlnhDKeTpuZWd/U0MzPrRnuLn8f4mZllhhO/TnJVPc3MzKwwd/U0M8seZziduKqnmZlZ99zV08wse5zhdJJU9fSPxczMrCtu8TMzyx5nOJ3s9HQOZmZm3fIYPzOz7HHi10lTi6t6mpmZdad9And39TQzywxnOJ00tbRSW+kfi5mZWVeqK4Xkrp5mZlniDKeTpma3+JmZmXVHErVVFU78zMwyxBlOJ00e42dmZrZHddWVNDW7q6eZWVY48evE0zmYmZntmVv8zMyypaQZjqTZkp6VtFzS5QW2T5C0RNKjkpZKOjNv25fS456V9FeljDMnItKqnk78zMzMulNbVenEz8wsQ0qW4UiqBK4DzgCmAOdLmtJpt68ACyPiWOA84Pr02Cnp8tHAbOD69Hwl1dIWtEXHxLRmZmYGPPrvcN3J0NbRtTNp8XNXTzOzrChl09aJwPKIeDEidgK3Amd32ieAYennA4E16eezgVsjoikiXgKWp+crqdybyxpX9TQzM+ugCtjwNGx8oX1VbXWF5/EzM8uQUmY444BVecsN6bp8VwEXSmoA7gQ+sxfH9rrcIHVX9TQzM8szdlryfe3S9lXu6mlmli3lznDOB34SEeOBM4GfSSo6JklzJdVLqt+wYUOPg8k9wDzGz8zMLM
+oI6GyBtY+0b7KXT3NzLKlqoTnXg0clrc8Pl2X7xMkY/iIiAck1QGjijyWiJgHzAOYOXNm9DTgne2Jn8f4mVn/1dzcTENDA42NjeUOpeTq6uoYP3481dXV5Q6lz0iaDXwPqAR+FBHXdNr+f4C/A1qADcDfRsTKkgZVVQOjj+rU4lfBtsaWkl7WzMx6TykTv4eAIyRNIknazgM+0mmfvwCnAT+RNBmoI3mILQYWSPoOcChwBPDnEsYKuMXPzLKhoaGBoUOHMnHiRCSVO5ySiQg2btxIQ0MDkyZNKnc4fSKvMNq7SYY5PCRpcUQ8lbfbo8DMiHhd0sXAPwNzSh7c2Onw3G8gAqS0q6db/MzMsqJkGU5EtACfBu4Cniap3vmkpKslnZXudilwkaTHgVuAj0fiSWAh8BTwG+CSiCj50yX3APMYPzPrzxobGxk5cuSATvoAJDFy5Mj9omUzzx4Lo0XEkoh4PV18kKRXTOmNnQavvwLb1wFpcReP8TMzy4xStvgREXeSFG3JX3dF3uengFO6OPbrwNdLGV9nHVU93dXTzPq3gZ705ewv95mnUHGzk7rZ/xPAr0saUc4h05Pva5+AoWOTMX6u6mlmlhlu2sqTe4C5xc/MrGtbtmzh+uuv3+vjzjzzTLZs2VKCiPZPki4EZgLf7GJ7rxZAY8zRyfeXHwdwV08zs4xxhpOnvaunx/iZmXWpq8SvpaX7Qh933nknw4cPL1VYA0VRxc0kvQv4B+CsiGgqdKKImBcRMyNi5ujRo3seWd2BMGJie2XPpKqnW/zMzLKipF09s8ZVPc3M9uzyyy/nhRdeYMaMGVRXV1NXV8eIESN45plneO655zjnnHNYtWoVjY2NfO5zn2Pu3LkATJw4kfr6erZv384ZZ5zBW9/6Vu6//37GjRvHHXfcwaBBg8p8Z/3CHgujSToW+AEwOyLW92l0Y6d1JH4e42dmlilO/PK4qqeZZc1X//NJnlrzaq+ec8qhw7jyfUd3uf2aa65h2bJlPPbYY9x777285z3vYdmyZe2VN+fPn89BBx3Ejh07OOGEE/jABz7AyJEjdznH888/zy233MIPf/hDPvzhD3Pbbbdx4YUX9up9ZFFEtEjKFUarBObnCqMB9RGxmKRr5xDgP9IxkH+JiLO6PGlvGnsMPP0raNpGbVUlrW1BS2sbVZV+bpqZ9XdO/PLkunrWOPEzMyvaiSeeuMt0C9deey233347AKtWreL555/fLfGbNGkSM2bMAOD4449nxYoVfRZvf1dEYbR39XlQOWOnAQHrnqS2Kuk+2tTixM/MLAuc+OVxi5+ZZU13LXN95YADDmj/fO+993LPPffwwAMPMHjwYGbNmlVwOoba2tr2z5WVlezYsaNPYrUeGjst+b72CWqrkvyzqaWNA2q7OcbMzPoFZzh5Oqp6eoyfmVlXhg4dyrZt2wpu27p1KyNGjGDw4ME888wzPPjgg30cnZXUsENh0EGwdmn7s9KVPc3MssEtfnlc1dPMbM9GjhzJKaecwtSpUxk0aBBjxoxp3zZ79mxuvPFGJk+ezJFHHsnJJ59cxkit10nJfH5rn6B2fPKs9Fx+ZmbZ4MQvz86WNioEVRX73YTBZmZ7ZcGCBQXX19bW8utfF55PPDeOb9SoUSxbtqx9/WWXXdbr8VkJjZ0Gf5pHXUWS8Lmyp5lZNrhpK09TSxu1VZWkVdLMzMyss7HTobWJETtWAu7qaRlwz1fhD98udxRmZefEL09TS5srepqZmXUnLfAyYtszgFv8rJ97bSPcfy3c921o2l7uaMzKyllOnqaWVo/vMzMz687II6CqjgO3pImfx/hZf7ZsEbS1QPNr8PTickdjVlbOcvI0NbdRW+0fiZmZWZcqq+DgKQzZ/BTgrp7Wzz22IGmlPugNyWez/ZiznDxNrckYPzMzM+vG2GkM3vQUEO7qaf3X+qfh5cdgxgVwzEdgxR9g84pyR2VWNk788jQ1t7mrp5mZ2Z6MnUZl0x
YOYZNb/Kz/evwWqKiCqR+EY84DBI/fWu6ozMrGWU4ej/EzM+t9Q4YMAWDNmjV88IMfLLjPrFmzqK+v78uwrCcOOQaAKRUrPMbP+qe2Vli6EN70bhgyGoYfBpNOTbp7tvl31vZPznLyuKqnmVnpHHrooSxatKjcYVhvOHgKgThaK93V0/qnF++FbS/DjPM71s24ALashL/cX7awzMrJWU6e3Dx+ZmbWtcsvv5zrrruuffmqq67ia1/7GqeddhrHHXcc06ZN44477tjtuBUrVjB16lQAduzYwXnnncfkyZM599xz2bFjR5/Fb72gdghx0BuYUrHSXT2tf3r8FqgbDm+e3bFu8nuhZqiLvNh+q6rcAfQnTc2t1A6tLXcYZmbF+/XlsPaJ3j3n2GlwxjVdbp4zZw6f//znueSSSwBYuHAhd911F5/97GcZNmwYr7zyCieffDJnnXUWkgqe44YbbmDw4ME8/fTTLF26lOOOO65378FKb+x0przyR55zV0/rbxpfhad/BTM+AlV5f9fVHABHnwPLfgFn/DPUDilfjGZl4Ba/PDtb26itdoufmVl3jj32WNavX8+aNWt4/PHHGTFiBGPHjuXLX/4y06dP513veherV69m3bp1XZ7jvvvu48ILLwRg+vTpTJ8+va/Ct15Scch0JlRs4JHnVvDyVrfYWj/y1B3QsiNJ/DqbcYHn9LP9llv88riqp5llTjctc6X0oQ99iEWLFrF27VrmzJnDzTffzIYNG3j44Yeprq5m4sSJNDY2liU26yNjk2S9efVS3vmtVi6e9UbmnvoG6vwC1crt8Vtg5Jtg3PG7b5twMoyYlHT3LJQYmg1gznLyuLiLmVlx5syZw6233sqiRYv40Ic+xNatWzn44IOprq5myZIlrFy5stvjTz31VBYsSMbZLFu2jKVLl/ZF2Nabxk4D4AfTl3PGmwbxnbuf47Rv/57/WvoyEVHm4Gy/tXkFrPwfOOZ8KNTVXEpa/Tynn+2HnOXk8XQOZmbFOfroo9m2bRvjxo3jkEMO4YILLqC+vp5p06Zx0003cdRRR3V7/MUXX8z27duZPHkyV1xxBccfX+DN/H5K0mxJz0paLunyAttrJf083f4nSRP7Pkpg6BiYcjZDnlrAdxrO40/T/pNp1Q1csuAR3nPtH/nifzzO93/3PIsfX8Pjq7aw+bWde0wIW1rbeHnrDpY2bOGlV16jsdmFY2wvPf5zQDB9Ttf7eE4/20+5q2ceV/U0MyveE090FJUZNWoUDzzwQMH9tm/fDsDEiRNZtmwZAIMGDeLWW/1HV2eSKoHrgHcDDcBDkhZHxFN5u30C2BwRb5J0HvANoJu/ckvowzfBy4/Dn+cx5olF3NhyC+sOnckvWk7hpWdqqN8h/kgNjVFDIzWEqhhSV80BddUcUFvNkLoaKivFpu1NbNreyJbXGyGCCoJWKmilgqGDBzN62GBGDz+AEUMGUVEhqoDKSqiUqJSoqaqgtrqSmuoqaqsqk89VlVRWVFAhoYoKKitEhdjlc4VEBaJCAok2yGslStZXVijdX1RVCEm7FC3Kb1TqLq9NLiEEqELpFbTLOdpPlXdOUbHL9ty1C5ZN6qKYUqHN2vUiefsUPofytufvsdstd4ovd7rcz6aYtuDOP5d83Z8nGPzYLbRMeCvrYyRNG7bT2NxKU0sbm7bvZPWWHazZsoPVW3ZwUfUxHPz7H/PPjx/K8GFDOXDoUA4afiCjhh/IsMGDqK2poq66gtrqauqqq6irrqSiMvl9qpSoSH8ncv+WhWLsKk6R/2+uPf2zFfx3K/Q7sTe6egmT/++rIn4vLFuc+KUigp0tHuNnZmZldSKwPCJeBJB0K3A2kJ/4nQ1clX5eBHxfkqJc/SsPOQbOvg7e/Y/w6M8Y89CPuPjV7yXbagrs3wa8nn51Vmj/VmBz+mVWhMvX/xW/+MaSgttqKis4dHgdfxjyV3x68zf47ubPlP13qy2KT6r25j/yKPx6oNt9iz
l/z2Po+f3uy73t+/UKH9/T8xY6fmvFcA678umiz7u3nPil2gL+9pRJHHf4iHKHYmZm+69xwKq85QbgpK72iYgWSVuBkcArfRJhVwYfBKd8Dt7y6WTsVPPr0NyYfG9phOYdEK1Jc0gERBuQflclqCJtFku/Rxu0tkBbC7Q1p99baf8jTB1/qLa2tdHc2kZLS2vyvbWV1tY2AmhrayMiiIC29NpB8jlZTL6LIPfnmYhkPcn2tvb9ks+5/dr/mIu8sNp/IGrfGPk7x67HdvxBGPmbO9YVaj3qYY7f1eHBHqbm2DWcdsrfoZvrFNdotHuTXt6Pd5cLqsAfzi2Vgzh+0gWcXFNHbXVF2gJcwfBB1YwbMYhRB9RSUSFoezusfCs0boWWJqL5dRp3vM727dto2rmTlpbk96ilrY3W1lZaWjt+j4Lc707hn5d2+ykVbiONXRfz1nfVZNj9z7fzWfak0L9boXNGoaXu/n0KHRkF1u1JlzfYsb7z79Tura17kZ4VvF5X/7HsTYqYf7bcz6+LNuuaIRy2h3P0hBO/VGWFuOJ9U8odhpmZWa+QNBeYCzBhwoS+u3BFJYx8Y59dTiR/zPgPGsvp/KakoIoKmHRq+6KAQemX2UDlfo1mZhm0v1RN3F/uM89q2OWF7/h0XcF9JFUBBwIbO58oIuZFxMyImDl69OgShWtmZllR0sSviMpk35X0WPr1nKQtedta87Z5lk0zs1RdXR0bN24c8ElRRLBx40bq6urKHUpfegg4QtIkSTXAeUDnZ+Bi4GPp5w8Cvyvb+D4zM8uMkvWMKKYyWUR8IW//zwDH5p1iR0TMKFV8ZmZZNX78eBoaGtiwYUO5Qym5uro6xo8fX+4w+kw6Zu/TwF1AJTA/Ip6UdDVQHxGLgR8DP5O0HNhEkhyamZl1q5Rd4oupTJbvfODKEsZjZjYgVFdXM2nSpHKHYSUSEXcCd3Zad0Xe50bgQ30dl5mZZVspu3oWqkw2rtCOkg4HJgG/y1tdJ6le0oOSzildmGZmZmZmZgNbfymCdR6wKCJa89YdHhGrJb0B+J2kJyLihfyDylaxzMzMzMzMLENK2eJXTGWynPOAW/JXRMTq9PuLwL3sOv4vt48rlpmZmZmZme2BSlUILC0x/RxwGknC9xDwkYh4stN+RwG/ASblqpJJGgG8HhFNkkYBDwBn5xeGKXC9DcDKXgh9FOWeBLd0fG/ZM1DvC3xvWdVf7u3wiPAbvyL10jOyv/zbl4LvLZt8b9nkeyutLp+PJevqWWRlMkha+27tVIp6MvADSW0krZLXdJf0pdfrlT8AJNVHxMzeOFd/43vLnoF6X+B7y6qBfG8DWW88Iwfyv73vLZt8b9nkeyufko7x21NlsnT5qgLH3Q9MK2VsZmZmZmZm+4uSTuBuZmZmZmZm5efEb3fzyh1ACfnesmeg3hf43rJqIN+bdW8g/9v73rLJ95ZNvrcyKVlxFzMzMzMzM+sf3OJnZmZmZmY2wDnxS0maLelZScslXV7ueHpC0nxJ6yUty1t3kKS7JT2ffh9Rzhj3laTDJC2R9JSkJyV9Ll2f+fuTVCfpz5IeT+/tq+n6SZL+lP5u/lxSTblj3VeSKiU9KulX6fKAuDdJKyQ9IekxSfXpusz/TgJIGi5pkaRnJD0t6S0D5d6seH5GZsNAfUb6+Zjde/PzsX/dmxM/kv/YgOuAM4ApwPmSppQ3qh75CTC707rLgd9GxBHAb9PlLGoBLo2IKcDJwCXpv9VAuL8m4J0RcQwwA5gt6WTgG8B3I+JNwGbgE2WMsac+BzydtzyQ7u0dETEjr4zzQPidBPge8JuIOAo4huTfb6DcmxXBz8hMGajPSD8fs31vfj72E078EicCyyPixYjYCdwKnF3mmPZZRNwHbOq0+mzgp+nnnwLn9GlQvSQiXo6IR9LP20j+IxvHALi/SGxPF6vTrwDeCSxK12fy3gAkjQfeA/woXRYD5N66kPnfSUkHAqcCPwaIiJ0RsY
UBcG+2V/yMzIiB+oz08zG799aFTP8+Qnafj078EuOAVXnLDem6gWRMRLycfl4LjClnML1B0kTgWOBPDJD7S7t6PAasB+4GXgC2RERLukuWfzf/Bfi/QFu6PJKBc28B/LekhyXNTdcNhN/JScAG4N/SLkg/knQAA+PerHh+RmbQQHtG+vmY2Xvz87EfceK3H4qklGumy7lKGgLcBnw+Il7N35bl+4uI1oiYAYwnect+VJlD6hWS3gusj4iHyx1Libw1Io4j6Qp3iaRT8zdm+HeyCjgOuCEijgVeo1O3lQzfm1lBA+F3eiA+I/18zCw/H/sRJ36J1cBhecvj03UDyTpJhwCk39eXOZ59Jqma5IF2c0T8Il09YO4PIO0usAR4CzBcUlW6Kau/m6cAZ0laQdJN7J0kfeMHwr0REavT7+uB20n+KBkIv5MNQENE/CldXkTyoBsI92bF8zMyQwb6M9LPx2zx87F/3ZsTv8RDwBFpBaUa4DxgcZlj6m2LgY+lnz8G3FHGWPZZ2u/9x8DTEfGdvE2Zvz9JoyUNTz8PAt5NMj5jCfDBdLdM3ltEfCkixkfERJL/vn4XERcwAO5N0gGShuY+A6cDyxgAv5MRsRZYJenIdNVpwFMMgHuzveJnZEYM1Gekn4/ZvDc/H/vfvXkC95SkM0n6WFcC8yPi62UOaZ9JugWYBYwC1gFXAr8EFgITgJXAhyOi8+D2fk/SW4E/AE/Q0Rf+yyRjGDJ9f5KmkwwEriR5KbMwIq6W9AaSt4AHAY8CF0ZEU/ki7RlJs4DLIuK9A+He0nu4PV2sAhZExNcljSTjv5MAkmaQFByoAV4E/ob095OM35sVz8/IbBioz0g/H7N5b34+9r97c+JnZmZmZmY2wLmrp5mZmZmZ2QDnxM/MzMzMzGyAc+JnZmZmZmY2wDnxMzMzMzMzG+Cc+JmZmZmZmQ1wTvzMBjhJsyT9qtxxmJmZ9Td+Rtr+xImfmZmZmZnZAOfEz6yfkHShpD9LekzSDyRVStou6buSnpT0W0mj031nSHpQ0lJJt0saka5/k6R7JD0u6RFJb0xPP0TSIknPSLpZksp2o2ZmZnvJz0iznnPiZ9YPSJoMzAFOiYgZQCtwAXAAUB8RRwO/B65MD7kJ+PuImA48kbf+ZuC6iDgG+F/Ay+n6Y4HPA1OANwCnlPymzMzMeoGfkWa9o6rcAZgZAKcBxwMPpS8aBwHrgTbg5+k+/w78Qv9/+3bIokUUhQH4fS2CKIrBYtBfYfM/GNYiLGL2Fwha/BUaNxvsgmFhk8lkNG2yiKigiBzDTlCb67fr5/A8aebM5TA3XA5n7p32YpJLM7O/xPeSPGt7IcnVmXmeJDPzJUmWfK9m5nC5f53kepKDk58WAPw1NRI2QOMH26FJ9mbmwS/B9tFv4+aY+b/+dP091j4A/w81EjbAUU/YDi+T7LS9kiRtL7e9lqM1urOMuZPkYGY+JHnf9uYS302yPzMfkxy2vbXkONv23KnOAgA2T42EDfBFA7bAzLxp+zDJi7ZnknxLcj/J5yQ3lmfvcvSPQ5LcTfJkKVpvk9xb4rtJnrZ9vOS4fYrTAICNUyNhMzpz3F1x4KS1/TQz5//1ewDAtlEj4c846gkAALBydvwAAABWzo4fAADAymn8AAAAVk7jBwAAsHIaPwAAgJXT+AEAAKycxg8AAGDlfgBeR+nvFkSHOwAAAABJRU5ErkJggg==\n",
            "text/plain": [
              "<Figure size 1080x576 with 2 Axes>"
            ]
          },
          "metadata": {
            "tags": [],
            "needs_background": "light"
          }
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "IPrpkTRkP3HJ",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 401
        }
      },
      "source": [
        "# NOTE: flow_from_directory treats each sub-folder as a class, which works fine for training data.\n",
        "# class_mode=None is a workaround for test data, which too must be kept in a subfolder.\n",
        "# batch_size of 1 (or any factor of the test set size) ensures every test sample is used exactly once.\n",
        "\n",
        "def make_test_generator(directory):\n",
        "    \"\"\"Build an unlabelled, unshuffled evaluation generator over `directory`.\n",
        "\n",
        "    Relies on `validate_datagen` and `image_size` defined in earlier cells.\n",
        "    \"\"\"\n",
        "    return validate_datagen.flow_from_directory(\n",
        "        directory,\n",
        "        target_size = (image_size, image_size),\n",
        "        batch_size = 1,  # BATCH_SIZE_TESTING\n",
        "        class_mode = None,\n",
        "        shuffle = False,\n",
        "        seed = 123\n",
        "    )\n",
        "\n",
        "# genuine (registered) users\n",
        "test_generator = make_test_generator('/content/drive/My Drive/Face_D/Validation')\n",
        "# test generator for imposters\n",
        "test_generator_random = make_test_generator('/content/drive/My Drive/random')\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Il-eDRB5-87c",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "actual_labels = test_generator.classes\n",
        "print(\"Actual Labels:\\n\", actual_labels)\n",
        "\n",
        "def load_model_and_weights():\n",
        "    \"\"\"Load the saved face model architecture (JSON) and its weights (H5) from Drive.\"\"\"\n",
        "    print(\"[INFO] Loading face model and its weights...\")\n",
        "    # load json and create model; `with` guarantees the handle is closed even on error\n",
        "    with open('/content/drive/My Drive/SAVED MODELS/Face Model/model22.json', 'r') as js_file:\n",
        "        loaded_model = model_from_json(js_file.read())\n",
        "    # load weights into the re-created model\n",
        "    loaded_model.load_weights('/content/drive/My Drive/SAVED MODELS/Face Model/best_face.h5')\n",
        "    print(\"Loaded face model with its weights from drive\")\n",
        "    return loaded_model\n",
        "\n",
        "# load the model and weights from directory\n",
        "loaded_model = load_model_and_weights()\n",
        "\n",
        "# per-sample class scores for genuine users and for imposters\n",
        "# NOTE(review): `np` and `confusion_matrix` are assumed to be imported in the setup cell -- verify\n",
        "pred = loaded_model.predict_generator(test_generator, steps = len(test_generator), verbose = 1)\n",
        "random_pred = loaded_model.predict_generator(test_generator_random, steps = len(test_generator_random), verbose = 1)\n",
        "predicted_class_indices = np.argmax(pred, axis = 1)\n",
        "\n",
        "# confusion matrix: all correctly classified samples lie on the diagonal\n",
        "# (columns are actual labels, rows are predicted labels)\n",
        "cm = confusion_matrix(actual_labels, predicted_class_indices)\n",
        "print(cm)\n",
        "\n",
        "# one predicted class index per line (same values as the argmax above)\n",
        "for class_index in predicted_class_indices:\n",
        "    print(class_index)\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "wZsZUYKRMzrj",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "print(pred.shape)\n",
        "\n",
        "# Threshold levels 0.00 .. 99.99 in steps of 0.01 (10000 points, matching the\n",
        "# original accumulating while-loops but without float drift). The effective\n",
        "# confidence threshold applied to the scores is level * 0.01, i.e. 0 .. ~1.\n",
        "threshold_levels = [i * 0.01 for i in range(10000)]\n",
        "\n",
        "def accepted_counts(scores, levels):\n",
        "    \"\"\"For each threshold level, count the rows of `scores` whose maximum\n",
        "    confidence is strictly above level * 0.01.\n",
        "\n",
        "    Replaces two copy-pasted while-loops that re-implemented row-wise max by hand.\n",
        "    \"\"\"\n",
        "    max_confidence = scores.max(axis = 1)\n",
        "    return [int((max_confidence > level * 0.01).sum()) for level in levels]\n",
        "\n",
        "n_genuine = len(pred)          # was hard-coded as 80\n",
        "n_imposter = len(random_pred)  # was hard-coded as 80\n",
        "\n",
        "# FRR: fraction of genuine users rejected at each threshold level\n",
        "percent = [(n_genuine - a) / n_genuine for a in accepted_counts(pred, threshold_levels)]\n",
        "# FAR: fraction of imposters accepted at each threshold level\n",
        "percent_imp = [a / n_imposter for a in accepted_counts(random_pred, threshold_levels)]\n",
        "\n",
        "# EER: threshold levels where FAR and FRR coincide exactly\n",
        "equal_tresh = [t for t, fr, fa in zip(threshold_levels, percent, percent_imp) if fa == fr]\n",
        "equal_percent = [fa for fr, fa in zip(percent, percent_imp) if fa == fr]\n",
        "\n",
        "print(\"Equal Treshold :\", equal_tresh)\n",
        "plt.plot(threshold_levels, percent)\n",
        "plt.plot(threshold_levels, percent_imp)\n",
        "plt.plot(equal_tresh, equal_percent)\n",
        "plt.title('Error Rate')\n",
        "plt.ylabel('percentage')\n",
        "plt.xlabel('treshold level')\n",
        "plt.legend(['FRR', 'FAR', 'EER'])\n",
        "plt.show()\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "RVRRocwQqbgc",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def Correct_Labeling():\n",
        "    \"\"\"Print each prediction row's top class index and confidence, plus a\n",
        "    running count of rows whose top confidence falls below 0.80.\n",
        "\n",
        "    Uses np.argmax instead of the original hand-rolled linear scan.\n",
        "    \"\"\"\n",
        "    import numpy as np  # kept function-local, as in the original cell\n",
        "    count = 0\n",
        "    for row in pred:\n",
        "        best_index = int(np.argmax(row))\n",
        "        best_confidence = row[best_index]\n",
        "        print(\"\\n\", best_index, best_confidence)\n",
        "        if best_confidence < 0.80:\n",
        "            count += 1\n",
        "        print(count)\n",
        "\n",
        "\n",
        "Correct_Labeling()\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ttuoDSKkGBKi",
        "colab_type": "text"
      },
      "source": [
        "**Iris Biometric System Starts from here**"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "DNFb_ak5GKPi",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        ""
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "K9h6IwCAKCtC",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "from google.colab import drive\n",
        "drive.mount('/content/drive')"
      ],
      "execution_count": 0,
      "outputs": []
    }
  ]
}