{ "cells": [ { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Facial Expression Embedding" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2023-04-19 12:14:54.399884: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F AVX512_VNNI FMA\n", "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n" ] } ], "source": [ "import numpy as np\n", "import os\n", "import random\n", "from typing import *\n", "import tensorflow as tf\n", "from pathlib import Path\n", "from tensorflow.keras import applications\n", "from tensorflow.keras import layers\n", "from tensorflow.keras import losses\n", "from tensorflow.keras import optimizers\n", "from tensorflow.keras import metrics\n", "from tensorflow.keras import Model\n", "from tensorflow.keras.applications import resnet\n", "import pandas as pd\n", "import mediapipe as mp\n", "import plotly.express as px\n", "from plotly.subplots import make_subplots\n", "import plotly.graph_objects as go\n", "import requests\n", "from tqdm import tqdm\n", "import base64\n", "from concurrent.futures import ThreadPoolExecutor, as_completed\n", "from retrying import retry\n", "import swifter\n" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Dataset Loading" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "IMAGE_DIR = \"images\"\n", "TRAINING_DATASET = \"training_dataset\"" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "@retry(stop_max_attempt_number=3)\n", "def image_downloader(url: str):\n", " get_name = lambda url: base64.urlsafe_b64encode(url.encode()).decode()\n", " Path(IMAGE_DIR).mkdir(exist_ok=True)\n", " filename = get_name(url)\n", " if os.path.exists(os.path.join(IMAGE_DIR, filename)):\n", " return filename\n", " res = requests.get(url, timeout=10)\n", " if not res.ok:\n", " return None\n", " with open(os.path.join(IMAGE_DIR, filename), \"wb\") as f:\n", " f.write(res.content)\n", " return filename\n" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "def get_column_names():\n", " names = []\n", " for i in range(1, 4):\n", " names += [\n", " f\"img{i}_url\",\n", " f\"img{i}_tl_col\",\n", " f\"img{i}_br_col\",\n", " f\"img{i}_tl_row\",\n", " f\"img{i}_br_row\",\n", " ]\n", " names += [\"triplet_type\"]\n", " for i in range(6):\n", " names += [f\"annotator{i+1}_id\", f\"annotation{i+1}\"]\n", " return names\n", "\n", "\n", "def get_local_storage_column_names():\n", " names = []\n", " for i in range(1, 4):\n", " names += [\n", " f\"img{i}_id\",\n", " f\"img{i}_tl_col\",\n", " f\"img{i}_br_col\",\n", " f\"img{i}_tl_row\",\n", " f\"img{i}_br_row\",\n", " ]\n", " names += [\"triplet_type\"]\n", " names += [\"annotator1_id\"]\n", " names += [\"annotation\"]\n", " return names\n", "\n", "\n", "def get_label(annotations: pd.Series):\n", " def mode(x):\n", " s = pd.Series(x)\n", " if s.value_counts(normalize=True).max() < 0.5:\n", " return np.nan\n", " return s.mode().at[0]\n", "\n", " return annotations.swifter.apply(mode)\n", "\n", "\n", "def fecnet_dataset_loader(dataset_csv: str):\n", " if isinstance(dataset_csv, bytes):\n", " dataset_csv = dataset_csv.decode()\n", " df = 
pd.read_csv(\n", " dataset_csv, header=None, names=get_column_names(), nrows=10000\n", " ) # TODO: remove nrows\n", "\n", " # download images\n", " df[\"img1_url\"] = df[\"img1_url\"].swifter.apply(image_downloader)\n", " df[\"img2_url\"] = df[\"img2_url\"].swifter.apply(image_downloader)\n", " df[\"img3_url\"] = df[\"img3_url\"].swifter.apply(image_downloader)\n", " df.dropna(subset=[\"img1_url\", \"img2_url\", \"img3_url\"], inplace=True)\n", "\n", " # determine label\n", " df[\"label\"] = get_label(\n", " pd.Series(df[[f\"annotation{i}\" for i in range(1, 7)]].values.tolist())\n", " )\n", " df.dropna(subset=[\"label\"], inplace=True)\n", "\n", " samples = {\n", " \"img1\": [],\n", " \"img1_box\": [],\n", " \"img2\": [],\n", " \"img2_box\": [],\n", " \"img3\": [],\n", " \"img3_box\": [],\n", " }\n", "\n", " for _, row in df.iterrows():\n", " img1_idx, img2_idx, img3_idx = 1, 2, 3\n", " if row.label == 1:\n", " img1_idx, img3_idx = img3_idx, img1_idx\n", " elif row.label == 2:\n", " img2_idx, img3_idx = img3_idx, img2_idx\n", " bounding_boxes = (\n", " (\n", " (row[f\"img{img1_idx}_tl_col\"], row[f\"img{img1_idx}_tl_row\"]),\n", " (row[f\"img{img1_idx}_br_col\"], row[f\"img{img1_idx}_br_row\"]),\n", " ),\n", " (\n", " (row[f\"img{img2_idx}_tl_col\"], row[f\"img{img2_idx}_tl_row\"]),\n", " (row[f\"img{img2_idx}_br_col\"], row[f\"img{img2_idx}_br_row\"]),\n", " ),\n", " (\n", " (row[f\"img{img3_idx}_tl_col\"], row[f\"img{img3_idx}_tl_row\"]),\n", " (row[f\"img{img3_idx}_br_col\"], row[f\"img{img3_idx}_br_row\"]),\n", " ),\n", " )\n", " samples[\"img1\"].append(row[f\"img{img1_idx}_url\"])\n", " samples[\"img1_box\"].append(bounding_boxes[0])\n", " samples[\"img2\"].append(row[f\"img{img2_idx}_url\"])\n", " samples[\"img2_box\"].append(bounding_boxes[1])\n", " samples[\"img3\"].append(row[f\"img{img3_idx}_url\"])\n", " samples[\"img3_box\"].append(bounding_boxes[2])\n", " return samples\n" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "def extract_landmarks(image):\n", " with mp.solutions.face_mesh.FaceMesh(\n", " static_image_mode=True,\n", " max_num_faces=1,\n", " refine_landmarks=True,\n", " min_detection_confidence=0.5,\n", " ) as face_mesh:\n", " results = face_mesh.process(image.numpy())\n", " if results.multi_face_landmarks:\n", " landmarks = results.multi_face_landmarks[0]\n", " landmarks = np.array(\n", " [[lm.x, lm.y, lm.z] for lm in landmarks.landmark], dtype=np.float32\n", " )\n", " landmarks = landmarks.flatten()\n", " else:\n", " landmarks = np.zeros(478 * 3, dtype=np.float32)\n", " return landmarks\n", "\n", "\n", "def preprocess_image(filename: str, tl: Tuple[float, float], br: Tuple[float, float]):\n", " image_string = tf.io.read_file(tf.strings.join([IMAGE_DIR, \"/\", filename]))\n", " image = tf.image.decode_jpeg(image_string, channels=3)\n", " image = tf.image.convert_image_dtype(image, tf.uint8)\n", "\n", " # crop image\n", " tl = tf.cast(tf.multiply(tl, tf.cast(tf.shape(image)[:2][::-1], tf.float32)), tf.int32)\n", " br = tf.cast(tf.multiply(br, tf.cast(tf.shape(image)[:2][::-1], tf.float32)), tf.int32)\n", " image = tf.image.crop_to_bounding_box(\n", " image, tl[1], tl[0], br[1] - tl[1], br[0] - tl[0]\n", " )\n", "\n", " # extract landmarks using facemesh\n", " return tf.py_function(extract_landmarks, [image], tf.float32)\n", " # return image\n", "\n", "\n", "ImgType = Tuple[str, Tuple[float, float], Tuple[float, float]]\n", "\n", "\n", "def preprocess_triplets(triplet: dict):\n", " anchor: ImgType = (triplet[\"img1\"], 
triplet[\"img1_box\"][0], triplet[\"img1_box\"][1])\n", " positive: ImgType = (\n", " triplet[\"img2\"],\n", " triplet[\"img2_box\"][0],\n", " triplet[\"img2_box\"][1],\n", " )\n", " negative: ImgType = (\n", " triplet[\"img3\"],\n", " triplet[\"img3_box\"][0],\n", " triplet[\"img3_box\"][1],\n", " )\n", " return (\n", " preprocess_image(*anchor),\n", " preprocess_image(*positive),\n", " preprocess_image(*negative),\n", " )\n" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "dc545e1717044afd9ddc0b32fecd72c3", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Pandas Apply: 0%| | 0/10000 [00:00\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0msiamese_model\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mSiameseModel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msiamese_network\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0msiamese_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompile\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moptimizer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptimizers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mAdam\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0.0005\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweighted_metrics\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"accuracy\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0msiamese_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain_dataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepochs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m40\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalidation_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mval_dataset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m~/Git/FacialExpressionSyncService/.venv/lib/python3.7/site-packages/keras/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 63\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 64\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 65\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 66\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 67\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/Git/FacialExpressionSyncService/.venv/lib/python3.7/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, 
{ "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Model Training" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "ename": "KeyboardInterrupt", "evalue": "", "output_type": "error", "traceback": [ "\u001b[0;31mKeyboardInterrupt\u001b[0m: " ] } ], "source": [ "siamese_model = SiameseModel(siamese_network)\n", "siamese_model.compile(optimizer=optimizers.Adam(0.0005), weighted_metrics=[\"accuracy\"])\n", "siamese_model.fit(train_dataset, epochs=40, validation_data=val_dataset)" ] },
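{ "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "Landmark extraction runs through `tf.py_function`, so every epoch repeats the MediaPipe work. A minimal sketch of snapshotting the mapped `dataset` into the `TRAINING_DATASET` directory defined at the top (assumes a TF version with `tf.data.Dataset.save`/`load`; older releases use `tf.data.experimental.save`/`load`):" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Sketch: snapshot the preprocessed triplets so that later runs can skip\n", "# the MediaPipe pass entirely.\n", "tf.data.Dataset.save(dataset, TRAINING_DATASET)\n", "\n", "# reload in a later session; the element_spec is restored automatically\n", "cached_dataset = tf.data.Dataset.load(TRAINING_DATASET)" ] },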
"\u001b[0;32m~/Git/FacialExpressionSyncService/.venv/lib/python3.7/site-packages/tensorflow/python/eager/execute.py\u001b[0m in \u001b[0;36mquick_execute\u001b[0;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001b[0m\n\u001b[1;32m 51\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mensure_initialized\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,\n\u001b[0;32m---> 53\u001b[0;31m inputs, attrs, num_outputs)\n\u001b[0m\u001b[1;32m 54\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_NotOkStatusException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mname\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mKeyboardInterrupt\u001b[0m: " ] } ], "source": [ "siamese_model = SiameseModel(siamese_network)\n", "siamese_model.compile(optimizer=optimizers.Adam(0.0005), weighted_metrics=[\"accuracy\"])\n", "siamese_model.fit(train_dataset, epochs=40, validation_data=val_dataset)" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Evaluate Model" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "test_df = fecnet_dataset_loader(\"data/faceexp-comparison-data-test-public.csv\")\n", "\n", "test_dataset = tf.data.Dataset.from_tensor_slices(\n", " test_df\n", ")\n", "\n", "test_dataset = test_dataset.map(preprocess_triplets)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "siamese_model.evaluate(test_dataset)" ] }, { "cell_type": "code", "execution_count": 168, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Positive similarity: 0.96082336\n", "Negative similarity 0.85784876\n", "Positive-Negative similarity 0.8386482\n" ] } ], "source": [ "sample = next(iter(train_dataset))\n", "# visualise_face_mesh_triplet(*sample)\n", "\n", "anchor, positive, negative = sample\n", "anchor_embedding, positive_embedding, negative_embedding = (\n", " embedding(tf.reshape(anchor, (-1, 478,3))),\n", " embedding(tf.reshape(positive, (-1, 478,3))),\n", " embedding(tf.reshape(negative, (-1, 478,3))),\n", ")\n", "cosine_similarity = metrics.CosineSimilarity()\n", "\n", "positive_similarity = cosine_similarity(anchor_embedding, positive_embedding)\n", "print(\"Positive similarity:\", positive_similarity.numpy())\n", "\n", "negative_similarity = cosine_similarity(anchor_embedding, negative_embedding)\n", "print(\"Negative similarity\", negative_similarity.numpy())\n", "\n", "positive_negative_similarity = cosine_similarity(positive_embedding, negative_embedding)\n", "print(\"Positive-Negative similarity\", positive_negative_similarity.numpy())" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "INFO: Created TensorFlow Lite XNNPACK delegate for CPU.\n" ] } ], "source": [ "# tf.data.Dataset.save(dataset, \"training_dataset\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": ".venv", "language": "python", "name": "python3" }, 
"language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.9" }, "orig_nbformat": 4 }, "nbformat": 4, "nbformat_minor": 2 }