{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "1VQMOv428DfA",
        "outputId": "24747612-50eb-4260-cb22-79a9c80fddd1"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Mounted at /content/drive\n"
          ]
        }
      ],
      "source": [
        "# Mount Google Drive; project modules and the CSV dataset live under MyDrive.\n",
        "from google.colab import drive\n",
        "drive.mount('/content/drive')"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "bf06Dl-pPoWP",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "b5fec6bf-5fb6-42d0-ad7c-9761e1da3cee"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "\u001b[?25l     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/242.5 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K     \u001b[91m━━━━━━\u001b[0m\u001b[91m╸\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m41.0/242.5 kB\u001b[0m \u001b[31m986.5 kB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K     \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m\u001b[90m━\u001b[0m \u001b[32m235.5/242.5 kB\u001b[0m \u001b[31m3.3 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m242.5/242.5 kB\u001b[0m \u001b[31m2.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25h"
          ]
        }
      ],
      "source": [
        "# Use %pip (not bare pip) so the install targets the running kernel's environment.\n",
        "%pip install -q tensorflow-model-optimization"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "5bXIvsnVQm0v"
      },
      "outputs": [],
      "source": [
        "import sys\n",
        "# Make the project's helper modules on Drive importable.\n",
        "sys.path.append('/content/drive/MyDrive/deep_learning_quantized')\n",
        "\n",
        "import pandas as pd\n",
        "import numpy as np\n",
        "# NOTE(review): torch appears unused in this notebook's visible code --\n",
        "# presumably required by the project modules below; confirm before removing.\n",
        "import torch\n",
        "import torch.nn as nn\n",
        "# Project-local modules: model definitions, training utilities, preprocessing.\n",
        "import architectures\n",
        "import network_training\n",
        "import preprocessing"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "F0GDzAhFrvHV"
      },
      "outputs": [],
      "source": [
        "def read_data_from_csv(csv_file_name):\n",
        "    \"\"\"Load a dataset CSV whose last two columns are the label and subject id.\n",
        "\n",
        "    Parameters\n",
        "    ----------\n",
        "    csv_file_name : str\n",
        "        Path to the CSV file.\n",
        "\n",
        "    Returns\n",
        "    -------\n",
        "    tuple of np.ndarray\n",
        "        (x_data, y_data, subj): feature matrix (all columns but the last two),\n",
        "        per-row labels (second-to-last column), subject ids (last column).\n",
        "    \"\"\"\n",
        "    data = pd.read_csv(csv_file_name)\n",
        "    x_data = data.iloc[:, :-2].values\n",
        "    y_data = data.iloc[:, -2].values\n",
        "    subj = data.iloc[:, -1].values\n",
        "\n",
        "    return x_data, y_data, subj"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "eCyTR0urJoJw"
      },
      "outputs": [],
      "source": [
        "# Dataset selection: enable exactly one block. fs is the sampling rate [Hz];\n",
        "# step_size is the decimation factor applied before windowing.\n",
        "csv_file_name = '/content/drive/MyDrive/deep_learning_quantized/multimodal_data.csv'\n",
        "labels = ('low', 'medium', 'high')\n",
        "avg = True  # NOTE(review): presumably averages labels per window -- confirm in preprocessing.apply_sliding_window\n",
        "fs = 64\n",
        "step_size = 8\n",
        "\n",
        "# Alternative dataset: activity classification at 400 Hz.\n",
        "# csv_file_name = '/content/drive/MyDrive/deep_learning_quantized/combined_data.csv'\n",
        "# labels = ('rest', 'squat', 'step')\n",
        "# avg = False\n",
        "# fs = 400\n",
        "# step_size = 32\n",
        "\n",
        "features, targets, subj_data = read_data_from_csv(csv_file_name)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "W0delxoDwp0D",
        "outputId": "89ac8eb7-967f-47cb-ce47-014de2f1294b"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "[[ -18.68]\n",
            " [ -17.01]\n",
            " [ -14.2 ]\n",
            " ...\n",
            " [-116.5 ]\n",
            " [-116.5 ]\n",
            " [-113.31]]\n",
            "[ 1.64195064e-16 -5.50153869e-03 -1.00857702e-02 ...  1.03032917e-02\n",
            "  9.13208838e-03  8.07195245e-03]\n"
          ]
        }
      ],
      "source": [
        "# Channel selection: keep only the first feature column, reshaped to (N, 1).\n",
        "# features = features[:, 0:4]\n",
        "features = features[:, 0].reshape(-1, 1)\n",
        "# features = features[:, 1:4]\n",
        "\n",
        "print(features)\n",
        "print(targets)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "wTv6Q3Ubawlw"
      },
      "outputs": [],
      "source": [
        "# Training hyperparameters; window_size and overlap are derived below.\n",
        "params = {\n",
        "    \"seed\" : 256,\n",
        "    \"learning_rate\" : 0.002,\n",
        "    \"weight_decay\" : 1e-6,\n",
        "    \"step_size\" : 3,  # epochs between LR decays (see lr_schedule in train_model)\n",
        "    \"gamma\" : 0.8,  # multiplicative LR decay factor\n",
        "    \"batch_size\" : 128,\n",
        "    \"epochs\" : 30,\n",
        "    \"num_resblocks\" : 1,\n",
        "}\n",
        "\n",
        "# 256-sample windows with 7/8 overlap (i.e. a stride of 32 samples).\n",
        "params[\"window_size\"] = 256 #fs * 4\n",
        "params[\"overlap\"] = params[\"window_size\"] * 7//8"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "zzwYFHnv_J4I",
        "outputId": "a14a705e-37ee-4b9e-d34b-e00180eae892"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "(326120, 1) (326120,)\n",
            "(10184, 256, 1)\n"
          ]
        }
      ],
      "source": [
        "from scipy import fft\n",
        "import matplotlib.pyplot as plt\n",
        "import numpy as np\n",
        "\n",
        "# Scaling strategies (all disabled): use the raw feature channel as-is.\n",
        "# scaled_data = preprocessing.scale_data(features, targets)\n",
        "scaled_data = features\n",
        "# scaled_data = features / np.max(features)\n",
        "# scaled_data = preprocessing.resample_signal(scaled_data, 400, 20)\n",
        "# Decimate signal and labels by step_size via plain slicing\n",
        "# (no anti-alias filtering is applied here).\n",
        "scaled_data = scaled_data[::step_size]\n",
        "targ = targets[::step_size]\n",
        "print(scaled_data.shape, targ.shape)\n",
        "\n",
        "# Segment into overlapping windows; result is (num_windows, window_size, num_channels).\n",
        "sliding_X_data, sliding_y_data = preprocessing.apply_sliding_window(scaled_data, targ, subj_data, params[\"window_size\"], params[\"overlap\"], avg)\n",
        "\n",
        "X_data = sliding_X_data.astype(np.float32)\n",
        "y_data = sliding_y_data.astype(np.uint8)  # small class indices fit in uint8\n",
        "\n",
        "# Optional diagnostic plot of one window and its FFT magnitude (disabled).\n",
        "# print(X_data.shape, y_data.shape)\n",
        "# plt.figure(figsize=(6, 4))\n",
        "# plt.subplot(2, 1, 1)\n",
        "# plt.plot(X_data[0, :], color='blue')\n",
        "# plt.xlabel('n')\n",
        "# plt.ylabel('x(n)')\n",
        "# plt.subplot(2, 1, 2)\n",
        "# plt.stem(np.abs(fft.fft(X_data[0, :], axis=0))[:params[\"window_size\"]//2], 'r')\n",
        "# plt.xlabel('m')\n",
        "# plt.ylabel('$|X_m|$')\n",
        "\n",
        "# plt.savefig('original_signal.eps', format='eps')\n",
        "plt.show()\n",
        "\n",
        "print(X_data.shape)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "ItuhNLcn_3VR"
      },
      "outputs": [],
      "source": [
        "# Derive model I/O sizes: channels from the window's last axis, classes from the label set.\n",
        "params[\"num_channels\"] = X_data.shape[2]\n",
        "params[\"num_classes\"] = len(labels)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "j0NGC9niUIAf"
      },
      "outputs": [],
      "source": [
        "import tensorflow as tf\n",
        "from tensorflow.keras import layers, models\n",
        "\n",
        "def create_functional_resnet(input_shape, num_classes, num_resblocks):\n",
        "    \"\"\"Assemble a compact ResNet-style classifier over (window, 1, channels) inputs.\n",
        "\n",
        "    Stem: one bias-free strided conv, then three conv-BN-ReLU stages that\n",
        "    downsample the first (time) axis. Body: `num_resblocks` residual blocks\n",
        "    (pointwise -> depthwise -> pointwise, each with BN+ReLU) around identity\n",
        "    shortcuts. Head: global average pooling, dropout, softmax dense layer.\n",
        "    \"\"\"\n",
        "    def _conv_bn_relu(t, filters, kernel, stride):\n",
        "        # Conv2D without bias (BatchNorm supplies the shift), then BN and ReLU.\n",
        "        t = layers.Conv2D(filters, kernel_size=kernel, strides=stride, padding='same', use_bias=False)(t)\n",
        "        t = layers.BatchNormalization()(t)\n",
        "        return layers.ReLU()(t)\n",
        "\n",
        "    inputs = layers.Input(shape=input_shape)\n",
        "\n",
        "    # Stem conv has no BN/ReLU; it halves the time axis.\n",
        "    x = layers.Conv2D(4, kernel_size=(3, 1), strides=(2, 1), padding='same', use_bias=False)(inputs)\n",
        "    x = _conv_bn_relu(x, 16, (5, 1), (4, 1))\n",
        "    x = _conv_bn_relu(x, 32, (3, 1), (2, 1))\n",
        "    x = _conv_bn_relu(x, 64, (3, 1), (2, 1))\n",
        "\n",
        "    for _ in range(num_resblocks):\n",
        "        shortcut = x\n",
        "        x = _conv_bn_relu(x, 64, (1, 1), (1, 1))\n",
        "        x = layers.DepthwiseConv2D(kernel_size=(3, 1), strides=(1, 1), padding='same', use_bias=False)(x)\n",
        "        x = layers.BatchNormalization()(x)\n",
        "        x = layers.ReLU()(x)\n",
        "        x = _conv_bn_relu(x, 64, (1, 1), (1, 1))\n",
        "        x = layers.Add()([x, shortcut])\n",
        "\n",
        "    # Classification head.\n",
        "    x = layers.GlobalAveragePooling2D()(x)\n",
        "    x = layers.Dropout(0.2)(x)\n",
        "    outputs = layers.Dense(num_classes, activation='softmax')(x)\n",
        "\n",
        "    return models.Model(inputs=inputs, outputs=outputs)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "xkqsfrA1Ut8I"
      },
      "outputs": [],
      "source": [
        "from sklearn.model_selection import train_test_split\n",
        "import tensorflow as tf\n",
        "from keras.models import Sequential\n",
        "import keras.optimizers\n",
        "from keras.metrics import Precision, Recall\n",
        "from sklearn.metrics import f1_score\n",
        "import os\n",
        "import time\n",
        "from keras.callbacks import LearningRateScheduler\n",
        "from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n",
        "import tensorflow_model_optimization as tfmot\n",
        "\n",
        "def train_model(params, X_train, y_train, X_test, y_test, pruning = False):\n",
        "    \"\"\"Build, train and evaluate the functional ResNet.\n",
        "\n",
        "    Args:\n",
        "        params: hyperparameter dict (learning_rate, weight_decay, step_size,\n",
        "            gamma, batch_size, epochs, num_classes, num_resblocks).\n",
        "        X_train, y_train: training windows and one-hot labels.\n",
        "        X_test, y_test: held-out windows and one-hot labels.\n",
        "        pruning: if True, wrap the model with tfmot magnitude pruning and\n",
        "            train/evaluate the pruned model instead of the plain one.\n",
        "\n",
        "    Returns:\n",
        "        (trained_model, accuracy, f1, precision, recall, predict_time_seconds)\n",
        "    \"\"\"\n",
        "    def lr_schedule(epoch, lr):\n",
        "      # Decay the LR by `gamma` every `step_size` epochs (skipping epoch 0).\n",
        "      if epoch % params['step_size'] == 0 and epoch != 0:\n",
        "        return lr * params['gamma']\n",
        "      return lr\n",
        "\n",
        "    lr_scheduler = LearningRateScheduler(lr_schedule)\n",
        "    # BUGFIX: params['weight_decay'] was declared but never passed to AdamW.\n",
        "    optimizer = keras.optimizers.AdamW(learning_rate=params['learning_rate'],\n",
        "                                       weight_decay=params['weight_decay'])\n",
        "\n",
        "    input_shape = X_train.shape[1:]\n",
        "    model = create_functional_resnet(input_shape, params['num_classes'], params['num_resblocks'])\n",
        "    print(model.summary())\n",
        "\n",
        "    model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n",
        "                  metrics=['accuracy'])\n",
        "\n",
        "    if pruning:\n",
        "      # BUGFIX: was called with the notebook-global X_data; use the training split.\n",
        "      trained_model = network_training.get_pruned_model(model, X_train, params)\n",
        "\n",
        "      callbacks = [\n",
        "          tfmot.sparsity.keras.UpdatePruningStep(),\n",
        "          lr_scheduler\n",
        "        ]\n",
        "\n",
        "      trained_model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n",
        "                    metrics=['accuracy'])\n",
        "\n",
        "      trained_model.fit(X_train, y_train, batch_size=params['batch_size'],\n",
        "                epochs=params['epochs'], verbose=1, validation_data=(X_test, y_test),\n",
        "                callbacks=callbacks)\n",
        "    else:\n",
        "      model.fit(X_train, y_train, batch_size=params['batch_size'],\n",
        "              epochs=params['epochs'], verbose=1, validation_data=(X_test, y_test),\n",
        "              callbacks=[lr_scheduler])\n",
        "      trained_model = model\n",
        "\n",
        "    # BUGFIX: score the model that was actually trained (the pruned wrapper when\n",
        "    # pruning=True); previously the unpruned `model` was always used here.\n",
        "    start_time = time.time()\n",
        "    y_pred = trained_model.predict(X_test)\n",
        "    y_pred_classes = np.argmax(y_pred, axis=1)\n",
        "    y_true_classes = np.argmax(y_test, axis=1)\n",
        "    non_quantized_time = time.time() - start_time\n",
        "\n",
        "    accuracy = accuracy_score(y_true_classes, y_pred_classes)\n",
        "    precision = precision_score(y_true_classes, y_pred_classes, average='macro')\n",
        "    recall = recall_score(y_true_classes, y_pred_classes, average='macro')\n",
        "    f1 = f1_score(y_true_classes, y_pred_classes, average='macro')\n",
        "\n",
        "    return trained_model, accuracy, f1, precision, recall, non_quantized_time"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "1CSNhfY2Qnhx"
      },
      "outputs": [],
      "source": [
        "import tensorflow_model_optimization as tfmot\n",
        "from sklearn.model_selection import KFold\n",
        "\n",
        "def training_loop(X_data, y_data, params, crossvalid = False):\n",
        "  \"\"\"Train and score the float model and its quantized TFLite counterpart.\n",
        "\n",
        "  Args:\n",
        "      X_data: windowed features, shape (num_windows, window_size, num_channels).\n",
        "      y_data: integer class label per window.\n",
        "      params: hyperparameter dict (seed, batch_size, epochs, num_classes, ...).\n",
        "      crossvalid: if True run 5-fold KFold CV, otherwise a single 80/20 split.\n",
        "\n",
        "  Prints per-fold and mean metrics, inference timings and model sizes.\n",
        "  \"\"\"\n",
        "  if crossvalid:\n",
        "    num_folds = 5\n",
        "    kf = KFold(n_splits=num_folds, shuffle=True, random_state = params[\"seed\"])\n",
        "    data_splits = [(X_data[train_ind], X_data[val_ind], y_data[train_ind], y_data[val_ind]) for train_ind, val_ind in kf.split(X_data)]\n",
        "  else:\n",
        "    num_folds = 1\n",
        "    X_train, X_val, y_train, y_val = network_training.split_data(X_data, y_data, train_size=0.8)\n",
        "    data_splits = [(X_train, X_val, y_train, y_val)]\n",
        "\n",
        "  # Per-fold metric accumulators for the float model...\n",
        "  total_accuracy = []\n",
        "  total_f1_score = []\n",
        "  total_precision = []\n",
        "  total_recall = []\n",
        "  total_non_quantized_time = []\n",
        "\n",
        "  # ...and for the quantized model.\n",
        "  total_accuracy_quant = []\n",
        "  total_f1_score_quant = []\n",
        "  total_precision_quant = []\n",
        "  total_recall_quant = []\n",
        "  total_quantized_time = []\n",
        "\n",
        "  for fold, (X_train_fold, X_val_fold, y_train_fold, y_val_fold) in enumerate(data_splits):\n",
        "      print(f\"Fold {fold+1}/{num_folds}\")\n",
        "\n",
        "      y_train_fold = keras.utils.to_categorical(y_train_fold, num_classes=params['num_classes'])\n",
        "      y_val_fold = keras.utils.to_categorical(y_val_fold, num_classes=params['num_classes'])\n",
        "\n",
        "      # Append a singleton axis: (N, window, channels) -> (N, window, channels, 1) for Conv2D.\n",
        "      X_train_fold = np.expand_dims(X_train_fold, axis=-1)\n",
        "      X_val_fold = np.expand_dims(X_val_fold, axis=-1)\n",
        "\n",
        "      model, accuracy, f1_val, precision, recall, non_quantized_time = train_model(params, X_train_fold, y_train_fold, X_val_fold, y_val_fold, pruning=False)\n",
        "\n",
        "      # Post-training quantization and TFLite evaluation.\n",
        "      tflite_quant_model = network_training.get_quantized_model(model, X_train_fold)\n",
        "      interpreter = network_training.get_tflite_interpreter(tflite_quant_model)\n",
        "      accuracy_quant, f1_score_quant, precision_quant, recall_quant, quantized_time = network_training.evaluate_quantized_metrics(interpreter, X_val_fold, y_val_fold)\n",
        "\n",
        "      # Re-time the float model with evaluate() (this overwrites the predict()\n",
        "      # timing returned by train_model).\n",
        "      start_time = time.time()\n",
        "      score = model.evaluate(X_val_fold, y_val_fold, verbose=0)\n",
        "      non_quantized_time = time.time() - start_time\n",
        "\n",
        "      print(f\"\"\"Non-quantized: acc - {accuracy}, f1 - {f1_val}, prec - {precision}, rec - {recall}\"\"\")\n",
        "      # BUGFIX: the quantized recall slot previously printed quantized_time.\n",
        "      print(f\"\"\"Quantized: acc - {accuracy_quant}, f1 - {f1_score_quant}, prec - {precision_quant}, rec - {recall_quant}\"\"\")\n",
        "      print(f\"\"\"Non-quantized time: {non_quantized_time}; Quantized time: {quantized_time} \"\"\")\n",
        "\n",
        "      if fold == 0:\n",
        "        non_quantized_model_size, quantized_model_size =  network_training.compare_model_sizes(tflite_quant_model, model)\n",
        "        print(f\"\"\"Non-quantized size: {non_quantized_model_size}; Quantized size: {quantized_model_size} KB \"\"\")\n",
        "\n",
        "      total_accuracy.append(accuracy)\n",
        "      total_f1_score.append(f1_val)\n",
        "      total_precision.append(precision)\n",
        "      total_recall.append(recall)\n",
        "      total_non_quantized_time.append(non_quantized_time)\n",
        "\n",
        "      total_accuracy_quant.append(accuracy_quant)\n",
        "      total_f1_score_quant.append(f1_score_quant)\n",
        "      total_precision_quant.append(precision_quant)\n",
        "      total_recall_quant.append(recall_quant)\n",
        "      total_quantized_time.append(quantized_time)\n",
        "\n",
        "  mean_accuracies = np.mean(total_accuracy)\n",
        "  mean_f1_scores = np.mean(total_f1_score)\n",
        "  mean_precisions = np.mean(total_precision)\n",
        "  mean_recalls = np.mean(total_recall)\n",
        "  mean_times = np.mean(total_non_quantized_time)\n",
        "\n",
        "  mean_accuracies_quant = np.mean(total_accuracy_quant)\n",
        "  mean_f1_scores_quant = np.mean(total_f1_score_quant)\n",
        "  mean_precisions_quant = np.mean(total_precision_quant)\n",
        "  mean_recalls_quant = np.mean(total_recall_quant)\n",
        "  mean_times_quant = np.mean(total_quantized_time)\n",
        "  print(f\"\"\"Non-quantized:  acc - {mean_accuracies*100:.2f} F1 - {mean_f1_scores*100:.2f}\n",
        "        prec - {mean_precisions*100:.2f} rec - {mean_recalls*100:.2f}\n",
        "        time [ms] - {mean_times*1000:.2f} \"\"\")\n",
        "\n",
        "  print(f\"\"\"Quantized:  acc - {mean_accuracies_quant*100:.2f} F1 - {mean_f1_scores_quant*100:.2f}\n",
        "        prec - {mean_precisions_quant*100:.2f} rec - {mean_recalls_quant*100:.2f}\n",
        "        time [ms] - {mean_times_quant*1000:.2f} \"\"\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "VOMKMmuvQ-YT",
        "outputId": "bf99a363-2b33-47a6-b8cc-55244fc84216"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Fold 1/1\n",
            "Model: \"model\"\n",
            "_________________________________________________________________\n",
            " Layer (type)                Output Shape              Param #   \n",
            "=================================================================\n",
            " input_1 (InputLayer)        [(None, 256, 1, 1)]       0         \n",
            "                                                                 \n",
            " conv2d (Conv2D)             (None, 128, 1, 4)         12        \n",
            "                                                                 \n",
            " conv2d_1 (Conv2D)           (None, 32, 1, 16)         320       \n",
            "                                                                 \n",
            " batch_normalization (Batch  (None, 32, 1, 16)         64        \n",
            " Normalization)                                                  \n",
            "                                                                 \n",
            " re_lu (ReLU)                (None, 32, 1, 16)         0         \n",
            "                                                                 \n",
            " conv2d_2 (Conv2D)           (None, 16, 1, 32)         1536      \n",
            "                                                                 \n",
            " batch_normalization_1 (Bat  (None, 16, 1, 32)         128       \n",
            " chNormalization)                                                \n",
            "                                                                 \n",
            " re_lu_1 (ReLU)              (None, 16, 1, 32)         0         \n",
            "                                                                 \n",
            " conv2d_3 (Conv2D)           (None, 8, 1, 64)          6144      \n",
            "                                                                 \n",
            " batch_normalization_2 (Bat  (None, 8, 1, 64)          256       \n",
            " chNormalization)                                                \n",
            "                                                                 \n",
            " re_lu_2 (ReLU)              (None, 8, 1, 64)          0         \n",
            "                                                                 \n",
            " global_average_pooling2d (  (None, 64)                0         \n",
            " GlobalAveragePooling2D)                                         \n",
            "                                                                 \n",
            " dropout (Dropout)           (None, 64)                0         \n",
            "                                                                 \n",
            " dense (Dense)               (None, 3)                 195       \n",
            "                                                                 \n",
            "=================================================================\n",
            "Total params: 8655 (33.81 KB)\n",
            "Trainable params: 8431 (32.93 KB)\n",
            "Non-trainable params: 224 (896.00 Byte)\n",
            "_________________________________________________________________\n",
            "None\n",
            "Epoch 1/30\n",
            "64/64 [==============================] - 6s 36ms/step - loss: 0.9651 - accuracy: 0.5269 - val_loss: 0.9187 - val_accuracy: 0.5700 - lr: 0.0020\n",
            "Epoch 2/30\n",
            "64/64 [==============================] - 1s 23ms/step - loss: 0.8664 - accuracy: 0.5926 - val_loss: 0.8801 - val_accuracy: 0.6073 - lr: 0.0020\n",
            "Epoch 3/30\n",
            "64/64 [==============================] - 2s 32ms/step - loss: 0.7960 - accuracy: 0.6375 - val_loss: 0.7943 - val_accuracy: 0.6402 - lr: 0.0020\n",
            "Epoch 4/30\n",
            "64/64 [==============================] - 2s 32ms/step - loss: 0.7385 - accuracy: 0.6694 - val_loss: 0.7423 - val_accuracy: 0.6588 - lr: 0.0016\n",
            "Epoch 5/30\n",
            "64/64 [==============================] - 2s 29ms/step - loss: 0.7001 - accuracy: 0.6858 - val_loss: 0.7011 - val_accuracy: 0.6873 - lr: 0.0016\n",
            "Epoch 6/30\n",
            "64/64 [==============================] - 2s 30ms/step - loss: 0.6591 - accuracy: 0.7048 - val_loss: 0.6891 - val_accuracy: 0.6848 - lr: 0.0016\n",
            "Epoch 7/30\n",
            "64/64 [==============================] - 2s 24ms/step - loss: 0.6235 - accuracy: 0.7306 - val_loss: 0.6340 - val_accuracy: 0.7108 - lr: 0.0013\n",
            "Epoch 8/30\n",
            "64/64 [==============================] - 1s 23ms/step - loss: 0.5962 - accuracy: 0.7489 - val_loss: 0.6242 - val_accuracy: 0.7079 - lr: 0.0013\n",
            "Epoch 9/30\n",
            "64/64 [==============================] - 1s 14ms/step - loss: 0.5697 - accuracy: 0.7550 - val_loss: 0.5762 - val_accuracy: 0.7393 - lr: 0.0013\n",
            "Epoch 10/30\n",
            "64/64 [==============================] - 1s 12ms/step - loss: 0.5398 - accuracy: 0.7714 - val_loss: 0.5549 - val_accuracy: 0.7614 - lr: 0.0010\n",
            "Epoch 11/30\n",
            "64/64 [==============================] - 1s 12ms/step - loss: 0.5198 - accuracy: 0.7838 - val_loss: 0.5333 - val_accuracy: 0.7796 - lr: 0.0010\n",
            "Epoch 12/30\n",
            "64/64 [==============================] - 1s 12ms/step - loss: 0.5032 - accuracy: 0.7899 - val_loss: 0.5295 - val_accuracy: 0.7894 - lr: 0.0010\n",
            "Epoch 13/30\n",
            "64/64 [==============================] - 1s 12ms/step - loss: 0.4779 - accuracy: 0.8016 - val_loss: 0.4926 - val_accuracy: 0.8085 - lr: 8.1920e-04\n",
            "Epoch 14/30\n",
            "64/64 [==============================] - 1s 12ms/step - loss: 0.4688 - accuracy: 0.8037 - val_loss: 0.5098 - val_accuracy: 0.7771 - lr: 8.1920e-04\n",
            "Epoch 15/30\n",
            "64/64 [==============================] - 1s 12ms/step - loss: 0.4603 - accuracy: 0.8097 - val_loss: 0.4878 - val_accuracy: 0.8036 - lr: 8.1920e-04\n",
            "Epoch 16/30\n",
            "64/64 [==============================] - 1s 12ms/step - loss: 0.4406 - accuracy: 0.8201 - val_loss: 0.4708 - val_accuracy: 0.8144 - lr: 6.5536e-04\n",
            "Epoch 17/30\n",
            "64/64 [==============================] - 1s 12ms/step - loss: 0.4352 - accuracy: 0.8245 - val_loss: 0.4627 - val_accuracy: 0.8076 - lr: 6.5536e-04\n",
            "Epoch 18/30\n",
            "64/64 [==============================] - 1s 18ms/step - loss: 0.4227 - accuracy: 0.8338 - val_loss: 0.4445 - val_accuracy: 0.8208 - lr: 6.5536e-04\n",
            "Epoch 19/30\n",
            "64/64 [==============================] - 1s 20ms/step - loss: 0.4098 - accuracy: 0.8358 - val_loss: 0.4297 - val_accuracy: 0.8351 - lr: 5.2429e-04\n",
            "Epoch 20/30\n",
            "64/64 [==============================] - 1s 20ms/step - loss: 0.4079 - accuracy: 0.8403 - val_loss: 0.4244 - val_accuracy: 0.8351 - lr: 5.2429e-04\n",
            "Epoch 21/30\n",
            "64/64 [==============================] - 1s 21ms/step - loss: 0.4037 - accuracy: 0.8376 - val_loss: 0.4198 - val_accuracy: 0.8326 - lr: 5.2429e-04\n",
            "Epoch 22/30\n",
            "64/64 [==============================] - 1s 15ms/step - loss: 0.3863 - accuracy: 0.8464 - val_loss: 0.4152 - val_accuracy: 0.8409 - lr: 4.1943e-04\n",
            "Epoch 23/30\n",
            "64/64 [==============================] - 1s 12ms/step - loss: 0.3886 - accuracy: 0.8414 - val_loss: 0.4148 - val_accuracy: 0.8419 - lr: 4.1943e-04\n",
            "Epoch 24/30\n",
            "64/64 [==============================] - 1s 13ms/step - loss: 0.3794 - accuracy: 0.8482 - val_loss: 0.4179 - val_accuracy: 0.8380 - lr: 4.1943e-04\n",
            "Epoch 25/30\n",
            "64/64 [==============================] - 1s 12ms/step - loss: 0.3741 - accuracy: 0.8515 - val_loss: 0.4364 - val_accuracy: 0.8301 - lr: 3.3554e-04\n",
            "Epoch 26/30\n",
            "64/64 [==============================] - 1s 12ms/step - loss: 0.3718 - accuracy: 0.8499 - val_loss: 0.3989 - val_accuracy: 0.8508 - lr: 3.3554e-04\n",
            "Epoch 27/30\n",
            "64/64 [==============================] - 1s 12ms/step - loss: 0.3697 - accuracy: 0.8516 - val_loss: 0.4085 - val_accuracy: 0.8267 - lr: 3.3554e-04\n",
            "Epoch 28/30\n",
            "64/64 [==============================] - 1s 12ms/step - loss: 0.3658 - accuracy: 0.8550 - val_loss: 0.4032 - val_accuracy: 0.8370 - lr: 2.6844e-04\n",
            "Epoch 29/30\n",
            "64/64 [==============================] - 1s 12ms/step - loss: 0.3626 - accuracy: 0.8553 - val_loss: 0.3873 - val_accuracy: 0.8483 - lr: 2.6844e-04\n",
            "Epoch 30/30\n",
            "64/64 [==============================] - 1s 12ms/step - loss: 0.3588 - accuracy: 0.8608 - val_loss: 0.3869 - val_accuracy: 0.8547 - lr: 2.6844e-04\n",
            "64/64 [==============================] - 0s 2ms/step\n",
            "Non-quantized: acc - 0.854688267059401, f1 - 0.8274738426657892, prec - 0.8630289746632304, rec - 0.804897416339064\n",
            "Quantized: acc - 0.8537064310260186, f1 - 0.8256958748894919, prec - 0.8630162870949295, rec - 0.08468985557556152\n",
            "Non-quantized time: 0.34042954444885254; Quantized time: 0.08468985557556152 \n",
            "Non-quantized size: 280.525390625; Quantized size: 14.4453125 KB \n",
            "Non-quantized:  acc - 85.47 F1 - 82.75\n",
            "        prec - 86.30 rec - 80.49\n",
            "        time [ms] - 340.43 \n",
            "Quantized:  acc - 85.37 F1 - 82.57\n",
            "        prec - 86.30 rec - 80.29\n",
            "        time [ms] - 84.69 \n"
          ]
        }
      ],
      "source": [
        "import keras.utils\n",
        "\n",
        "# Seed Keras/TF and NumPy for reproducibility, then run a single 80/20 split (no CV).\n",
        "keras.utils.set_random_seed(params[\"seed\"])\n",
        "np.random.seed(params[\"seed\"])\n",
        "training_loop(X_data, y_data, params, False)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "4T9pJNkoKaep"
      },
      "outputs": [],
      "source": [
        "# NOTE: dead code -- earlier, fully commented-out draft of training_loop\n",
        "# (with module-level metric accumulators); superseded by the function above.\n",
        "# from sklearn.model_selection import KFold\n",
        "# import keras.utils\n",
        "# import time\n",
        "\n",
        "# total_accuracy = []\n",
        "# total_f1_score = []\n",
        "# total_precision = []\n",
        "# total_recall = []\n",
        "# total_non_quantized_time = []\n",
        "\n",
        "# total_accuracy_quant = []\n",
        "# total_f1_score_quant = []\n",
        "# total_precision_quant = []\n",
        "# total_recall_quant = []\n",
        "# total_quantized_time = []\n",
        "\n",
        "# def training_loop(X_data, y_data, params, crossvalid = False):\n",
        "#   if crossvalid:\n",
        "#     num_folds = 5\n",
        "#     kf = KFold(n_splits=num_folds, shuffle=True, random_state = params[\"seed\"])\n",
        "#     data_splits = [(X_data[train_ind], X_data[val_ind], y_data[train_ind], y_data[val_ind]) for train_ind, val_ind in kf.split(X_data)]\n",
        "#   else:\n",
        "#     num_folds = 1\n",
        "#     X_train, X_val, y_train, y_val = network_training.split_data(X_data, y_data, train_size=0.8)\n",
        "#     data_splits = [(X_train, X_val, y_train, y_val)]\n",
        "\n",
        "#   for fold, (X_train_fold, X_val_fold, y_train_fold, y_val_fold) in enumerate(data_splits):\n",
        "#       print(f\"Fold {fold+1}/{num_folds}\")\n",
        "\n",
        "#       y_train_fold = keras.utils.to_categorical(y_train_fold, num_classes=3)\n",
        "#       y_val_fold = keras.utils.to_categorical(y_val_fold, num_classes=3)\n",
        "\n",
        "#       X_train_fold = np.expand_dims(X_train_fold, axis=-1)\n",
        "#       X_val_fold = np.expand_dims(X_val_fold, axis=-1)\n",
        "\n",
        "#       model, accuracy, f1_score, precision, recall, non_quantized_time = network_training.train_model(params, X_train_fold, y_train_fold, X_val_fold, y_val_fold)\n",
        "#       tflite_quant_model = network_training.get_quantized_model(model, X_train_fold)\n",
        "#       interpreter = network_training.get_tflite_interpreter(tflite_quant_model)\n",
        "#       accuracy_quant, f1_score_quant, precision_quant, recall_quant, quantized_time = network_training.evaluate_quantized_metrics(interpreter, X_val_fold, y_val_fold)\n",
        "\n",
        "#       # start_time = time.time()\n",
        "#       # score = model.evaluate(X_val_fold, y_val_fold, verbose=0)\n",
        "#       # non_quantized_time = time.time() - start_time\n",
        "\n",
        "#       print(f\"\"\"Non-quantized: acc - {accuracy}, f1 - {f1_score}, prec - {precision}, rec - {recall}\"\"\")\n",
        "#       print(f\"\"\"Quantized: acc - {accuracy_quant}, f1 - {f1_score_quant}, prec - {precision_quant}, rec - {quantized_time}\"\"\")\n",
        "#       print(f\"\"\"Non-quantized time: {non_quantized_time}; Quantized time: {quantized_time} \"\"\")\n",
        "\n",
        "#       if fold == 0:\n",
        "#         print(model.summary())\n",
        "#         non_quantized_model_size, quantized_model_size =  network_training.compare_model_sizes(tflite_quant_model, model)\n",
        "#         print(f\"\"\"Non-quantized size: {non_quantized_model_size}; Quantized size: {quantized_model_size} KB \"\"\")\n",
        "\n",
        "#       total_accuracy.append(accuracy)\n",
        "#       total_f1_score.append(f1_score)\n",
        "#       total_precision.append(precision)\n",
        "#       total_recall.append(recall)\n",
        "#       total_non_quantized_time.append(non_quantized_time)\n",
        "\n",
        "#       total_accuracy_quant.append(accuracy_quant)\n",
        "#       total_f1_score_quant.append(f1_score_quant)\n",
        "#       total_precision_quant.append(precision_quant)\n",
        "#       total_recall_quant.append(recall_quant)\n",
        "#       total_quantized_time.append(quantized_time)\n",
        "\n",
        "#   mean_accuracies = np.mean(total_accuracy)\n",
        "#   mean_f1_scores = np.mean(total_f1_score)\n",
        "#   mean_precisions = np.mean(total_precision)\n",
        "#   mean_recalls = np.mean(total_recall)\n",
        "#   mean_times = np.mean(total_non_quantized_time)\n",
        "\n",
        "#   mean_accuracies_quant = np.mean(total_accuracy_quant)\n",
        "#   mean_f1_scores_quant = np.mean(total_f1_score_quant)\n",
        "#   mean_precisions_quant = np.mean(total_precision_quant)\n",
        "#   mean_recalls_quant = np.mean(total_recall_quant)\n",
        "#   mean_times_quant = np.mean(total_quantized_time)\n",
        "#   print(f\"\"\"Non-quantized:  acc - {mean_accuracies*100:.2f} F1 - {mean_f1_scores*100:.2f}\n",
        "#         prec - {mean_precisions*100:.2f} rec - {mean_recalls*100:.2f}\n",
        "#         time - {mean_times:.2f} \"\"\")\n",
        "\n",
        "#   print(f\"\"\"Quantized:  acc - {mean_accuracies_quant*100:.2f} F1 - {mean_f1_scores_quant*100:.2f}\n",
        "#         prec - {mean_precisions_quant*100:.2f} rec - {mean_recalls_quant*100:.2f}\n",
        "#         time - {mean_times_quant:.2f} \"\"\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "saOEMS6Jh86h"
      },
      "outputs": [],
      "source": [
        "# NOTE: dead code -- commented-out cross-validated run (crossvalid=True).\n",
        "# import keras.utils\n",
        "\n",
        "# keras.utils.set_random_seed(params[\"seed\"])\n",
        "# np.random.seed(params[\"seed\"])\n",
        "# training_loop(X_data, y_data, params, True)"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}