{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# [Keras Tuner 简介](https://www.tensorflow.org/tutorials/keras/keras_tuner?hl=zh-cn)\n",
    "## 概述\n",
    "Keras Tuner 是一个库，可帮助您为 TensorFlow 程序选择最佳的超参数集。为您的机器学习 (ML) 应用选择正确的超参数集，这一过程称为超参数调节或超调。\n",
    "\n",
    "超参数是控制训练过程和 ML 模型拓扑的变量。这些变量在训练过程中保持不变，并会直接影响 ML 程序的性能。超参数有两种类型：\n",
    "\n",
    "1. 模型超参数：影响模型的选择，例如隐藏层的数量和宽度\n",
    "2. 算法超参数：影响学习算法的速度和质量，例如随机梯度下降 (SGD) 的学习率以及 k 近邻 (KNN) 分类器的近邻数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-01-22 10:27:32.545224: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
      "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
     ]
    }
   ],
   "source": [
     "# keras_tuner is a separate package from TensorFlow: pip install keras-tuner\n",
     "import tensorflow as tf\n",
     "from tensorflow import keras\n",
     "import keras_tuner as kt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz\n",
      "29515/29515 [==============================] - 0s 9us/step\n",
      "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz\n",
      "26421880/26421880 [==============================] - 3s 0us/step\n",
      "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz\n",
      "5148/5148 [==============================] - 0s 0us/step\n",
      "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz\n",
      "4422102/4422102 [==============================] - 2s 0us/step\n"
     ]
    }
   ],
   "source": [
     "# Download Fashion-MNIST: (train, test) tuples of 28x28 grayscale images\n",
     "# and integer class labels (10 classes)\n",
     "(img_train, label_train), (img_test, label_test) = keras.datasets.fashion_mnist.load_data()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Normalize pixel values between 0 and 1\n",
     "# (cast to float32 first so the scaled arrays stay in single precision)\n",
     "img_train = img_train.astype('float32') / 255.0\n",
     "img_test = img_test.astype('float32') / 255.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "def model_builder(hp):\n",
     "  \"\"\"Build and compile a tunable Fashion-MNIST classifier for Keras Tuner.\n",
     "\n",
     "  Args:\n",
     "    hp: keras_tuner.HyperParameters object supplied by the tuner. Two\n",
     "      hyperparameters are registered here: 'units' (width of the hidden\n",
     "      Dense layer) and 'learning_rate' (Adam step size).\n",
     "\n",
     "  Returns:\n",
     "    A compiled keras.Sequential model: Flatten -> Dense(units, relu) ->\n",
     "    Dense(10) emitting raw logits (hence from_logits=True in the loss).\n",
     "  \"\"\"\n",
     "  model = keras.Sequential()\n",
     "  model.add(keras.layers.Flatten(input_shape=(28, 28)))\n",
     "\n",
     "  # Tune the number of units in the first Dense layer\n",
     "  # Choose an optimal value between 32-512\n",
     "  hp_units = hp.Int('units', min_value=32, max_value=512, step=32)\n",
     "  model.add(keras.layers.Dense(units=hp_units, activation='relu'))\n",
     "  model.add(keras.layers.Dense(10))\n",
     "\n",
     "  # Tune the learning rate for the optimizer\n",
     "  # Choose an optimal value from 0.01, 0.001, or 0.0001\n",
     "  hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])\n",
     "\n",
     "  model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),\n",
     "                loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n",
     "                metrics=['accuracy'])\n",
     "\n",
     "  return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-01-22 10:28:44.261649: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1639] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 10400 MB memory:  -> device: 0, name: NVIDIA GeForce RTX 3060, pci bus id: 0000:09:00.0, compute capability: 8.6\n"
     ]
    }
   ],
   "source": [
     "# Hyperband tuner: trains many configurations on small epoch budgets and\n",
     "# promotes the best-performing ones to larger budgets (up to max_epochs),\n",
     "# keeping roughly 1/factor of the trials at each round.\n",
     "# Trial results are persisted under directory/project_name, so re-running\n",
     "# this cell resumes the existing search instead of starting over.\n",
     "tuner = kt.Hyperband(model_builder,\n",
     "                     objective='val_accuracy',\n",
     "                     max_epochs=10,\n",
     "                     factor=3,\n",
     "                     directory='./test/',\n",
     "                     project_name='intro_to_kt')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
     "# End a trial early when val_loss has not improved for 5 consecutive epochs\n",
     "stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Trial 30 Complete [00h 01m 10s]\n",
      "val_accuracy: 0.8820833563804626\n",
      "\n",
      "Best val_accuracy So Far: 0.8920000195503235\n",
      "Total elapsed time: 00h 15m 04s\n",
      "\n",
      "The hyperparameter search is complete. The optimal number of units in the first densely-connected\n",
      "layer is 352 and the optimal learning rate for the optimizer\n",
      "is 0.001.\n",
      "\n"
     ]
    }
   ],
   "source": [
     "# Run the hyperparameter search. Hyperband controls each trial's actual\n",
     "# epoch budget (capped by max_epochs), and the EarlyStopping callback can\n",
     "# cut a trial short when validation loss plateaus.\n",
     "tuner.search(img_train, label_train, epochs=50, validation_split=0.2, callbacks=[stop_early])\n",
     "\n",
     "# Get the optimal hyperparameters\n",
     "best_hps=tuner.get_best_hyperparameters(num_trials=1)[0]\n",
     "\n",
     "print(f\"\"\"\n",
     "The hyperparameter search is complete. The optimal number of units in the first densely-connected\n",
     "layer is {best_hps.get('units')} and the optimal learning rate for the optimizer\n",
     "is {best_hps.get('learning_rate')}.\n",
     "\"\"\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/50\n",
      "1500/1500 [==============================] - 7s 4ms/step - loss: 0.4973 - accuracy: 0.8246 - val_loss: 0.4161 - val_accuracy: 0.8488\n",
      "Epoch 2/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.3751 - accuracy: 0.8639 - val_loss: 0.3820 - val_accuracy: 0.8601\n",
      "Epoch 3/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.3324 - accuracy: 0.8784 - val_loss: 0.3513 - val_accuracy: 0.8712\n",
      "Epoch 4/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.3087 - accuracy: 0.8852 - val_loss: 0.3244 - val_accuracy: 0.8826\n",
      "Epoch 5/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2871 - accuracy: 0.8936 - val_loss: 0.3453 - val_accuracy: 0.8755\n",
      "Epoch 6/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2734 - accuracy: 0.8975 - val_loss: 0.3339 - val_accuracy: 0.8768\n",
      "Epoch 7/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2581 - accuracy: 0.9048 - val_loss: 0.3206 - val_accuracy: 0.8865\n",
      "Epoch 8/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2456 - accuracy: 0.9074 - val_loss: 0.3228 - val_accuracy: 0.8907\n",
      "Epoch 9/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2356 - accuracy: 0.9124 - val_loss: 0.3150 - val_accuracy: 0.8928\n",
      "Epoch 10/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2282 - accuracy: 0.9133 - val_loss: 0.3171 - val_accuracy: 0.8942\n",
      "Epoch 11/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2165 - accuracy: 0.9179 - val_loss: 0.3515 - val_accuracy: 0.8852\n",
      "Epoch 12/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2101 - accuracy: 0.9209 - val_loss: 0.3586 - val_accuracy: 0.8790\n",
      "Epoch 13/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2028 - accuracy: 0.9239 - val_loss: 0.3187 - val_accuracy: 0.8923\n",
      "Epoch 14/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1956 - accuracy: 0.9252 - val_loss: 0.3194 - val_accuracy: 0.8920\n",
      "Epoch 15/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1874 - accuracy: 0.9289 - val_loss: 0.3243 - val_accuracy: 0.8944\n",
      "Epoch 16/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1815 - accuracy: 0.9326 - val_loss: 0.3282 - val_accuracy: 0.8935\n",
      "Epoch 17/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1748 - accuracy: 0.9326 - val_loss: 0.3346 - val_accuracy: 0.8912\n",
      "Epoch 18/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1680 - accuracy: 0.9355 - val_loss: 0.3709 - val_accuracy: 0.8915\n",
      "Epoch 19/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1651 - accuracy: 0.9381 - val_loss: 0.3446 - val_accuracy: 0.8928\n",
      "Epoch 20/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1588 - accuracy: 0.9409 - val_loss: 0.3334 - val_accuracy: 0.8951\n",
      "Epoch 21/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1586 - accuracy: 0.9402 - val_loss: 0.3623 - val_accuracy: 0.8882\n",
      "Epoch 22/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1507 - accuracy: 0.9442 - val_loss: 0.3640 - val_accuracy: 0.8898\n",
      "Epoch 23/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1461 - accuracy: 0.9452 - val_loss: 0.3624 - val_accuracy: 0.8909\n",
      "Epoch 24/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1422 - accuracy: 0.9453 - val_loss: 0.3628 - val_accuracy: 0.8937\n",
      "Epoch 25/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1411 - accuracy: 0.9463 - val_loss: 0.3597 - val_accuracy: 0.8948\n",
      "Epoch 26/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1332 - accuracy: 0.9499 - val_loss: 0.3722 - val_accuracy: 0.8914\n",
      "Epoch 27/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1313 - accuracy: 0.9502 - val_loss: 0.3806 - val_accuracy: 0.8943\n",
      "Epoch 28/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1274 - accuracy: 0.9513 - val_loss: 0.4020 - val_accuracy: 0.8893\n",
      "Epoch 29/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1252 - accuracy: 0.9528 - val_loss: 0.3956 - val_accuracy: 0.8935\n",
      "Epoch 30/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1175 - accuracy: 0.9558 - val_loss: 0.3944 - val_accuracy: 0.8938\n",
      "Epoch 31/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1202 - accuracy: 0.9558 - val_loss: 0.4225 - val_accuracy: 0.8827\n",
      "Epoch 32/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1168 - accuracy: 0.9552 - val_loss: 0.3928 - val_accuracy: 0.8956\n",
      "Epoch 33/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1129 - accuracy: 0.9584 - val_loss: 0.4124 - val_accuracy: 0.8928\n",
      "Epoch 34/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1117 - accuracy: 0.9586 - val_loss: 0.4050 - val_accuracy: 0.8952\n",
      "Epoch 35/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1065 - accuracy: 0.9597 - val_loss: 0.4428 - val_accuracy: 0.8898\n",
      "Epoch 36/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1048 - accuracy: 0.9605 - val_loss: 0.4255 - val_accuracy: 0.8926\n",
      "Epoch 37/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0988 - accuracy: 0.9630 - val_loss: 0.4431 - val_accuracy: 0.8959\n",
      "Epoch 38/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1018 - accuracy: 0.9624 - val_loss: 0.4430 - val_accuracy: 0.8947\n",
      "Epoch 39/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0967 - accuracy: 0.9630 - val_loss: 0.4455 - val_accuracy: 0.8955\n",
      "Epoch 40/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0934 - accuracy: 0.9651 - val_loss: 0.4429 - val_accuracy: 0.8949\n",
      "Epoch 41/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0940 - accuracy: 0.9641 - val_loss: 0.4567 - val_accuracy: 0.8955\n",
      "Epoch 42/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0931 - accuracy: 0.9650 - val_loss: 0.4506 - val_accuracy: 0.8965\n",
      "Epoch 43/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0868 - accuracy: 0.9684 - val_loss: 0.4432 - val_accuracy: 0.8975\n",
      "Epoch 44/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0871 - accuracy: 0.9665 - val_loss: 0.5198 - val_accuracy: 0.8912\n",
      "Epoch 45/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0848 - accuracy: 0.9686 - val_loss: 0.4876 - val_accuracy: 0.8940\n",
      "Epoch 46/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0803 - accuracy: 0.9707 - val_loss: 0.4849 - val_accuracy: 0.8900\n",
      "Epoch 47/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0826 - accuracy: 0.9693 - val_loss: 0.4850 - val_accuracy: 0.8899\n",
      "Epoch 48/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0815 - accuracy: 0.9691 - val_loss: 0.4818 - val_accuracy: 0.8980\n",
      "Epoch 49/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0762 - accuracy: 0.9713 - val_loss: 0.5244 - val_accuracy: 0.8893\n",
      "Epoch 50/50\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0785 - accuracy: 0.9705 - val_loss: 0.5296 - val_accuracy: 0.8928\n",
      "Best epoch: 48\n"
     ]
    }
   ],
   "source": [
     "# Build the model with the optimal hyperparameters and train it on the data for 50 epochs\n",
     "model = tuner.hypermodel.build(best_hps)\n",
     "history = model.fit(img_train, label_train, epochs=50, validation_split=0.2)\n",
     "\n",
     "# Locate the epoch (1-based) with the highest validation accuracy;\n",
     "# it is reused below as the training budget for the final model\n",
     "val_acc_per_epoch = history.history['val_accuracy']\n",
     "best_epoch = val_acc_per_epoch.index(max(val_acc_per_epoch)) + 1\n",
     "print('Best epoch: %d' % (best_epoch,))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/48\n",
      "1500/1500 [==============================] - 7s 4ms/step - loss: 0.4933 - accuracy: 0.8261 - val_loss: 0.4358 - val_accuracy: 0.8443\n",
      "Epoch 2/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.3689 - accuracy: 0.8659 - val_loss: 0.3682 - val_accuracy: 0.8665\n",
      "Epoch 3/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.3321 - accuracy: 0.8786 - val_loss: 0.3568 - val_accuracy: 0.8718\n",
      "Epoch 4/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.3092 - accuracy: 0.8866 - val_loss: 0.3348 - val_accuracy: 0.8814\n",
      "Epoch 5/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2860 - accuracy: 0.8946 - val_loss: 0.3386 - val_accuracy: 0.8787\n",
      "Epoch 6/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2726 - accuracy: 0.8978 - val_loss: 0.3165 - val_accuracy: 0.8896\n",
      "Epoch 7/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2593 - accuracy: 0.9022 - val_loss: 0.3356 - val_accuracy: 0.8782\n",
      "Epoch 8/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2463 - accuracy: 0.9086 - val_loss: 0.3167 - val_accuracy: 0.8873\n",
      "Epoch 9/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2379 - accuracy: 0.9110 - val_loss: 0.3175 - val_accuracy: 0.8849\n",
      "Epoch 10/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2277 - accuracy: 0.9154 - val_loss: 0.3268 - val_accuracy: 0.8891\n",
      "Epoch 11/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2175 - accuracy: 0.9166 - val_loss: 0.3138 - val_accuracy: 0.8911\n",
      "Epoch 12/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2091 - accuracy: 0.9215 - val_loss: 0.3441 - val_accuracy: 0.8874\n",
      "Epoch 13/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.2031 - accuracy: 0.9236 - val_loss: 0.3250 - val_accuracy: 0.8904\n",
      "Epoch 14/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1953 - accuracy: 0.9273 - val_loss: 0.3253 - val_accuracy: 0.8903\n",
      "Epoch 15/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1880 - accuracy: 0.9293 - val_loss: 0.3282 - val_accuracy: 0.8913\n",
      "Epoch 16/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1801 - accuracy: 0.9323 - val_loss: 0.3286 - val_accuracy: 0.8935\n",
      "Epoch 17/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1755 - accuracy: 0.9327 - val_loss: 0.3542 - val_accuracy: 0.8894\n",
      "Epoch 18/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1700 - accuracy: 0.9361 - val_loss: 0.3403 - val_accuracy: 0.8953\n",
      "Epoch 19/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1648 - accuracy: 0.9373 - val_loss: 0.3478 - val_accuracy: 0.8940\n",
      "Epoch 20/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1606 - accuracy: 0.9402 - val_loss: 0.3480 - val_accuracy: 0.8982\n",
      "Epoch 21/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1536 - accuracy: 0.9418 - val_loss: 0.3803 - val_accuracy: 0.8827\n",
      "Epoch 22/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1487 - accuracy: 0.9427 - val_loss: 0.3444 - val_accuracy: 0.8972\n",
      "Epoch 23/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1466 - accuracy: 0.9440 - val_loss: 0.3716 - val_accuracy: 0.8969\n",
      "Epoch 24/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1417 - accuracy: 0.9465 - val_loss: 0.3669 - val_accuracy: 0.8892\n",
      "Epoch 25/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1361 - accuracy: 0.9491 - val_loss: 0.3706 - val_accuracy: 0.8925\n",
      "Epoch 26/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1330 - accuracy: 0.9494 - val_loss: 0.4138 - val_accuracy: 0.8873\n",
      "Epoch 27/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1295 - accuracy: 0.9511 - val_loss: 0.3805 - val_accuracy: 0.8934\n",
      "Epoch 28/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1267 - accuracy: 0.9526 - val_loss: 0.3820 - val_accuracy: 0.8950\n",
      "Epoch 29/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1225 - accuracy: 0.9532 - val_loss: 0.3772 - val_accuracy: 0.8953\n",
      "Epoch 30/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1179 - accuracy: 0.9563 - val_loss: 0.4035 - val_accuracy: 0.8910\n",
      "Epoch 31/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1162 - accuracy: 0.9563 - val_loss: 0.4087 - val_accuracy: 0.8914\n",
      "Epoch 32/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1140 - accuracy: 0.9564 - val_loss: 0.4215 - val_accuracy: 0.8973\n",
      "Epoch 33/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1102 - accuracy: 0.9577 - val_loss: 0.4483 - val_accuracy: 0.8859\n",
      "Epoch 34/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1055 - accuracy: 0.9598 - val_loss: 0.4352 - val_accuracy: 0.8922\n",
      "Epoch 35/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1034 - accuracy: 0.9607 - val_loss: 0.4476 - val_accuracy: 0.8927\n",
      "Epoch 36/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.1030 - accuracy: 0.9613 - val_loss: 0.4153 - val_accuracy: 0.8957\n",
      "Epoch 37/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0981 - accuracy: 0.9626 - val_loss: 0.4587 - val_accuracy: 0.8940\n",
      "Epoch 38/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0975 - accuracy: 0.9632 - val_loss: 0.4456 - val_accuracy: 0.8968\n",
      "Epoch 39/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0967 - accuracy: 0.9636 - val_loss: 0.4301 - val_accuracy: 0.8988\n",
      "Epoch 40/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0949 - accuracy: 0.9639 - val_loss: 0.4399 - val_accuracy: 0.8968\n",
      "Epoch 41/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0879 - accuracy: 0.9667 - val_loss: 0.4658 - val_accuracy: 0.8951\n",
      "Epoch 42/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0898 - accuracy: 0.9668 - val_loss: 0.4450 - val_accuracy: 0.8965\n",
      "Epoch 43/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0869 - accuracy: 0.9673 - val_loss: 0.4751 - val_accuracy: 0.8973\n",
      "Epoch 44/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0874 - accuracy: 0.9677 - val_loss: 0.4710 - val_accuracy: 0.8961\n",
      "Epoch 45/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0841 - accuracy: 0.9676 - val_loss: 0.5127 - val_accuracy: 0.8873\n",
      "Epoch 46/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0820 - accuracy: 0.9687 - val_loss: 0.4925 - val_accuracy: 0.8952\n",
      "Epoch 47/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0782 - accuracy: 0.9710 - val_loss: 0.5118 - val_accuracy: 0.8903\n",
      "Epoch 48/48\n",
      "1500/1500 [==============================] - 6s 4ms/step - loss: 0.0806 - accuracy: 0.9699 - val_loss: 0.4779 - val_accuracy: 0.8959\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.src.callbacks.History at 0x148a3aa42fb0>"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Instantiate a fresh model with the best hyperparameters\n",
     "hypermodel = tuner.hypermodel.build(best_hps)\n",
     "\n",
     "# Retrain from scratch, but only up to the best epoch found above\n",
     "hypermodel.fit(img_train, label_train, epochs=best_epoch, validation_split=0.2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "313/313 [==============================] - 1s 3ms/step - loss: 0.5589 - accuracy: 0.8904\n",
      "[test loss, test accuracy]: [0.5589279532432556, 0.8903999924659729]\n"
     ]
    }
   ],
   "source": [
     "# Final evaluation on the held-out test set\n",
     "eval_result = hypermodel.evaluate(img_test, label_test)\n",
     "print(\"[test loss, test accuracy]:\", eval_result)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
