{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "uuid": "3f1292c4-98ec-4a41-92fd-511bc5eb5064"
   },
   "source": [
    "## 导入工具包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "uuid": "43cf4894-87c6-417c-ad11-7a7abdbbffac"
   },
   "outputs": [],
   "source": [
    "import cv2\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from tensorflow import keras\n",
    "\n",
    "from tensorflow.keras.layers import Conv2D\n",
    "from tensorflow.keras.layers import Conv2DTranspose\n",
    "from tensorflow.keras.layers import InputLayer\n",
    "from tensorflow.keras.models import Sequential"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "uuid": "44451be6-8ac5-44d7-9f41-9950805748a3"
   },
   "source": [
    "## 读取图片"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "uuid": "94e5da7b-d869-4e9e-a2bb-bf0bd9e175d9"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(1080, 1920, 3)"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Load the high-resolution ground-truth frame and scale pixels to [0, 1].\n",
     "# NOTE(review): cv2.imread returns None for a missing/unreadable file, which\n",
     "# would make the division raise a TypeError - the hard-coded path must exist.\n",
     "path = \"./h_GT/Youku_00000_h_GT/001.bmp\"\n",
     "img_GT = cv2.imread(path)/255.0\n",
     "img_GT.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "uuid": "98573f38-420f-4081-ac46-e8df62314db2"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(270, 480, 3)"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Load the matching low-resolution frame (1/4 size each side) scaled to [0, 1].\n",
     "# NOTE(review): cv2.imread returns None for a missing/unreadable file, which\n",
     "# would make the division raise a TypeError - the hard-coded path must exist.\n",
     "path = \"./l/Youku_00000_l/001.bmp\"\n",
     "img_l = cv2.imread(path)/255.0\n",
     "img_l.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "uuid": "0c48a079-c0be-4f0e-829a-6ab37d8a02aa"
   },
   "source": [
    "# FSRCNN"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "uuid": "bae7fd21-f566-4d7e-bd93-47dfbf2ba0e5"
   },
   "source": [
    "## 实现FSRCNN网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "uuid": "178eb476-ef83-4e75-a404-5b8a15819a25"
   },
   "outputs": [],
   "source": [
    "def fsrcnn():\n",
    "\n",
    "    model = Sequential()\n",
    "    model.add(InputLayer(input_shape=(270, 480, 3)))\n",
    "    \n",
    "    # first_part\n",
    "    model.add(Conv2D(56, 5, padding='same', activation='relu'))\n",
    "    \n",
    "    # mid_part\n",
    "    model.add(Conv2D(12, 1, padding='same', activation='relu'))\n",
    "    for i in range(4):\n",
    "        model.add(Conv2D(12, 3, padding='same', activation='relu'))\n",
    "        \n",
    "    # last_part\n",
    "    model.add(Conv2DTranspose(3, 9, strides=4, padding='same',))\n",
    "    \n",
    "    model.compile(optimizer=tf.optimizers.Adam(1e-1), loss=tf.losses.mse, metrics=['mse'])\n",
    "    return model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "uuid": "c1544098-30c1-4749-8850-c1447a501b1a"
   },
   "source": [
    "## FSRCNN模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "uuid": "717f05b7-415e-453c-9897-dba299726599"
   },
   "outputs": [],
   "source": [
     "# Instantiate and compile the FSRCNN model.\n",
     "model = fsrcnn()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "uuid": "462db850-a148-4196-bc37-9ef75528d184"
   },
   "outputs": [],
   "source": [
     "# Cut the learning rate by 10x when val_loss has plateaued for 6 epochs.\n",
     "plateau = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', verbose=0, mode='min', factor=0.10, patience=6)\n",
     "# Stop training once val_loss has not improved for 25 epochs.\n",
     "early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', verbose=0, mode='min', patience=25)\n",
     "# Save only the weights with the best (lowest) val_loss seen so far.\n",
     "checkpoint = keras.callbacks.ModelCheckpoint('fsrcnn.h5', monitor='val_loss', verbose=0, mode='min', save_best_only=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "uuid": "4a119cf7-ca97-4562-ac34-eda1f3f3a2f1"
   },
   "outputs": [],
   "source": [
     "# Training data: the single LR/HR frame pair duplicated to form a batch of 2.\n",
     "# NOTE(review): training (and validating) on one duplicated image only\n",
     "# smoke-tests the pipeline; it cannot learn a generalizable SR mapping.\n",
     "x = np.array([img_l,img_l])\n",
     "y = np.array([img_GT,img_GT])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "uuid": "38165535-f8c6-4378-9daf-cd67f6810969"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "1/1 [==============================] - 0s 402ms/step - loss: 0.1747 - mse: 0.1747 - val_loss: 73038480.0000 - val_mse: 73038480.0000\n",
      "Epoch 2/10\n",
      "1/1 [==============================] - 0s 276ms/step - loss: 73038480.0000 - mse: 73038480.0000 - val_loss: 29.3217 - val_mse: 29.3217\n",
      "Epoch 3/10\n",
      "1/1 [==============================] - 0s 273ms/step - loss: 29.3217 - mse: 29.3217 - val_loss: 0.1874 - val_mse: 0.1874\n",
      "Epoch 4/10\n",
      "1/1 [==============================] - 0s 258ms/step - loss: 0.1874 - mse: 0.1874 - val_loss: 5.7012 - val_mse: 5.7012\n",
      "Epoch 5/10\n",
      "1/1 [==============================] - 0s 294ms/step - loss: 5.7012 - mse: 5.7012 - val_loss: 0.1751 - val_mse: 0.1751\n",
      "Epoch 6/10\n",
      "1/1 [==============================] - 0s 253ms/step - loss: 0.1751 - mse: 0.1751 - val_loss: 0.2034 - val_mse: 0.2034\n",
      "Epoch 7/10\n",
      "1/1 [==============================] - 0s 255ms/step - loss: 0.2034 - mse: 0.2034 - val_loss: 0.2265 - val_mse: 0.2265\n",
      "Epoch 8/10\n",
      "1/1 [==============================] - 0s 250ms/step - loss: 0.2265 - mse: 0.2265 - val_loss: 0.2365 - val_mse: 0.2365\n",
      "Epoch 9/10\n",
      "1/1 [==============================] - 0s 275ms/step - loss: 0.2365 - mse: 0.2365 - val_loss: 0.2292 - val_mse: 0.2292\n",
      "Epoch 10/10\n",
      "1/1 [==============================] - 0s 263ms/step - loss: 0.2292 - mse: 0.2292 - val_loss: 0.2054 - val_mse: 0.2054\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<tensorflow.python.keras.callbacks.History at 0x651fd3f10>"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Train the model. Note the validation set is the training set itself, so\n",
     "# val_loss mirrors loss and the callbacks only monitor training fit.\n",
     "model.fit(x, y, epochs=10, batch_size=2, verbose=1, shuffle=True, validation_data=(x, y), callbacks=[plateau, early_stopping, checkpoint])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "uuid": "754a5660-3e53-4156-9869-e444d84cad00"
   },
   "source": [
    "## FSRCNN模型验证"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "uuid": "6bb46c68-31b3-429c-9dfe-b3e1796c786b"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.20539522171020508, 0.20539522171020508]"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Evaluate on the (training) data; returns [loss, mse].\n",
     "model.evaluate(x, y, verbose=0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "uuid": "747fef28-9222-47ff-94b7-7d8079c8d2dd"
   },
   "source": [
    "## FSRCNN模型预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "uuid": "bf4c5bf3-52d7-404b-8f48-17aea15150a0"
   },
   "outputs": [],
   "source": [
     "# Run inference one image at a time; outputs approximate the [0, 1]-scaled\n",
     "# ground truth the model was trained against (targets were img_GT/255.0).\n",
     "pic_super = model.predict(x, verbose=0, batch_size=1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "uuid": "0569f4e3-2a0d-4df2-ac81-00d72389e047"
   },
   "source": [
    "## 保存图片查看"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "uuid": "8b39e540-823e-4bfa-8b4f-cf55af09132d"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "True"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "cv2.imwrite(\"./fsrcnn_00.bmp\", pic_super[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "uuid": "ce984059-653b-45fd-80f8-63c3440dce5a"
   },
   "source": [
    "## 实现ESPCN网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "uuid": "a7dda8db-9dbf-4f0c-9916-39445644ee88"
   },
   "outputs": [],
   "source": [
    "def espcn():\n",
    "    inputs = keras.layers.Input(shape=(270, 480, 3))\n",
    "    cnn = keras.layers.Conv2D(64, 5, padding='same', activation='relu')(inputs)\n",
    "    cnn = keras.layers.Conv2D(32, 3, padding='same', activation='relu')(cnn)\n",
    "    cnn = keras.layers.Conv2D(3 * 4 **2, 3, padding='same')(cnn)\n",
    "    cnn = tf.reshape(cnn, [-1, 270, 480, 4, 4, 3])\n",
    "    cnn = tf.transpose(cnn, perm=[0, 1, 3, 2, 4, 5]) \n",
    "    outputs = tf.reshape(cnn, [-1, 270 * 4, 480 * 4, 3])\n",
    "    \n",
    "    model = keras.models.Model(inputs=[inputs], outputs=[outputs])\n",
    "    model.compile(optimizer=tf.optimizers.Adam(1e-1), loss=tf.losses.mse, metrics=['mse'])\n",
    "    return model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "uuid": "37d7e0f1-4782-4a6c-9781-c5fb19f063f1"
   },
   "source": [
    "## ESPCN模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "uuid": "d425b3a7-5999-4815-9274-71fa677886a9"
   },
   "outputs": [],
   "source": [
     "# Instantiate and compile the ESPCN model (replaces the FSRCNN `model`).\n",
     "model = espcn()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "uuid": "016eb2a7-ae76-4cf3-acc7-79984f831d0f"
   },
   "outputs": [],
   "source": [
     "# Cut the learning rate by 10x when val_loss has plateaued for 6 epochs.\n",
     "plateau = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', verbose=0, mode='min', factor=0.10, patience=6)\n",
     "# Stop training once val_loss has not improved for 25 epochs.\n",
     "early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', verbose=0, mode='min', patience=25)\n",
     "# Save only the weights with the best (lowest) val_loss seen so far.\n",
     "checkpoint = keras.callbacks.ModelCheckpoint('espcn.h5', monitor='val_loss', verbose=0, mode='min', save_best_only=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "uuid": "606d3d17-de96-4380-a648-2c08b53b9b16"
   },
   "outputs": [],
   "source": [
     "# Training data: the single LR/HR frame pair duplicated to form a batch of 2.\n",
     "# NOTE(review): training (and validating) on one duplicated image only\n",
     "# smoke-tests the pipeline; it cannot learn a generalizable SR mapping.\n",
     "x = np.array([img_l,img_l])\n",
     "y = np.array([img_GT,img_GT])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "uuid": "e0574122-0511-4d91-b97d-172dda2b30c8"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "1/1 [==============================] - 0s 225ms/step - loss: 0.1809 - mse: 0.1809 - val_loss: 113234.0312 - val_mse: 113234.0312\n",
      "Epoch 2/10\n",
      "1/1 [==============================] - 0s 134ms/step - loss: 113234.0312 - mse: 113234.0312 - val_loss: 3.7164 - val_mse: 3.7164\n",
      "Epoch 3/10\n",
      "1/1 [==============================] - 0s 133ms/step - loss: 3.7164 - mse: 3.7164 - val_loss: 0.5279 - val_mse: 0.5279\n",
      "Epoch 4/10\n",
      "1/1 [==============================] - 0s 116ms/step - loss: 0.5279 - mse: 0.5279 - val_loss: 1.0654 - val_mse: 1.0654\n",
      "Epoch 5/10\n",
      "1/1 [==============================] - 0s 116ms/step - loss: 1.0654 - mse: 1.0654 - val_loss: 171.0230 - val_mse: 171.0230\n",
      "Epoch 6/10\n",
      "1/1 [==============================] - 0s 118ms/step - loss: 171.0230 - mse: 171.0230 - val_loss: 3.2614 - val_mse: 3.2614\n",
      "Epoch 7/10\n",
      "1/1 [==============================] - 0s 119ms/step - loss: 3.2614 - mse: 3.2614 - val_loss: 18.9201 - val_mse: 18.9201\n",
      "Epoch 8/10\n",
      "1/1 [==============================] - 0s 117ms/step - loss: 18.9201 - mse: 18.9201 - val_loss: 4.9444 - val_mse: 4.9444\n",
      "Epoch 9/10\n",
      "1/1 [==============================] - 0s 118ms/step - loss: 4.9444 - mse: 4.9444 - val_loss: 0.8409 - val_mse: 0.8409\n",
      "Epoch 10/10\n",
      "1/1 [==============================] - 0s 117ms/step - loss: 0.8409 - mse: 0.8409 - val_loss: 0.7238 - val_mse: 0.7238\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<tensorflow.python.keras.callbacks.History at 0x13bc42c90>"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Train the model. Note the validation set is the training set itself, so\n",
     "# val_loss mirrors loss and the callbacks only monitor training fit.\n",
     "model.fit(x, y, epochs=10, batch_size=2, verbose=1, shuffle=True, validation_data=(x, y), callbacks=[plateau, early_stopping, checkpoint])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "uuid": "94e510bb-785d-4910-a3b0-95ab8811de80"
   },
   "source": [
    "## ESPCN模型验证"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "uuid": "f2319e67-cee3-4e83-b6a3-62be55232312"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.7238118648529053, 0.7238118648529053]"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Evaluate on the (training) data; returns [loss, mse].\n",
     "model.evaluate(x, y, verbose=0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "uuid": "bb956872-2195-4631-863d-2027e4c63b5a"
   },
   "source": [
    "## ESPCN模型预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "uuid": "5a311a2e-2cf2-4c08-aa00-3a531ea253e3"
   },
   "outputs": [],
   "source": [
     "# Run inference one image at a time; outputs approximate the [0, 1]-scaled\n",
     "# ground truth the model was trained against (targets were img_GT/255.0).\n",
     "pic_super = model.predict(x, verbose=0, batch_size=1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "uuid": "89ab20c1-7369-494c-8b70-ff3e9e19f026"
   },
   "source": [
    "## 保存图片查看"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "uuid": "78a962c6-74e6-4536-90ec-b305010e6768"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "True"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "cv2.imwrite(\"./espcn_00.bmp\", pic_super[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "uuid": "1f26930d-9e1e-4f1f-b783-e5f7ec4f2165"
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  },
  "tianchi_metadata": {
   "competitions": [],
   "datasets": [],
   "description": "",
   "notebookId": "60367",
   "source": "ailab"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
