{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "img_width = 28\n",
    "img_height = 28\n",
    "channels = 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = 500\n",
    "num_epochs = 80\n",
    "iterations = 3\n",
    "nb_augmentation = 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "fashion_classes = {0:'T恤',\n",
    "1:'裤子',\n",
    "2:'套衫',\n",
    "3:'裙子',\n",
    "4:'外套',\n",
    "5:'凉鞋',\n",
    "6:'汗衫',\n",
    "7:'运动鞋',\n",
    "8:'包',\n",
    "9:'踝靴',}\n",
    "mnist_classes =[i for i in range (10)]\n",
    "num_classes =10"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train Samples 60000\n",
      "Test Samples 10000\n"
     ]
    }
   ],
   "source": [
    "import tensorflow_datasets as tfds\n",
    "train_fasion_mnist=tfds.as_numpy(tfds.load(\"fashion_mnist\",split=\"train\",data_dir=\"./\",download=False,batch_size=-1))\n",
    "X_train,y_train=train_fasion_mnist[\"image\"],train_fasion_mnist[\"label\"]\n",
    "test_fasion_mnist = tfds.as_numpy(tfds.load(\"fashion_mnist\",split=\"test\",data_dir=\"./\",download=False,batch_size=-1))\n",
    "X_test,y_test=test_fasion_mnist[\"image\"],test_fasion_mnist[\"label\"]\n",
    "print(\"Train Samples\",len(X_train))\n",
    "print(\"Test Samples\",len(X_test))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAASUAAAElCAYAAACiZ/R3AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAADxhJREFUeJzt3VtrnOUaxvFnsplMdm12pU3dZKRRKfWgLY20FoqgUNQDP4B4JH4PQfBjeKogelT0qC3YIghVBNM0piWmaWhqmu1kv511tGB59PyzeMPcy/X/HV/cM5lMr77C3dtSvV5PkhRFU6PfgCT9J0tJUiiWkqRQLCVJoVhKkkKxlCSFYilJCsVSkhSKpSQpFEtJUigthwkPDAzUq9XqEb2Vf679/f1sZmdnB806ODhAuebm5sJyra2taFaRtre3UY7+Myn6eTTiZ/1/8csvv8zX6/UTudyhSqlaraZ79+799+8qAPIlLpVKhb7m0tJSNjMzM4Nmra+vo1xfXx/KdXd3ZzODg4NoVpEePXqEcqTwU0rp+PHjKHfq1CmU0+GVSqXHJOd/vkkKxVKSFIqlJCkUS0lSKJaSpFAsJUmhWEqSQjnUnlJkdImuyB2kmzdvotzs7Gw2Uy6X0azTp0+jXKVSQTmypzQ+Po5mffnllyj3+eefZzMtLeyrOTU1hXK//voryjU15f+evn79OprV09ODcvo7n5QkhWIpSQrFUpIUiqUkKRRLSVIolpKkUCwlSaFYSpJC+ccsTxa5FPnNN9+gHD24Njw8nM3Q5U96oXJychLlarVaNjM/P49mHTt2DOX+/PPPbIa8r5T4RckzZ86g3MLCQjZz48YNNOu9995Duf7+/myGfj+oog8ZFsknJUmhWEqSQrGUJIViKUkKxVKSFIqlJCkUS0lSKJaSpFAsJUmhhN/oLvrMLdlO3tzcRLOq1SrKbWxsZDP0/2FPz+ZubW2hHPnfY9NNbbrBTE/YEh0dHSi3vb2NcuQ88N7eHpp1//59lLt27Vo2E3kDu2g+KUkKxVKSFIqlJCkUS0lSKJaSpFAsJUmhWEqSQrGUJIViKUkKJfxGd9GbrOPj49lMV1cXmrW8vIxy5I50S0uxvwr6M5Ac3WAmm+sppXT8+PFshv7e6eY6/XzJ3XWy9Z1SSs+fP0e51dXVwl6z6H8B0Qg+KUkKxVKSFIqlJCkUS0lSKJaSpFAsJUmhWEqSQrGUJIViKUkKpWEb3Y3aPP3rr7+ymd7eXjRrbW0N5drb27OZpib298Pu7i7K7ezsoBxB3xu1v79f6DyiUqmg3OLiYjZDt8PpFvbo6Gg2c+XKFTQr8qY25ZOSpFAsJUmhWEqSQrGUJIViKUkKxVKSFIqlJCkUS0lSKP+Y5clarYZyKysr2Qw515oSXwJsa2vLZujn0QhFLzuS3yn9PMip4ZTY7yCllDo7O7MZcjI3pZR6enpQbnp6Ops5f/48mkUWdVOKfTbXJyVJoVhKkkKxlCSFYilJCsVSkhSKpSQpFEtJUiiWkqRQLCVJoTRso7voE6sPHjxAObLxWvQGc5Gnaff29lCObuyS0650a3p7exvlyHsrl8toFv1dkU3+lNj3kv4OWltbC3tNcsY5pZSq1SrKReaTkqRQLCVJoVhKkkKxlCSFYilJCsVSkhSKpSQpFEtJUiiWkqRQGrbRXbSZmRmU6+7uzmboBvba2hrKNeLOcZE3mCuVCppV5Lb51tYWmkW3zem/IJifn89m5ubm0KzTp0+jHPkZpqam0Cy60d2I7yTlk5KkUCwlSaFYSpJCsZQkhWIpSQrFUpIUiqUkKRRLSVIoR7I8SZbj6PIWXQKs1WooNzw8nM2MjY0V+ppkYfPg4ADNomdid3d3UY58vnTxkL438pp0EZPa2NhAObK0+fPPP6NZ58+fR7menp5shp7DLVqRf5Ypn5QkhWIpSQrFUpIUiqUkKRRLSVIo
lpKkUCwlSaFYSpJCsZQkhRL+HC49A7q9vV3Ya05MTKBcV1cXyrW05D/mzc1NNIuiG9H7+/vZTHt7O5pV5AleuiXc1taGcq2trSi3tLSUzdAzyE+ePEG51157LZt5+vQpmrW4uIhyfX19KNcIPilJCsVSkhSKpSQpFEtJUiiWkqRQLCVJoVhKkkKxlCSFYilJCuVINrqLvNlb9IYq2XSmN7rpDWay6Uy2vg+TKxJ9TXLfOiV2j7y5uRnNopvwdN7g4GA2884776BZd+7cQblqtZrN0A19esub/nkp+v424ZOSpFAsJUmhWEqSQrGUJIViKUkKxVKSFIqlJCkUS0lSKJaSpFDC3+ienZ1FudXV1cJec25uDuU+++wzlLt79242Q29ILy8vo1xTE/v7hmyb021osqmdEvtZ6fun6Eb0wMBANvPJJ5+gWR988AHKke/uzMwMmjU0NIRyZ8+eRblG8ElJUiiWkqRQLCVJoVhKkkKxlCSFYilJCsVSkhSKpSQplIYtT9JlR7Lcl1JK586dQ7kffvghm7l+/Tqatbu7i3K1Wi2beemll9CshYUFlJufn0c58jNUKhU0iyKLjCsrK2hWf38/ypXLZZQjP2t7ezua1d3djXKTk5PZDP05i/5dNYJPSpJCsZQkhWIpSQrFUpIUiqUkKRRLSVIolpKkUCwlSaFYSpJCadhG982bN1FudHQU5Xp6elCuo6Mjm6Fb5Ldv30a5qampbGZ9fR3NamtrQzn63sj51xdffBHNomeEyXbyrVu30KyPPvoI5X766SeUO3HiRDbzxhtvoFkjIyMo9+OPP2YzH374IZo1NjaGcq+++irKke9H0XxSkhSKpSQpFEtJUiiWkqRQLCVJoVhKkkKxlCSFYilJCsVSkhRKwza6JyYmUI5ulNJ55D403Ux+9913UW5tba2QTEopdXZ2otz29jbKkc+D3ocmt8hTSqlarWYz9EZ3c3MzytEb6M+fP89m7t69W9islFJaXl7OZpaWltCshw8fotzbb7+Ncm50S/q/ZylJCsVSkhSKpSQpFEtJUiiWkqRQLCVJoVhKkkJp2PLkzMwMyl28eBHlurq6UO7Ro0fZzPvvv49m7e7uotzs7Gw2Q0/O0oW8g4MDlCNLiuVyGc06duwYytHPjVhdXUU5ugQ4OTmZzZCTuSml1NTE/s7f2trKZnp7e9Gs9vZ2lKPfozNnzqBckXxSkhSKpSQpFEtJUiiWkqRQLCVJoVhKkkKxlCSFYilJCsVSkhTKkWx0k1OsdBN3cHAQ5ei5ULLZSzdx19fXC8vRLWF6qpdudNfr9UIyKfETvGSDuaWFfTUXFhZQbmhoCOV2dnaymVKpVOhrfv3119kMOZmbEj8PPD09jXKXL19GuSL5pCQpFEtJUiiWkqRQLCVJoVhKkkKxlCSFYilJCsVSkhSKpSQplCPZ6P7tt9+ymf39fTTr8ePHKEc3uoeHh7MZeueYbn6Tm9T9/f1o1sbGBsrRz5f8DHt7e2gW3cJua2vLZjo7O9GstbU1lKP3w8l7o/8a4eTJkyhHfvdk0zyllM6dO4dyz549Q7lG8ElJUiiWkqRQLCVJoVhKkkKxlCSFYilJCsVSkhSKpSQpFEtJUihHstH9xRdfZDPkTnNKKX311VcoV6lUUO7NN9/MZuh9656eHpQjG+IdHR1oFv3cNjc3UY5sOtNtYrK5nhLbEC+Xy2gW3a6m2/fkd0Vvs9Nt85GRkWzm+++/R7OuXr2KcvR7RP4s0M+W8klJUiiWkqRQLCVJoVhKkkKxlCSFYilJCsVSkhSKpSQplCNZniQLbS+88AKaNT4+jnK1Wg3lbt26lc18+umnaBZdUCTnTunJWbqgSE/6EvV6vbBZFH3/c3NzKEeXa0mOLgtOT0+j3MLCQjZz+/ZtNIuehd7e3ka5jz/+OJt5/fXX0SzKJyVJoVhKkkKxlCSFYilJCsVSkhSKpSQpFEtJUiiWkqRQLCVJoRzJRjc5ZdrX14dm
0TOxdEOcnLCl279003lgYCCboSdnW1tbUY5uHZN5dNucvrf9/f1s5tSpU2jW77//jnJ0E56cB6boa5Ic3cCmuWq1inKLi4soVySflCSFYilJCsVSkhSKpSQpFEtJUiiWkqRQLCVJoVhKkkKxlCSFcqiN7r29PbThSTZU6aboW2+9hXJ08/u7777LZi5duoRm0e3f9fX1bGZjYwPNopvfdAu7paW4pX6yqZ1SSm1tbdlMZ2cnmnVwcIBy9IY70dXVhXL0dzUyMpLN0D8vvb29KHfhwgWUu3PnTjZz5coVNIvySUlSKJaSpFAsJUmhWEqSQrGUJIViKUkKxVKSFIqlJCmUQ23OrayspBs3bmRz5NQtXe4jp2RTSun+/fso9+DBg2ymu7sbzaJLdORnpZ9Hc3MzytFTvfSELUFPsZIlS7rUSc/+bm1toRxZ2qRLkeT0ckopPXz4MJuh3zW6REz+HKSUUnt7O8oVySclSaFYSpJCsZQkhWIpSQrFUpIUiqUkKRRLSVIolpKkUCwlSaEcaqO7VCqhDVqyfXrx4kX0mq+88grK0Y3XZ8+eZTNkIz0lvtlbLpezGboNTTewS6USypHPjbz/lPg5XDKPnMxNiZ/NnZ+fRznyedAt55WVFZQj2/cvv/wymkWdPHkS5YaGhrIZcu75MHxSkhSKpSQpFEtJUiiWkqRQLCVJoVhKkkKxlCSFYilJCsVSkhTKoTa6K5VKOnv2bDZHtk9PnDiBXnN0dBTl6G3iy5cvZzMLCwtoFt103tzcRDliZmYG5eiW7ezsbDaztLSEZtVqNZRbXFzMZp48eYJm0dvbdLu6UqkUNuvp06coNzw8nM2MjIygWd9++y3KXbhwAeUODg6yGfo7oHxSkhSKpSQpFEtJUiiWkqRQLCVJoVhKkkKxlCSFYilJCsVSkhRKidwH/rdLly7V7927l82RmfSe8/LyMsrRW83kFvbExASaRbfSyUY02ZxNKaXHjx+j3NzcHMpdvXo1m6Gb2vS7RO5Dk1vqKaU0NjaGcvTWO/ke0Xvw3d3dKEc2/gcGBtCslhb2jzTW1tZQ7o8//shmrl27hmZ1dHT8Uq/XL+VyPilJCsVSkhSKpSQpFEtJUiiWkqRQLCVJoVhKkkKxlCSFciTLk//rVldXUY6e4G1qynd/qVRCs+jSKc2Rk77kfG1KKbW3t6McOTlLP49GIAu4KfFzyUTkz4N2SFNTk8uTkv73WEqSQrGUJIViKUkKxVKSFIqlJCkUS0lSKJaSpFAsJUmhHGqju1QqPU8psXuskvR3Q/V6PXtD+lClJElHzf98kxSKpSQpFEtJUiiWkqRQLCVJoVhKkkKxlCSFYilJCsVSkhTKvwCImA6tLlipWAAAAABJRU5ErkJggg==\n",
      "text/plain": [
       "<Figure size 360x360 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "类型: 汗衫\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "plt.figure(figsize=(5,5))\n",
    "i=np.random.randint(len(X_train))\n",
    "img = X_train[i].reshape(28, 28)\n",
    "plt.xticks([])\n",
    "plt.yticks([])\n",
    "plt.grid(False)\n",
    "plt.imshow(img, cmap=plt.cm.binary)\n",
    "plt.show()\n",
    "print(\"类型:\",fashion_classes[y_train[i]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "import cv2\n",
    "import numpy as np\n",
    "def img_scale(image):#放大\n",
    "    result = cv2.resize(image, (600, 600))\n",
    "    #cv2.imshow(\"scale\", result)\n",
    "    #cv2.waitKey(0)\n",
    "    return result\n",
    "def img_rotation(image):\n",
    "    # 原图的高、宽（灰度图，只有两个维度）\n",
    "    rows, cols= image.shape\n",
    "\n",
    "    # 绕图像的中心旋转\n",
    "    # 参数：旋转中心 旋转度数 scale\n",
    "    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 90, 1)\n",
    "    # 参数：原始图像 旋转参数 元素图像宽高\n",
    "    rotated = cv2.warpAffine(image, M, (cols, rows))\n",
    "\n",
    "    # 显示图像\n",
    "    cv2.imshow(\"rotated\", rotated)\n",
    "    cv2.waitKey(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "def img_flip(image):\n",
    "    # 0以X轴为对称轴翻转,>0以Y轴为对称轴翻转, <0X轴Y轴翻转\n",
    "    horizontally = cv2.flip(image, 1)  # 水平镜像\n",
    "    vertically = cv2.flip(image, 0)  # 垂直镜像\n",
    "    hv = cv2.flip(image, -1)  # 水平垂直镜像\n",
    "\n",
    "    # 显示图形\n",
    "    cv2.imshow(\"Horizontally\", horizontally)\n",
    "    cv2.imshow(\"Vertically\", vertically)\n",
    "    cv2.imshow(\"Horizontally & Vertically\", hv)\n",
    "    cv2.waitKey(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "-1"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import cv2\n",
    "img1=cv2.resize(img,(600,600))#大图为600*600\n",
    "img1_r=600-img1.shape[0]#第0个维度填充到600需要的像素点个数\n",
    "img1_b=600-img1.shape[1]#第1个维度填充到600需要的像素点个数\n",
    "img1_pad=np.pad(img1,((0,img1_r),(0,img1_b)),'constant', constant_values=0)\n",
    "cv2.imshow(\"tianchong\",img1_pad)\n",
    "cv2.waitKey(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "t=img_scale(img)\n",
    "img_rotation(t)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "img_flip(t)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
    "#定义用于图像增强的函数\n",
    "datagen = ImageDataGenerator(\n",
    "    rotation_range=10,        #旋转角度\n",
    "#    width_shift_range=0.2,   #水平偏移\n",
    "#    height_shift_range=0.2,  #垂直偏移\n",
    "#    shear_range=0.2,         #随机错切变换的角度\n",
    "#    zoom_range=0.2,          #随机缩放的范围\n",
    "    horizontal_flip=True,    #随机将一半图像水平翻转\n",
    "    fill_mode='nearest'      #变换后新像素的填充方式\n",
    ")\n",
    "\n",
    "#image 原始图像\n",
    "#nb_augmentation 增加的数量\n",
    "#images 初始化后的图像\n",
    "\n",
    "def image_augmentation(image, nb_of_augmentation):\n",
    "    images = []\n",
    "    image = image.reshape(1,img_height,img_width,channels)\n",
    "    i = 0\n",
    "    for x_batch in datagen.flow(image,batch_size=1):\n",
    "        images.append(x_batch)\n",
    "        i += 1\n",
    "        if i >= nb_of_augmentation:\n",
    "            break\n",
    "    return images"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "\n",
    "#targets 目标\n",
    "#use_sugmentation 进行数据增强则设置为True\n",
    "#nb_of_augmentation 图像增强设置的数量\n",
    "\n",
    "def preprocess_data(images, targets, use_augmentation=False, nb_of_augmentation=1):\n",
    "    \n",
    "    X = []\n",
    "    y = []\n",
    "    for x_, y_ in zip(images, targets):\n",
    "        #像素缩放\n",
    "        x_ = x_ / 255.0\n",
    "        #数据增强\n",
    "        if use_augmentation:\n",
    "            argu_img = image_augmentation(x_, nb_of_augmentation)\n",
    "            for a in argu_img:\n",
    "                X.append(a.reshape(img_height, img_width, channels))\n",
    "                y.append(y_)\n",
    "                \n",
    "        X.append(x_)\n",
    "        y.append(y_)\n",
    "    print(\"预处理结束：%i 个样本\\n\" % len(X))\n",
    "    return np.array(X),tf.keras.utils.to_categorical(y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "预处理结束：180000 个样本\n",
      "\n",
      "预处理结束：10000 个样本\n",
      "\n"
     ]
    }
   ],
   "source": [
    "X_train_shaped, y_train_shaped = preprocess_data(\n",
    "    X_train, y_train,\n",
    "    use_augmentation = True,\n",
    "    nb_of_augmentation = nb_augmentation\n",
    ")\n",
    "X_test_shaped, y_test_shaped = preprocess_data(X_test,y_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "def create_model():\n",
    "    cnn = tf.keras.Sequential()\n",
    "    cnn.add(tf.keras.layers.InputLayer(input_shape=(img_height,img_width,channels)))\n",
    "    cnn.add(tf.keras.layers.BatchNormalization())\n",
    "    cnn.add(tf.keras.layers.Convolution2D(64,(4,4),padding= 'same',activation='relu'))\n",
    "    cnn.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2)))\n",
    "    cnn.add(tf.keras.layers.Dropout(0.1))\n",
    "    cnn.add(tf.keras.layers.Convolution2D(64,(4,4),activation='relu'))\n",
    "    cnn.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2)))\n",
    "    cnn.add(tf.keras.layers.Dropout(0.3))\n",
    "    cnn.add(tf.keras.layers.Flatten())\n",
    "    cnn.add(tf.keras.layers.Dense(256,activation='relu'))\n",
    "    cnn.add(tf.keras.layers.Dropout(0.5))\n",
    "    cnn.add(tf.keras.layers.Dense(64,activation='relu'))\n",
    "    cnn.add(tf.keras.layers.BatchNormalization())\n",
    "    cnn.add(tf.keras.layers.Dense(num_classes,activation=\"softmax\" ))\n",
    "    cnn.compile(loss= 'categorical_crossentropy',optimizer=tf.keras.optimizers.Adam(),metrics=['accuracy'])\n",
    "    return cnn                                 "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "batch_normalization (BatchNo (None, 28, 28, 1)         4         \n",
      "_________________________________________________________________\n",
      "conv2d (Conv2D)              (None, 28, 28, 64)        1088      \n",
      "_________________________________________________________________\n",
      "max_pooling2d (MaxPooling2D) (None, 14, 14, 64)        0         \n",
      "_________________________________________________________________\n",
      "dropout (Dropout)            (None, 14, 14, 64)        0         \n",
      "_________________________________________________________________\n",
      "conv2d_1 (Conv2D)            (None, 11, 11, 64)        65600     \n",
      "_________________________________________________________________\n",
      "max_pooling2d_1 (MaxPooling2 (None, 5, 5, 64)          0         \n",
      "_________________________________________________________________\n",
      "dropout_1 (Dropout)          (None, 5, 5, 64)          0         \n",
      "_________________________________________________________________\n",
      "flatten (Flatten)            (None, 1600)              0         \n",
      "_________________________________________________________________\n",
      "dense (Dense)                (None, 256)               409856    \n",
      "_________________________________________________________________\n",
      "dropout_2 (Dropout)          (None, 256)               0         \n",
      "_________________________________________________________________\n",
      "dense_1 (Dense)              (None, 64)                16448     \n",
      "_________________________________________________________________\n",
      "batch_normalization_1 (Batch (None, 64)                256       \n",
      "_________________________________________________________________\n",
      "dense_2 (Dense)              (None, 10)                650       \n",
      "=================================================================\n",
      "Total params: 493,902\n",
      "Trainable params: 493,772\n",
      "Non-trainable params: 130\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "create_model().summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import train_test_split\n",
    "histories = []\n",
    "for i in range(0,iterations):\n",
    "    print('iteration: %i' % i)\n",
    "    filepath = \"fashion_mnist-%i.hdf5\" % i\n",
    "    X_train_, X_val_, y_train_, y_val_ = train_test_split(X_train_shaped, y_train_shaped,test_size=0.2, random_state=42)\n",
    "    cnn = create_model()\n",
    "    history = cnn.fit(X_train_,y_train_,\n",
    "                      batch_size=batch_size,\n",
    "                      epochs=2,#num_epochs,\n",
    "                      verbose=1,\n",
    "                      validation_data=(X_val_,y_val_),\n",
    "                      callbacks=[\n",
    "                          tf.keras.callbacks.ModelCheckpoint(filepath,monitor= 'val_loss',verbose=1,save_best_only=True)\n",
    "                      ])\n",
    "    histories.append(history.history)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_avg(histories,his_key):\n",
    "    tmp=[]\n",
    "    for history in histories:\n",
    "        tmp.append(history[his_key][np.argmin( history['val_loss'])])\n",
    "    return np.mean(tmp)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练集:\tnan loss /nan acc\n",
      "验证集:\tnan loss /nan0.8f acc\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "F:\\anaconda33\\lib\\site-packages\\numpy\\core\\fromnumeric.py:3335: RuntimeWarning: Mean of empty slice.\n",
      "  out=out, **kwargs)\n",
      "F:\\anaconda33\\lib\\site-packages\\numpy\\core\\_methods.py:161: RuntimeWarning: invalid value encountered in double_scalars\n",
      "  ret = ret.dtype.type(ret / rcount)\n"
     ]
    }
   ],
   "source": [
    "print(\"训练集:\\t%0.8f loss /%0.8f acc\"%(get_avg(histories,'loss'),get_avg(histories,'accuracy')))\n",
    "print (\"验证集:\\t%0.8f loss /%0.8f acc\"%(get_avg(histories,'val_loss'),get_avg(histories,'val_accuracy')))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_loss = []\n",
    "test_accs = []\n",
    "for i in range(0,iterations):\n",
    "    cnn_ = tf.keras.models.load_model(\"./fashion_mnist-%i.hdf5\"% i)\n",
    "    score = cnn_.evaluate(X_test_shaped, y_test_shaped, verbose=0)\n",
    "    test_loss.append(score[0])\n",
    "    test_accs.append(score[1])\n",
    "    print('Running final test with model %i: %0.4f loss / %0.4f acc' % (i,score[0],score[1]))\n",
    "print('\\nAverage loss / accuracy on testset: %0.4f loss / %0.5f acc' % (np.mean(test_loss),np.mean(test_accs)))\n",
    "print('Standard deviation: (+-%0.4f) loss / (+-%0.4f) acc' % (np.std(test_loss),np.std(test_accs)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_acc_loss(title, histories, key_acc, key_loss):\n",
    "    fig, (ax1, ax2) = plt.subplots(1,2)\n",
    "    ax1.set_title('Model accuracy (%s)' % title)\n",
    "    names = []\n",
    "    for i, model in enumerate(histories):\n",
    "        ax1.plot(model[key_acc])\n",
    "        ax1.set_xlabel('epoch')\n",
    "        names.append('Model %i' % i)\n",
    "        ax1.set_ylabel('accuracy')\n",
    "    ax1.legend(names, loc='upper left')\n",
    "    ax2.set_title('Model loss (%s)' % title)\n",
    "    for model in histories:\n",
    "        ax2.plot(model[key_loss])\n",
    "        ax2.set_xlabel('epoch')\n",
    "      \n",
    "        ax2.set_ylabel('loss')\n",
    "    ax2.legend(names, loc='upper right')\n",
    "    fig.set_size_inches(20, 5)\n",
    "    plt.show()\n",
    "plot_acc_loss('training', histories, 'accuracy', 'loss')\n",
    "plot_acc_loss('validation', histories, 'val_accuracy', 'val_loss')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "RUN=0\n",
    "model=create_model()\n",
    "model.load_weights(\"models/fashion_mnist-%i.hdf5\" % RUN)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_train_val(title,history):\n",
    "\n",
    "    fig,(ax1,ax2) = plt.subplots(1,2)\n",
    "\n",
    "    #Accuracy\n",
    "\n",
    "    ax1.set_title('Model accuracy - %s' % title)\n",
    "\n",
    "    ax1.plot(history['accuracy'])\n",
    "\n",
    "    ax1.plot(history['val_accuracy'])\n",
    "\n",
    "    ax1.set_xlabel('epoch')\n",
    "\n",
    "    ax1.set_ylabel('accuracy')\n",
    "\n",
    "    ax1.legend(['train','validation'],loc='upper left')\n",
    "\n",
    "    #Loss\n",
    "\n",
    "    ax2.set_title('Model loss - %s' % title)\n",
    "\n",
    "    ax2.plot(history['loss'])\n",
    "\n",
    "    ax2.plot(history['val_loss'])\n",
    "\n",
    "    ax2.set_xlabel('epoch')\n",
    "\n",
    "    ax2.set_ylabel('accuracy')\n",
    "\n",
    "    ax2.legend(['train','validation'],loc='upper left')\n",
    "\n",
    "    fig.set_size_inches(20,5)\n",
    "\n",
    "    plt.show()\n",
    "\n",
    "plot_train_val('Model %i' % RUN,histories[RUN])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "plt.rcParams['font.sans-serif']=['SimHei']#用来正常显示中文标签\n",
    "plt.rcParams['axes.unicode_minus']=False#用来正常显示负号\n",
    "def plot_sample_predictions(classes,model,X_test,y_test):\n",
    "    class_ = 0\n",
    "    images_per_row = 5\n",
    "    rows = len(classes) // images_per_row\n",
    "    for i in range(rows):\n",
    "        fig, axis = plt.subplots(1, images_per_row)\n",
    "        for i, axis in enumerate(axis):\n",
    "            elements = np.squeeze(np.argwhere(y_test ==class_))\n",
    "            random = np.random.randint(len(elements))\n",
    "            X = X_test[elements[random]]\n",
    "            y = y_test[elements[random]]\n",
    "            fig.set_size_inches(10,20)\n",
    "            x_reshape = X.reshape([1,img_height,img_width,channels])\n",
    "            axis.text(0,32,'Predicted:{}'.format(classes[np.argmax(model.predict(x_reshape))]))\n",
    "            axis.text(0,36,'Correct:{}'.format((classes)[y]))\n",
    "            axis.imshow(np.squeeze(X),cmap='gray')\n",
    "            axis.axis('off')\n",
    "            class_ += 1\n",
    "    plt.show()\n",
    "plot_sample_predictions(list(fashion_classes.values()),model, X_test_shaped, y_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import confusion_matrix\n",
    "import itertools\n",
    "def plot_confusion_matrix(cm,class_, title='Confusion matrix',cmap=plt.cm.Reds):\n",
    "    plt.imshow(cm,interpolation='nearest', cmap=cmap)\n",
    "    plt.title(title)\n",
    "    tick_marks = np.arange(len(class_))\n",
    "    plt.xticks(tick_marks,class_,rotation=90)\n",
    "    plt.yticks(tick_marks,class_)\n",
    "    fnt='d'\n",
    "    thresh = cm.max()/ 2.\n",
    "    for i,j in itertools.product(range (cm.shape[0]),range(cm.shape[1])):\n",
    "        plt.text(j,i, format(cm[i,j],fnt), horizontalalignment=\"center\", color=\"white\"if cm[i,j]> thresh else \"black\")\n",
    "    plt.ylabel('True labels')\n",
    "    plt.xlabel('Predicted labels')\n",
    "    plt.show()\n",
    "predictions = np.argmax(model.predict(X_test_shaped,verbose=0), axis=1)\n",
    "plot_confusion_matrix(confusion_matrix(y_test, predictions),list(fashion_classes.values()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#选择第一个模型\n",
    "RUN=0\n",
    "model=create_model()\n",
    "model.load_weights(\"models/fashion_mnist-%i.hdf5\" % RUN)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from PIL import Image\n",
    "import matplotlib.pyplot as plt\n",
    "import os\n",
    "%matplotlib inline\n",
    "file_dir = \"./testdata/\"\n",
    "data = []\n",
    "for n in os.listdir(file_dir):\n",
    "    if(n.lower().endswith(('.bmp','.dib','.png','.jpg','.jpeg','.pbm','.pgm','.ppm','.tif','.tiff'))):\n",
    "        data.append(n)\n",
    "print(data)\n",
    "plt.figure(figsize=(10,10))\n",
    "m = len(data)\n",
    "for i in range(m):\n",
    "    plt.subplot(m//4+1,4,i+1)\n",
    "    data_dir = file_dir + data[i]\n",
    "    img = Image.open(data_dir)\n",
    "    plt.imshow(img)\n",
    "    plt.xlabel(data[i])\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "for n in os.listdir(file_dir):\n",
    "    num=[]\n",
    "    if(n.lower().endswith(('.bmp','.dib','.png','.jpg','.jpeg','.pbm','.pgm','.ppm','.tif','.tiff'))):\n",
    "        data_dir = file_dir+n\n",
    "        im=Image.open(data_dir)\n",
    "        im =im.convert('L')\n",
    "        im = im.resize((28,28),Image.LANCZOS)\n",
    "        image_list= []\n",
    "        for x in range(28):\n",
    "            scanline_list=[]\n",
    "            for y in range(28):\n",
    "                pixel = im.getpixel((y,x))\n",
    "                pixel = 255.0-pixel\n",
    "                pixel= pixel/255.0\n",
    "                scanline_list.append (pixel)\n",
    "            image_list.append(scanline_list)\n",
    "        arr1 = np.array(image_list).reshape((1, 28,28,1))\n",
    "        classes=list(fashion_classes.values())\n",
    "        prediction = classes[np.argmax(model.predict(arr1))]    \n",
    "        print (n+\"预测为\"+prediction)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
