{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.5/dist-packages/librosa/cache.py:36: DeprecationWarning: The 'cachedir' attribute has been deprecated in version 0.12 and will be removed in version 0.14.\n",
      "Use os.path.join(memory.location, 'joblib') attribute instead.\n",
      "  if self.cachedir is not None:\n",
      "  (identical DeprecationWarning repeated many times; trimmed)\n",
      "/usr/local/lib/python3.5/dist-packages/librosa/core/audio.py:37: UserWarning: Could not import scikits.samplerate. Falling back to scipy.signal\n",
      "  warnings.warn('Could not import scikits.samplerate. '\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.5/dist-packages/librosa/cache.py:36: DeprecationWarning: The 'cachedir' attribute has been deprecated in version 0.12 and will be removed in version 0.14.\n",
      "Use os.path.join(memory.location, 'joblib') attribute instead.\n",
      "  if self.cachedir is not None:\n",
      "  (identical DeprecationWarning repeated many times; trimmed)\n",
      "Using TensorFlow backend.\n",
      "/usr/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: compiletime version 3.4 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.5\n",
      "  return f(*args, **kwds)\n",
      "/usr/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: builtins.type size changed, may indicate binary incompatibility. Expected 432, got 412\n",
      "  return f(*args, **kwds)\n"
     ]
    }
   ],
   "source": [
    "import librosa\n",
    "import numpy as np\n",
    "import os\n",
    "import cv2\n",
    "import pickle\n",
    "from keras.utils import np_utils\n",
    "\n",
    "def get_labels(data_dir):\n",
    "    labels = []\n",
    "    files = os.listdir(data_dir)\n",
    "    files.sort()\n",
    "    for file in files:\n",
    "        catname_full, _ = os.path.splitext(file)\n",
    "        catname = catname_full.split('_')[-1]\n",
    "        labels.append(catname)\n",
    "    print(labels)\n",
    "    return labels\n",
    "\n",
    "def load_data(data_dir): #From Directory\n",
    "    MAX_NUM = 1000\n",
    "    x_load = []\n",
    "    y_load = []\n",
    "    labels = get_labels(data_dir)\n",
    "    dirs = labels\n",
    "    for cat in dirs: #load directory\n",
    "        files_dir = data_dir + cat \n",
    "        files = os.listdir(files_dir)\n",
    "        for file in files[:MAX_NUM]:\n",
    "            file_path = os.path.join(files_dir,file)\n",
    "            mfccs = get_mfcc(file_path) # shape (20 , 32)\n",
    "            x = np.array(mfccs).astype('float32')\n",
    "            x_load.append(x)\n",
    "            y_load.append(labels.index(cat))  # directory name as label\n",
    "    return x_load,y_load\n",
    "\n",
    "def dump_picle(features, labels):\n",
    "    features = np.array(features).astype('float32')\n",
    "    labels = np.array(labels).astype('float32')\n",
    "    print(features.shape)\n",
    "    print(labels.shape)\n",
    "    features=features.reshape(features.shape[0]*features.shape[1], features.shape[2])\n",
    "    labels=labels.reshape(labels.shape[0]*labels.shape[1], labels.shape[2])\n",
    "    print(features.shape)\n",
    "    print(labels.shape)\n",
    "    with open(\"features\", \"wb\") as f:\n",
    "        pickle.dump(features, f, protocol=4)\n",
    "    with open(\"labels\", \"wb\") as f:\n",
    "        pickle.dump(labels, f, protocol=4)\n",
    "\n",
    "def loadFromPickle():\n",
    "    with open(\"features\", \"rb\") as f:\n",
    "        features = np.array(pickle.load(f))\n",
    "    with open(\"labels\", \"rb\") as f:\n",
    "        labels = np.array(pickle.load(f))\n",
    "    return features, labels\n",
    "def dump_label_name(dirs):\n",
    "    with open(\"label_names\", \"wb\") as f:\n",
    "        pickle.dump(dirs, f, protocol=4)    \n",
    "def load_label_name():\n",
    "    with open(\"label_names\", \"rb\") as f:\n",
    "        dirs = np.array(pickle.load(f))\n",
    "    return dirs\n",
    "def prepress_labels(labels):\n",
    "    labels = np_utils.to_categorical(labels) # one-hot编码 把类别id转换为表示当前类别的向量，比如0 1 2 =》 [[1 0 0] [0 1 0] [0 0 1]]\n",
    "    return labels\n",
    "\n",
    "def write_image(cat,index,array):\n",
    "    dest_dir = os.path.join(to_dir,cat)\n",
    "    if os.path.exists(dest_dir)==False:\n",
    "        os.makedirs(dest_dir)\n",
    "    filename = cat + \"_\" +str(index) + \".png\"\n",
    "    path = os.path.join(dest_dir,filename)\n",
    "    cv2.imwrite( path , array)\n",
    "\n",
    "def load_npy_data(data_dir):\n",
    "    x_load = []\n",
    "    y_load = []\n",
    "    MAX_NUM = 100\n",
    "    labels = get_labels(data_dir)\n",
    "    dirs = labels\n",
    "    write_img_file = False\n",
    "    files = os.listdir(data_dir)\n",
    "    for file in files[:]:\n",
    "        catname_full,_ = os.path.splitext(file)\n",
    "        catname = catname_full.split('_')[-1]\n",
    "        file = os.path.join(data_dir,file) \n",
    "        cat = os.path.basename(file)\n",
    "        imgs = np.load(file)\n",
    "        print(imgs.shape)\n",
    "        # print(np.shape(imgs)) # (133572, 784)\n",
    "        # imgs = imgs.astype('float32') / 255.\n",
    "        imgs = imgs[:MAX_NUM, :]\n",
    "        x_load.append(imgs)\n",
    "        y = [labels.index(catname) for _ in range(MAX_NUM)]\n",
    "        y = np.array(y).astype('float32')\n",
    "        y = y.reshape(y.shape[0], 1)\n",
    "        y_load.append(y)\n",
    "        if write_img_file:\n",
    "            for index,img in enumerate(imgs):\n",
    "                img = img.reshape(28,28)\n",
    "                write_image(catname,index,img)\n",
    "    return x_load,y_load"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Untitled.ipynb\tfeatures  labels  npy_data  static\r\n"
     ]
    }
   ],
   "source": [
    "# list the files in the current working directory\n",
    "!ls"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "conv2d_11 (Conv2D)           (None, 24, 24, 8)         208       \n",
      "_________________________________________________________________\n",
      "max_pooling2d_11 (MaxPooling (None, 12, 12, 8)         0         \n",
      "_________________________________________________________________\n",
      "conv2d_12 (Conv2D)           (None, 8, 8, 8)           1608      \n",
      "_________________________________________________________________\n",
      "max_pooling2d_12 (MaxPooling (None, 4, 4, 8)           0         \n",
      "_________________________________________________________________\n",
      "flatten_6 (Flatten)          (None, 128)               0         \n",
      "_________________________________________________________________\n",
      "dense_11 (Dense)             (None, 128)               16512     \n",
      "_________________________________________________________________\n",
      "dropout_6 (Dropout)          (None, 128)               0         \n",
      "_________________________________________________________________\n",
      "dense_12 (Dense)             (None, 5)                 645       \n",
      "=================================================================\n",
      "Total params: 18,973\n",
      "Trainable params: 18,973\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n",
      "Train on 1200 samples, validate on 300 samples\n",
      "Epoch 1/10\n",
      "1200/1200 [==============================] - 10s 8ms/step - loss: 12.8875 - acc: 0.1833 - val_loss: 11.5847 - val_acc: 0.2267\n",
      "Epoch 2/10\n",
      "1200/1200 [==============================] - 4s 3ms/step - loss: 11.1038 - acc: 0.2767 - val_loss: 8.2805 - val_acc: 0.4200\n",
      "Epoch 3/10\n",
      "1200/1200 [==============================] - 4s 3ms/step - loss: 10.1674 - acc: 0.3292 - val_loss: 7.8959 - val_acc: 0.4700\n",
      "Epoch 4/10\n",
      "1200/1200 [==============================] - 4s 3ms/step - loss: 9.4134 - acc: 0.3625 - val_loss: 6.7541 - val_acc: 0.5200\n",
      "Epoch 5/10\n",
      "1200/1200 [==============================] - 4s 3ms/step - loss: 7.7987 - acc: 0.4625 - val_loss: 4.8796 - val_acc: 0.6133\n",
      "Epoch 6/10\n",
      "1200/1200 [==============================] - 4s 3ms/step - loss: 6.7306 - acc: 0.5308 - val_loss: 3.6798 - val_acc: 0.6867\n",
      "Epoch 7/10\n",
      "1200/1200 [==============================] - 4s 3ms/step - loss: 5.7404 - acc: 0.5750 - val_loss: 2.9658 - val_acc: 0.7233\n",
      "Epoch 8/10\n",
      "1200/1200 [==============================] - 4s 3ms/step - loss: 4.9143 - acc: 0.6125 - val_loss: 2.6958 - val_acc: 0.7600\n",
      "Epoch 9/10\n",
      "1200/1200 [==============================] - 4s 3ms/step - loss: 4.1433 - acc: 0.6650 - val_loss: 2.4248 - val_acc: 0.7800\n",
      "Epoch 10/10\n",
      "1200/1200 [==============================] - 4s 3ms/step - loss: 3.8817 - acc: 0.6667 - val_loss: 2.1628 - val_acc: 0.7867\n",
      "Test loss: 2.1628294237454733\n",
      "Test accuracy: 0.7866666658719381\n"
     ]
    }
   ],
   "source": [
    "# thanks to https://github.com/akshaybahadur21/QuickDraw\n",
    "import h5py\n",
    "import numpy as np\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.utils import shuffle\n",
    "from keras.layers import Dense, Flatten, Conv2D\n",
    "from keras.layers import MaxPooling2D, Dropout\n",
    "from keras.utils import np_utils, print_summary\n",
    "import tensorflow as tf\n",
    "from keras.models import Sequential\n",
    "from keras.callbacks import ModelCheckpoint\n",
    "import pickle\n",
    "import keras\n",
    "from keras.callbacks import TensorBoard\n",
    "\n",
    "# load_label_name added to the import: both test functions call it, but it\n",
    "# was never imported, so a fresh-kernel run of this cell raised NameError\n",
    "# (it only worked because the data_utils cell above defined it globally).\n",
    "from data_utils import loadFromPickle, prepress_labels, get_labels, load_label_name\n",
    "\n",
    "def keras_model1(input_shape, num_classes):\n",
    "    \"\"\"Dense-only baseline: 512 -> 128 -> num_classes softmax.\n",
    "\n",
    "    Returns (model, callbacks_list); the checkpoint keeps the best\n",
    "    val_acc weights in model1.h5.\n",
    "    \"\"\"\n",
    "    model = Sequential()\n",
    "    model.add(Dense(512, activation='relu', input_shape=input_shape))\n",
    "    model.add(Dense(128, activation='relu'))\n",
    "    model.add(Dense(num_classes, activation='softmax'))\n",
    "    # Cross-entropy loss, Adadelta optimizer, accuracy as the metric.\n",
    "    model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])\n",
    "    filepath = \"model1.h5\"\n",
    "    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n",
    "    callbacks_list = [checkpoint]\n",
    "    return model, callbacks_list\n",
    "\n",
    "def keras_model(input_shape, num_classes):\n",
    "    \"\"\"Small CNN: 2 x (5x5 conv + 2x2 max-pool) -> Dense 128 -> softmax.\n",
    "\n",
    "    Returns (model, callbacks_list); the checkpoint keeps the best\n",
    "    val_acc weights in model.h5.\n",
    "    \"\"\"\n",
    "    model = Sequential()\n",
    "    model.add(Conv2D(8, (5, 5), input_shape=input_shape, activation='relu'))\n",
    "    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))\n",
    "    model.add(Conv2D(8, (5, 5), activation='relu'))\n",
    "    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))\n",
    "\n",
    "    model.add(Flatten())\n",
    "    model.add(Dense(128, activation='relu'))\n",
    "    model.add(Dropout(0.6))  # heavy dropout: small model, small dataset\n",
    "    model.add(Dense(num_classes, activation='softmax'))\n",
    "\n",
    "    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
    "    filepath = \"model.h5\"\n",
    "    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n",
    "    callbacks_list = [checkpoint]\n",
    "    return model, callbacks_list\n",
    "\n",
    "def test_model():\n",
    "    \"\"\"Train the CNN on the pickled 28x28 features and report test metrics.\"\"\"\n",
    "    features, labels = loadFromPickle()\n",
    "    labels_count = len(load_label_name())\n",
    "    features, labels = shuffle(features, labels)\n",
    "    labels = prepress_labels(labels)  # one-hot encode\n",
    "    train_x, test_x, train_y, test_y = train_test_split(features, labels, random_state=0, test_size=0.2)\n",
    "    train_x = train_x.reshape(train_x.shape[0], 28, 28, 1)\n",
    "    test_x = test_x.reshape(test_x.shape[0], 28, 28, 1)\n",
    "    model, callbacks_list = keras_model((28, 28, 1,), labels_count)\n",
    "    print_summary(model)\n",
    "    # callbacks= added: the ModelCheckpoint was created but never passed to\n",
    "    # fit(), so the best-val_acc snapshot was never actually written.\n",
    "    model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=10, batch_size=128, callbacks=callbacks_list)\n",
    "    # Evaluate on the held-out split (verbose=0 suppresses the progress log).\n",
    "    score = model.evaluate(test_x, test_y, verbose=0)\n",
    "    print('Test loss:', score[0])\n",
    "    print('Test accuracy:', score[1])\n",
    "    model.save('model.h5')  # final weights (checkpoint holds best val_acc)\n",
    "\n",
    "def test_model1():\n",
    "    \"\"\"Train the dense baseline on flattened features and report metrics.\"\"\"\n",
    "    features, labels = loadFromPickle()\n",
    "    labels_count = len(load_label_name())\n",
    "    features, labels = shuffle(features, labels)\n",
    "    labels = prepress_labels(labels)  # one-hot encode\n",
    "    train_x, test_x, train_y, test_y = train_test_split(features, labels, random_state=0, test_size=0.1)\n",
    "    model, callbacks_list = keras_model1((28 * 28,), labels_count)\n",
    "    print_summary(model)\n",
    "    # callbacks= added: same never-used ModelCheckpoint defect as test_model().\n",
    "    model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=20, batch_size=128, callbacks=callbacks_list)\n",
    "\n",
    "    score = model.evaluate(test_x, test_y, verbose=0)\n",
    "    print('Test loss:', score[0])\n",
    "    print('Test accuracy:', score[1])\n",
    "\n",
    "    model.save('model1.h5')\n",
    "\n",
    "test_model()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " * Serving Flask app \"__main__\" (lazy loading)\n",
      " * Environment: production\n",
      "   WARNING: Do not use the development server in a production environment.\n",
      "   Use a production WSGI server instead.\n",
      " * Debug mode: off\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.5/dist-packages/ipykernel_launcher.py:40: DeprecationWarning: The binary mode of fromstring is deprecated, as it behaves surprisingly on unicode inputs. Use frombuffer instead\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(2245,)\n",
      "(28, 28)\n",
      "may be  rainbow  probab:  [0.00e+00 1.00e+00 0.00e+00 0.00e+00 1.53e-43]\n",
      "小白：我猜你画的是rainbow\n",
      "(2032,)\n",
      "(28, 28)\n",
      "may be  umbrella  probab:  [0. 0. 0. 0. 1.]\n",
      "小白：我猜你画的是umbrella\n"
     ]
    }
   ],
   "source": [
    "from flask import Flask\n",
    "from flask import request, Response\n",
    "from flask import render_template\n",
    "from keras.models import load_model\n",
    "import tensorflow as tf\n",
    "import base64\n",
    "import numpy as np\n",
    "import cv2\n",
    "import os\n",
    "import json\n",
    "\n",
    "import sys\n",
    "root_dir = \"/xiaobai/\"\n",
    "sys.path.append(root_dir)\n",
    "from xiaobai import XiaoBai, BaseSkill\n",
    "keyword_model = root_dir + 'resources/小白.pmdl'\n",
    "xiaobai = XiaoBai(keyword_model=keyword_model)\n",
    "\n",
    "# load_label_name added to the import: it is called below but was only\n",
    "# defined in an earlier cell, so running this cell on a fresh kernel\n",
    "# raised NameError.\n",
    "from data_utils import get_labels, load_label_name\n",
    "\n",
    "app = Flask(__name__)\n",
    "\n",
    "graph = tf.get_default_graph()\n",
    "model = load_model('model.h5')  # trained CNN produced by the training cell\n",
    "label_names = load_label_name().tolist()\n",
    "\n",
    "@app.route('/labels')\n",
    "def labels_route():  # renamed: 'get_labels' shadowed the data_utils import\n",
    "    \"\"\"Return the ordered label names as a JSON array.\"\"\"\n",
    "    return Response(json.dumps(label_names), mimetype='application/json')\n",
    "\n",
    "@app.route('/', methods=['GET', 'POST'])\n",
    "def index():\n",
    "    \"\"\"POST: classify a base64-encoded sketch; GET: serve the drawing page.\"\"\"\n",
    "    if request.method == 'POST':\n",
    "        img_b64encode = request.form.get(\"base64img\", \"\")\n",
    "        # Strip the 'data:image/png;base64,' data-URL prefix (23 chars).\n",
    "        img_b64decode = base64.b64decode(img_b64encode[23:])\n",
    "        # np.frombuffer replaces the deprecated np.fromstring (see the\n",
    "        # DeprecationWarning captured in this cell's recorded output).\n",
    "        img_array = np.frombuffer(img_b64decode, np.uint8)\n",
    "        print(img_array.shape)\n",
    "        # cv2.IMREAD_COLOR replaces cv2.COLOR_BGR2RGB, which is a color-\n",
    "        # conversion code (value 4 == IMREAD_ANYCOLOR), not an imdecode\n",
    "        # flag; a 3-channel BGR image is required by cvtColor below.\n",
    "        img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)\n",
    "        img = cv2.resize(img, (28, 28))\n",
    "        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n",
    "        print(gray.shape)\n",
    "        with graph.as_default():\n",
    "            pred_probab = keras_predict(model, gray)\n",
    "            pred_class = list(pred_probab[0]).index(max(pred_probab[0]))\n",
    "            print(\"may be \", label_names[pred_class], \" probab: \", pred_probab[0])\n",
    "            # Pair each probability with its label name for the client UI.\n",
    "            formated = list(map(lambda x, i: (x.item(), label_names[i]), pred_probab[0], [i for i in range(len(label_names))]))\n",
    "            xiaobai.speak(\"我猜你画的是\" + label_names[pred_class])\n",
    "            return json.dumps(sorted(formated, reverse=True))\n",
    "    else:\n",
    "        return app.send_static_file('index.html')\n",
    "\n",
    "def keras_predict(model, image):\n",
    "    \"\"\"Run the model on one grayscale image; return the probability row(s).\"\"\"\n",
    "    processed = keras_process_image(image)\n",
    "    pred_probab = model.predict(processed)\n",
    "    return pred_probab\n",
    "\n",
    "def keras_process_image(img):\n",
    "    \"\"\"Resize to 28x28 float32 and reshape to (1, 28, 28, 1) for the CNN.\"\"\"\n",
    "    image_x = 28\n",
    "    image_y = 28\n",
    "    img = cv2.resize(img, (image_x, image_y))\n",
    "    img = np.array(img, dtype=np.float32)\n",
    "    img = np.reshape(img, (-1, image_x, image_y, 1))\n",
    "    return img\n",
    "\n",
    "if __name__ == '__main__':  # pragma: no cover\n",
    "    app.run(host=\"0.0.0.0\", port=80)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['book', 'rainbow', 'sun', 't-shirt', 'umbrella']"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# label_names is already a plain Python list (the server cell calls\n",
    "# .tolist() when loading it), so .tolist() here raised AttributeError\n",
    "# on a sequential re-run; display the list directly instead.\n",
    "label_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
