{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "数据集：https://serv.cusp.nyu.edu/projects/urbansounddataset/  \n",
    "\n",
    "librosa：https://github.com/librosa/librosa  \n",
    "\n",
    "分类：  \n",
    "0 = air_conditioner  \n",
    "1 = car_horn  \n",
    "2 = children_playing  \n",
    "3 = dog_bark  \n",
    "4 = drilling  \n",
    "5 = engine_idling  \n",
    "6 = gun_shot  \n",
    "7 = jackhammer  \n",
    "8 = siren  \n",
    "9 = street_music  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "import os\n",
    "import matplotlib.pyplot as plt\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import time\n",
    "import librosa # pip install librosa\n",
    "from tqdm import tqdm # pip install tqdm\n",
    "import random"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Parameters:\n",
      "BATCH_SIZE=50\n",
      "CHECKPOINT_EVERY=500\n",
      "DEV_SAMPLE_PERCENTAGE=0.2\n",
      "DROPOUT_KEEP_PROB=0.5\n",
      "EVALUATE_EVERY=50\n",
      "LR=0.005\n",
      "N_CLASSES=10\n",
      "N_HIDDEN=300\n",
      "N_INPUTS=40\n",
      "NUM_CHECKPOINTS=2\n",
      "NUM_EPOCHS=100\n",
      "PARENT_DIR=audio/\n",
      "TR_SUB_DIRS=['fold1/', 'fold2/', 'fold3/']\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Parameters\n",
    "# ==================================================\n",
    "\n",
    "# Data loading params\n",
    "# Fraction of the training data held out for validation\n",
    "tf.flags.DEFINE_float(\"dev_sample_percentage\", .2, \"Percentage of the training data to use for validation\")\n",
    "# Parent directory of the audio data\n",
    "tf.flags.DEFINE_string(\"parent_dir\", \"audio/\", \"Data source for the data.\")\n",
    "# Training sub-directories (folds)\n",
    "# NOTE(review): the default is a list, not a string; this old tf.flags API\n",
    "# does not validate the type, but a comma-separated string would be cleaner.\n",
    "tf.flags.DEFINE_string(\"tr_sub_dirs\", ['fold1/','fold2/','fold3/'], \"Data source for the data.\")\n",
    "\n",
    "# Model Hyperparameters\n",
    "# Size of one RNN input step: number of MFCC coefficients per frame\n",
    "tf.flags.DEFINE_integer(\"n_inputs\", 40, \"Number of MFCCs (default: 40)\")\n",
    "# Number of hidden units per RNN cell\n",
    "# (fixed: was DEFINE_string even though 300 is an integer)\n",
    "tf.flags.DEFINE_integer(\"n_hidden\", 300, \"Number of cells (default: 300)\")\n",
    "# Number of target classes\n",
    "tf.flags.DEFINE_integer(\"n_classes\", 10, \"Number of classes (default: 10)\")\n",
    "# Initial learning rate\n",
    "# (fixed: was DEFINE_integer even though 0.005 is a float)\n",
    "tf.flags.DEFINE_float(\"lr\", 0.005, \"Learning rate (default: 0.005)\")\n",
    "# Dropout keep probability used during training\n",
    "tf.flags.DEFINE_float(\"dropout_keep_prob\", 0.5, \"Dropout keep probability (default: 0.5)\")\n",
    "\n",
    "# Training parameters\n",
    "# Mini-batch size\n",
    "tf.flags.DEFINE_integer(\"batch_size\", 50, \"Batch Size (default: 50)\")\n",
    "# Number of passes over the training data\n",
    "tf.flags.DEFINE_integer(\"num_epochs\", 100, \"Number of training epochs (default: 100)\")\n",
    "# Evaluate on the dev set every this many steps\n",
    "tf.flags.DEFINE_integer(\"evaluate_every\", 50, \"Evaluate model on dev set after this many steps (default: 50)\")\n",
    "# Save a checkpoint every this many steps\n",
    "tf.flags.DEFINE_integer(\"checkpoint_every\", 500, \"Save model after this many steps (default: 500)\")\n",
    "# Maximum number of checkpoints to keep\n",
    "tf.flags.DEFINE_integer(\"num_checkpoints\", 2, \"Number of checkpoints to store (default: 2)\")\n",
    "\n",
    "# Parse the flags (private API of this older TF version)\n",
    "FLAGS = tf.flags.FLAGS\n",
    "FLAGS._parse_flags()\n",
    "\n",
    "# Print all parameters\n",
    "print(\"\\nParameters:\")\n",
    "for attr, value in sorted(FLAGS.__flags.items()):\n",
    "    print(\"{}={}\".format(attr.upper(), value))\n",
    "print(\"\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Collect the paths of all training wav files under parent_dir/sub_dirs\n",
    "def get_wav_files(parent_dir, sub_dirs):\n",
    "    \"\"\"Return a list of .wav/.WAV file paths found under each sub_dir.\"\"\"\n",
    "    wav_files = []\n",
    "    for sub_dir in sub_dirs:\n",
    "        wav_path = os.path.join(parent_dir, sub_dir)\n",
    "        for (dirpath, dirnames, filenames) in os.walk(wav_path):\n",
    "            for filename in filenames:\n",
    "                if filename.endswith('.wav') or filename.endswith('.WAV'):\n",
    "                    wav_files.append(os.sep.join([dirpath, filename]))\n",
    "    return wav_files\n",
    "\n",
    "# Extract the MFCC features and class label of each wav file\n",
    "def extract_features(wav_files):\n",
    "    \"\"\"Return (inputs, labels): per-file MFCC frame lists and an int label array.\"\"\"\n",
    "    inputs = []\n",
    "    labels = []\n",
    "\n",
    "    # Single pass: compute features and parse the label together\n",
    "    # (the original iterated over wav_files twice for no benefit)\n",
    "    for wav_file in tqdm(wav_files):\n",
    "        # Load the audio file\n",
    "        audio, fs = librosa.load(wav_file)\n",
    "\n",
    "        # MFCC matrix, transposed to [n_steps, n_inputs]\n",
    "        mfccs = np.transpose(librosa.feature.mfcc(y=audio, sr=fs, n_mfcc=FLAGS.n_inputs), [1, 0])\n",
    "        inputs.append(mfccs.tolist())\n",
    "\n",
    "        # UrbanSound filenames look like <fsID>-<classID>-...; the class id is\n",
    "        # the second dash-separated field. basename() is portable, unlike\n",
    "        # the original split('/') which assumed POSIX separators.\n",
    "        labels.append(os.path.basename(wav_file).split('-')[1])\n",
    "\n",
    "    # Fixed: np.int was deprecated and removed in NumPy >= 1.24; use int\n",
    "    return inputs, np.array(labels, dtype=int)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Build the list of training wav file paths\n",
    "wav_files = get_wav_files(FLAGS.parent_dir,FLAGS.tr_sub_dirs)\n",
    "# Extract MFCC features and matching labels (slow: decodes every file)\n",
    "tr_features,tr_labels = extract_features(wav_files)\n",
    "\n",
    "# Cache the extracted features to disk so later runs can skip extraction\n",
    "np.save('tr_features.npy',tr_features)\n",
    "np.save('tr_labels.npy',tr_labels)\n",
    "\n",
    "# To reuse the cache, uncomment the two lines below.\n",
    "# NOTE(review): sequences have different lengths, so tr_features is likely\n",
    "# saved as an object array; on newer NumPy, np.load would then need\n",
    "# allow_pickle=True -- verify before relying on the cache.\n",
    "# tr_features=np.load('tr_features.npy')\n",
    "# tr_labels=np.load('tr_labels.npy')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "max_len: 173\n"
     ]
    }
   ],
   "source": [
    "# Batch layout: (batch, step, input), e.g. (50, 173, 40)\n",
    "\n",
    "# Longest sequence (number of MFCC frames) in the training set\n",
    "wav_max_len = max(len(feature) for feature in tr_features)\n",
    "print(\"max_len:\", wav_max_len)\n",
    "\n",
    "# Zero-pad every sequence up to wav_max_len frames.\n",
    "# Fixed: the original appended padding rows to the tr_features entries in\n",
    "# place, silently mutating the cached feature lists; build new lists instead\n",
    "# so re-running this cell (or reusing tr_features) stays safe.\n",
    "tr_data = []\n",
    "for mfccs in tr_features:\n",
    "    pad = [[0] * FLAGS.n_inputs for _ in range(wav_max_len - len(mfccs))]\n",
    "    tr_data.append(mfccs + pad)\n",
    "\n",
    "tr_data = np.array(tr_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Shuffle the data with a fixed seed so the split is reproducible\n",
    "np.random.seed(10)\n",
    "perm = np.random.permutation(np.arange(len(tr_data)))\n",
    "x_shuffled = tr_data[perm]\n",
    "y_shuffled = tr_labels[perm]\n",
    "\n",
    "# Split train/test set\n",
    "# TODO: This is very crude, should use cross-validation\n",
    "# Hold out the last dev_sample_percentage of the shuffled data as the test set\n",
    "n_dev = int(FLAGS.dev_sample_percentage * float(len(y_shuffled)))\n",
    "train_x, test_x = x_shuffled[:-n_dev], x_shuffled[-n_dev:]\n",
    "train_y, test_y = y_shuffled[:-n_dev], y_shuffled[-n_dev:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Placeholders for the input batch, labels, and dropout keep probability\n",
    "x = tf.placeholder(\"float\", [None, wav_max_len, FLAGS.n_inputs])\n",
    "y = tf.placeholder(\"float\", [None])\n",
    "dropout = tf.placeholder(tf.float32)\n",
    "# Learning rate as a non-trainable variable so it can be decayed during training\n",
    "lr = tf.Variable(FLAGS.lr, dtype=tf.float32, trainable=False)\n",
    "\n",
    "# RNN classifier\n",
    "# Output projection weights and biases: [n_hidden] -> [n_classes]\n",
    "weights = tf.Variable(tf.truncated_normal([FLAGS.n_hidden, FLAGS.n_classes], stddev=0.1))\n",
    "biases = tf.Variable(tf.constant(0.1, shape=[FLAGS.n_classes]))\n",
    "\n",
    "# Stacked GRU network with per-layer output dropout\n",
    "num_layers = 3\n",
    "def grucell():\n",
    "    cell = tf.contrib.rnn.GRUCell(FLAGS.n_hidden)\n",
    "#     cell = tf.contrib.rnn.LSTMCell(FLAGS.n_hidden)\n",
    "    cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=dropout)\n",
    "    return cell\n",
    "cell = tf.contrib.rnn.MultiRNNCell([grucell() for _ in range(num_layers)])\n",
    "\n",
    "outputs, final_state = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)\n",
    "\n",
    "# NOTE(review): final_state[0] is the FIRST layer's final state; final_state[-1]\n",
    "# (the top layer) is the usual choice. Left unchanged because the recorded\n",
    "# results were trained with layer 0 -- confirm before switching.\n",
    "logits = tf.matmul(final_state[0], weights) + biases\n",
    "# Class probabilities, used for prediction/accuracy\n",
    "prediction = tf.nn.softmax(logits)\n",
    "\n",
    "# Convert integer labels to one-hot vectors\n",
    "one_hot_labels = tf.one_hot(indices=tf.cast(y, tf.int32), depth=FLAGS.n_classes)\n",
    "\n",
    "# Loss. Fixed: softmax_cross_entropy_with_logits must receive the raw logits;\n",
    "# the original fed it the already-softmaxed `prediction`, applying softmax\n",
    "# twice, which flattens gradients and distorts the reported loss.\n",
    "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_labels))\n",
    "\n",
    "# Optimizer\n",
    "optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(cross_entropy)\n",
    "\n",
    "# Evaluation: fraction of samples whose argmax prediction matches the label\n",
    "correct_pred = tf.equal(tf.argmax(prediction,1), tf.argmax(one_hot_labels,1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def batch_iter(data, batch_size, num_epochs, shuffle=True):\n",
    "    \"\"\"Yield successive mini-batches over `data` for `num_epochs` epochs.\n",
    "\n",
    "    Each epoch optionally reshuffles the data; the last batch of an epoch\n",
    "    may be smaller than batch_size.\n",
    "    \"\"\"\n",
    "    data = np.array(data)\n",
    "    data_size = len(data)\n",
    "    # Ceiling division: batches needed to cover the data once per epoch\n",
    "    num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\n",
    "    print(\"num_batches_per_epoch:\", num_batches_per_epoch)\n",
    "    for _ in range(num_epochs):\n",
    "        # Reshuffle at the start of every epoch if requested\n",
    "        if shuffle:\n",
    "            epoch_data = data[np.random.permutation(np.arange(data_size))]\n",
    "        else:\n",
    "            epoch_data = data\n",
    "        for batch_num in range(num_batches_per_epoch):\n",
    "            start = batch_num * batch_size\n",
    "            # Slicing past the end is safe and clips like min(end, data_size)\n",
    "            yield epoch_data[start:start + batch_size]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "num_batches_per_epoch: 43\n",
      "Iter 50, loss 1.95912, tr_acc 0.51350, ts_acc 0.51210, lr 0.00495\n",
      "Iter 100, loss 1.89800, tr_acc 0.58892, ts_acc 0.56797, lr 0.00490\n",
      "Iter 150, loss 1.86715, tr_acc 0.60009, ts_acc 0.58473, lr 0.00485\n",
      "Iter 200, loss 1.86456, tr_acc 0.60708, ts_acc 0.59032, lr 0.00480\n",
      "Iter 250, loss 1.79514, tr_acc 0.68669, ts_acc 0.64991, lr 0.00475\n",
      "Iter 300, loss 1.77812, tr_acc 0.70112, ts_acc 0.64246, lr 0.00471\n",
      "Iter 350, loss 1.76230, tr_acc 0.72393, ts_acc 0.64246, lr 0.00466\n",
      "Iter 400, loss 1.75145, tr_acc 0.72393, ts_acc 0.66853, lr 0.00461\n",
      "Iter 450, loss 1.73301, tr_acc 0.74721, ts_acc 0.67784, lr 0.00457\n",
      "Iter 500, loss 1.70690, tr_acc 0.77002, ts_acc 0.67412, lr 0.00452\n",
      "Saved model checkpoint to sounds_models/model-500\n",
      "\n",
      "Iter 550, loss 1.70369, tr_acc 0.76955, ts_acc 0.68343, lr 0.00448\n",
      "Iter 600, loss 1.69183, tr_acc 0.78259, ts_acc 0.70577, lr 0.00443\n",
      "Iter 650, loss 1.68992, tr_acc 0.78492, ts_acc 0.69832, lr 0.00439\n",
      "Iter 700, loss 1.73857, tr_acc 0.74255, ts_acc 0.64804, lr 0.00434\n",
      "Iter 750, loss 1.73377, tr_acc 0.74209, ts_acc 0.67039, lr 0.00430\n",
      "Iter 800, loss 1.71970, tr_acc 0.75559, ts_acc 0.67784, lr 0.00426\n",
      "Iter 850, loss 1.68073, tr_acc 0.80074, ts_acc 0.75233, lr 0.00421\n",
      "Iter 900, loss 1.67387, tr_acc 0.80214, ts_acc 0.74488, lr 0.00417\n",
      "Iter 950, loss 1.65598, tr_acc 0.82821, ts_acc 0.75605, lr 0.00413\n",
      "Iter 1000, loss 1.61437, tr_acc 0.86266, ts_acc 0.80261, lr 0.00409\n",
      "Saved model checkpoint to sounds_models/model-1000\n",
      "\n",
      "Iter 1050, loss 1.62743, tr_acc 0.85102, ts_acc 0.79702, lr 0.00405\n",
      "Iter 1100, loss 1.60055, tr_acc 0.87477, ts_acc 0.79888, lr 0.00401\n",
      "Iter 1150, loss 1.60181, tr_acc 0.87477, ts_acc 0.81564, lr 0.00397\n",
      "Iter 1200, loss 1.59264, tr_acc 0.88175, ts_acc 0.79888, lr 0.00393\n",
      "Iter 1250, loss 1.58316, tr_acc 0.88873, ts_acc 0.83240, lr 0.00389\n",
      "Iter 1300, loss 1.57479, tr_acc 0.89525, ts_acc 0.81378, lr 0.00385\n",
      "Iter 1350, loss 1.57565, tr_acc 0.89432, ts_acc 0.81378, lr 0.00381\n",
      "Iter 1400, loss 1.56851, tr_acc 0.90363, ts_acc 0.81192, lr 0.00377\n",
      "Iter 1450, loss 1.56865, tr_acc 0.89991, ts_acc 0.81564, lr 0.00374\n",
      "Iter 1500, loss 1.57093, tr_acc 0.89898, ts_acc 0.80074, lr 0.00370\n",
      "Saved model checkpoint to sounds_models/model-1500\n",
      "\n",
      "Iter 1550, loss 1.56587, tr_acc 0.90410, ts_acc 0.81192, lr 0.00366\n",
      "Iter 1600, loss 1.56132, tr_acc 0.90596, ts_acc 0.82123, lr 0.00362\n",
      "Iter 1650, loss 1.55432, tr_acc 0.91108, ts_acc 0.83240, lr 0.00359\n",
      "Iter 1700, loss 1.55300, tr_acc 0.91155, ts_acc 0.83054, lr 0.00355\n",
      "Iter 1750, loss 1.54980, tr_acc 0.91387, ts_acc 0.83799, lr 0.00352\n",
      "Iter 1800, loss 1.54879, tr_acc 0.91387, ts_acc 0.83613, lr 0.00348\n",
      "Iter 1850, loss 1.54744, tr_acc 0.91434, ts_acc 0.83426, lr 0.00345\n",
      "Iter 1900, loss 1.59237, tr_acc 0.87430, ts_acc 0.80261, lr 0.00341\n",
      "Iter 1950, loss 1.58382, tr_acc 0.88128, ts_acc 0.80633, lr 0.00338\n",
      "Iter 2000, loss 1.55718, tr_acc 0.90596, ts_acc 0.83613, lr 0.00334\n",
      "Saved model checkpoint to sounds_models/model-2000\n",
      "\n",
      "Iter 2050, loss 1.55244, tr_acc 0.91341, ts_acc 0.82309, lr 0.00331\n",
      "Iter 2100, loss 1.55559, tr_acc 0.91061, ts_acc 0.82682, lr 0.00328\n",
      "Iter 2150, loss 1.55088, tr_acc 0.91248, ts_acc 0.83799, lr 0.00325\n",
      "Iter 2200, loss 1.54685, tr_acc 0.91480, ts_acc 0.83799, lr 0.00321\n",
      "Iter 2250, loss 1.54560, tr_acc 0.91574, ts_acc 0.84358, lr 0.00318\n",
      "Iter 2300, loss 1.54464, tr_acc 0.91620, ts_acc 0.84544, lr 0.00315\n",
      "Iter 2350, loss 1.54302, tr_acc 0.91760, ts_acc 0.83799, lr 0.00312\n",
      "Iter 2400, loss 1.54142, tr_acc 0.91946, ts_acc 0.83985, lr 0.00309\n",
      "Iter 2450, loss 1.57125, tr_acc 0.90130, ts_acc 0.80633, lr 0.00306\n",
      "Iter 2500, loss 1.57513, tr_acc 0.89944, ts_acc 0.79702, lr 0.00303\n",
      "Saved model checkpoint to sounds_models/model-2500\n",
      "\n",
      "Iter 2550, loss 1.56753, tr_acc 0.90410, ts_acc 0.79702, lr 0.00299\n",
      "Iter 2600, loss 1.55923, tr_acc 0.91155, ts_acc 0.82495, lr 0.00296\n",
      "Iter 2650, loss 1.55180, tr_acc 0.91760, ts_acc 0.82123, lr 0.00294\n",
      "Iter 2700, loss 1.54575, tr_acc 0.92132, ts_acc 0.82682, lr 0.00291\n",
      "Iter 2750, loss 1.54207, tr_acc 0.92644, ts_acc 0.83240, lr 0.00288\n",
      "Iter 2800, loss 1.53986, tr_acc 0.92691, ts_acc 0.82682, lr 0.00285\n",
      "Iter 2850, loss 1.53751, tr_acc 0.92970, ts_acc 0.83054, lr 0.00282\n",
      "Iter 2900, loss 1.53831, tr_acc 0.92877, ts_acc 0.82868, lr 0.00279\n",
      "Iter 2950, loss 1.53275, tr_acc 0.93203, ts_acc 0.83426, lr 0.00276\n",
      "Iter 3000, loss 1.53016, tr_acc 0.93296, ts_acc 0.83240, lr 0.00274\n",
      "Saved model checkpoint to sounds_models/model-3000\n",
      "\n",
      "Iter 3050, loss 1.52798, tr_acc 0.93343, ts_acc 0.83985, lr 0.00271\n",
      "Iter 3100, loss 1.52720, tr_acc 0.93343, ts_acc 0.84916, lr 0.00268\n",
      "Iter 3150, loss 1.52653, tr_acc 0.93389, ts_acc 0.84916, lr 0.00265\n",
      "Iter 3200, loss 1.52619, tr_acc 0.93389, ts_acc 0.85475, lr 0.00263\n",
      "Iter 3250, loss 1.52577, tr_acc 0.93389, ts_acc 0.85102, lr 0.00260\n",
      "Iter 3300, loss 1.52522, tr_acc 0.93529, ts_acc 0.84916, lr 0.00258\n",
      "Iter 3350, loss 1.52475, tr_acc 0.93529, ts_acc 0.85102, lr 0.00255\n",
      "Iter 3400, loss 1.52404, tr_acc 0.93575, ts_acc 0.85102, lr 0.00252\n",
      "Iter 3450, loss 1.52390, tr_acc 0.93575, ts_acc 0.85475, lr 0.00250\n",
      "Iter 3500, loss 1.52377, tr_acc 0.93575, ts_acc 0.85102, lr 0.00247\n",
      "Saved model checkpoint to sounds_models/model-3500\n",
      "\n",
      "Iter 3550, loss 1.52357, tr_acc 0.93575, ts_acc 0.85102, lr 0.00245\n",
      "Iter 3600, loss 1.52344, tr_acc 0.93575, ts_acc 0.85102, lr 0.00242\n",
      "Iter 3650, loss 1.52335, tr_acc 0.93575, ts_acc 0.84916, lr 0.00240\n",
      "Iter 3700, loss 1.52327, tr_acc 0.93575, ts_acc 0.84916, lr 0.00238\n",
      "Iter 3750, loss 1.52321, tr_acc 0.93575, ts_acc 0.85289, lr 0.00235\n",
      "Iter 3800, loss 1.52315, tr_acc 0.93575, ts_acc 0.85475, lr 0.00233\n",
      "Iter 3850, loss 1.52309, tr_acc 0.93575, ts_acc 0.84916, lr 0.00231\n",
      "Iter 3900, loss 1.52299, tr_acc 0.93575, ts_acc 0.84730, lr 0.00228\n",
      "Iter 3950, loss 1.52297, tr_acc 0.93575, ts_acc 0.84916, lr 0.00226\n",
      "Iter 4000, loss 1.52294, tr_acc 0.93575, ts_acc 0.84916, lr 0.00224\n",
      "Saved model checkpoint to sounds_models/model-4000\n",
      "\n",
      "Iter 4050, loss 1.52286, tr_acc 0.93575, ts_acc 0.84544, lr 0.00222\n",
      "Iter 4100, loss 1.52282, tr_acc 0.93575, ts_acc 0.84916, lr 0.00219\n",
      "Iter 4150, loss 1.52279, tr_acc 0.93575, ts_acc 0.84916, lr 0.00217\n",
      "Iter 4200, loss 1.52277, tr_acc 0.93575, ts_acc 0.84730, lr 0.00215\n",
      "Iter 4250, loss 1.52275, tr_acc 0.93575, ts_acc 0.84916, lr 0.00213\n",
      "Iter 4300, loss 1.52273, tr_acc 0.93575, ts_acc 0.84544, lr 0.00211\n"
     ]
    }
   ],
   "source": [
    "# Initialize all graph variables\n",
    "init = tf.global_variables_initializer()\n",
    "# Saver for checkpoints. Fixed: FLAGS.num_checkpoints was declared and\n",
    "# printed but never used; pass it so only that many checkpoints are kept\n",
    "# (the Saver default is 5).\n",
    "saver = tf.train.Saver(max_to_keep=FLAGS.num_checkpoints)\n",
    "\n",
    "with tf.Session() as sess:\n",
    "    sess.run(init)\n",
    "\n",
    "    # Generate batches of (features, label) pairs\n",
    "    batches = batch_iter(list(zip(train_x, train_y)), FLAGS.batch_size, FLAGS.num_epochs)\n",
    "\n",
    "    # 1-based step counter (replaces the manual i = i + 1)\n",
    "    for i, batch in enumerate(batches, start=1):\n",
    "        x_batch, y_batch = zip(*batch)\n",
    "        # One optimization step with dropout enabled\n",
    "        sess.run([optimizer], feed_dict={x: x_batch, y: y_batch, dropout: FLAGS.dropout_keep_prob})\n",
    "\n",
    "        # Periodic evaluation: decay lr by 0.99 per evaluation interval, then\n",
    "        # report loss/accuracy with dropout disabled (keep prob 1.0).\n",
    "        # NOTE(review): tf.assign inside the loop adds a new op to the graph\n",
    "        # on every evaluation; a pre-built assign op fed via a placeholder\n",
    "        # would avoid the graph growth. Evaluating on the FULL train set each\n",
    "        # time is also slow/memory-hungry for larger datasets.\n",
    "        if i % FLAGS.evaluate_every == 0:\n",
    "            sess.run(tf.assign(lr, FLAGS.lr * (0.99 ** (i // FLAGS.evaluate_every))))\n",
    "            learning_rate = sess.run(lr)\n",
    "            tr_acc, _loss = sess.run([accuracy, cross_entropy], feed_dict={x: train_x, y: train_y, dropout: 1.0})\n",
    "            ts_acc = sess.run(accuracy, feed_dict={x: test_x, y: test_y, dropout: 1.0})\n",
    "            print(\"Iter {}, loss {:.5f}, tr_acc {:.5f}, ts_acc {:.5f}, lr {:.5f}\".format(i, _loss, tr_acc, ts_acc, learning_rate))\n",
    "\n",
    "        # Save a model checkpoint\n",
    "        if i % FLAGS.checkpoint_every == 0:\n",
    "            path = saver.save(sess, \"sounds_models/model\", global_step=i)\n",
    "            print(\"Saved model checkpoint to {}\\n\".format(path))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "anaconda-cloud": {},
  "kernelspec": {
   "display_name": "Python [default]",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
