{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from __future__ import print_function\n",
    "from tensorflow.python.ops.rnn import bidirectional_dynamic_rnn\n",
    "import os,time\n",
    "import numpy as np\n",
    "import json\n",
    "from functools import wraps"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "ename": "ValueError",
     "evalue": "Attempt to reuse RNNCell <tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl.LSTMCell object at 0x7f1ecfc4f950> with a different variable scope than its first use.  First use of cell was with scope 'bidirectional_rnn/fw/lstm_cell', this attempt is with scope 'bidirectional_rnn/bw/lstm_cell'.  Please create a new instance of the cell if you would like it to use a different set of weights.  If before you were using: MultiRNNCell([LSTMCell(...)] * num_layers), change to: MultiRNNCell([LSTMCell(...) for _ in range(num_layers)]).  If before you were using the same cell instance as both the forward and reverse cell of a bidirectional RNN, simply create two instances (one for forward, one for reverse).  In May 2017, we will start transitioning this cell's behavior to use existing stored weights, if any, when it is called with scope=None (which can lead to silent model degradation, so this error will remain until then.)",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-3-f85a5988d603>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     15\u001b[0m     \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfloat64\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     16\u001b[0m     \u001b[0msequence_length\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mX_lengths\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 17\u001b[0;31m     inputs=X)\n\u001b[0m\u001b[1;32m     18\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     19\u001b[0m \u001b[0moutput_fw\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutput_bw\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moutputs\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.pyc\u001b[0m in \u001b[0;36mbidirectional_dynamic_rnn\u001b[0;34m(cell_fw, cell_bw, inputs, sequence_length, initial_state_fw, initial_state_bw, dtype, parallel_iterations, swap_memory, time_major, scope)\u001b[0m\n\u001b[1;32m    374\u001b[0m           \u001b[0minitial_state\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minitial_state_bw\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    375\u001b[0m           \u001b[0mparallel_iterations\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mparallel_iterations\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mswap_memory\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mswap_memory\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 376\u001b[0;31m           time_major=time_major, scope=bw_scope)\n\u001b[0m\u001b[1;32m    377\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    378\u001b[0m   output_bw = _reverse(\n",
      "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.pyc\u001b[0m in \u001b[0;36mdynamic_rnn\u001b[0;34m(cell, inputs, sequence_length, initial_state, dtype, parallel_iterations, swap_memory, time_major, scope)\u001b[0m\n\u001b[1;32m    551\u001b[0m         \u001b[0mswap_memory\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mswap_memory\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    552\u001b[0m         \u001b[0msequence_length\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msequence_length\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 553\u001b[0;31m         dtype=dtype)\n\u001b[0m\u001b[1;32m    554\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    555\u001b[0m     \u001b[0;31m# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.pyc\u001b[0m in \u001b[0;36m_dynamic_rnn_loop\u001b[0;34m(cell, inputs, initial_state, parallel_iterations, swap_memory, sequence_length, dtype)\u001b[0m\n\u001b[1;32m    718\u001b[0m       \u001b[0mloop_vars\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutput_ta\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstate\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    719\u001b[0m       \u001b[0mparallel_iterations\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mparallel_iterations\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 720\u001b[0;31m       swap_memory=swap_memory)\n\u001b[0m\u001b[1;32m    721\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    722\u001b[0m   \u001b[0;31m# Unpack final output if not using output tuples.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.pyc\u001b[0m in \u001b[0;36mwhile_loop\u001b[0;34m(cond, body, loop_vars, shape_invariants, parallel_iterations, back_prop, swap_memory, name)\u001b[0m\n\u001b[1;32m   2621\u001b[0m     \u001b[0mcontext\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mWhileContext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mparallel_iterations\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mback_prop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mswap_memory\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   2622\u001b[0m     \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_to_collection\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGraphKeys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mWHILE_CONTEXT\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2623\u001b[0;31m     \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mBuildLoop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcond\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbody\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloop_vars\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape_invariants\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   2624\u001b[0m     \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   2625\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.pyc\u001b[0m in \u001b[0;36mBuildLoop\u001b[0;34m(self, pred, body, loop_vars, shape_invariants)\u001b[0m\n\u001b[1;32m   2454\u001b[0m       \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mEnter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   2455\u001b[0m       original_body_result, exit_vars = self._BuildLoop(\n\u001b[0;32m-> 2456\u001b[0;31m           pred, body, original_loop_vars, loop_vars, shape_invariants)\n\u001b[0m\u001b[1;32m   2457\u001b[0m     \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   2458\u001b[0m       \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mExit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.pyc\u001b[0m in \u001b[0;36m_BuildLoop\u001b[0;34m(self, pred, body, original_loop_vars, loop_vars, shape_invariants)\u001b[0m\n\u001b[1;32m   2404\u001b[0m         \u001b[0mstructure\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moriginal_loop_vars\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   2405\u001b[0m         flat_sequence=vars_for_body_with_tensor_arrays)\n\u001b[0;32m-> 2406\u001b[0;31m     \u001b[0mbody_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbody\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mpacked_vars_for_body\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   2407\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mnest\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_sequence\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbody_result\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   2408\u001b[0m       \u001b[0mbody_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mbody_result\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.pyc\u001b[0m in \u001b[0;36m_time_step\u001b[0;34m(time, output_ta_t, state)\u001b[0m\n\u001b[1;32m    701\u001b[0m           \u001b[0mcall_cell\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcall_cell\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    702\u001b[0m           \u001b[0mstate_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstate_size\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 703\u001b[0;31m           skip_conditionals=True)\n\u001b[0m\u001b[1;32m    704\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    705\u001b[0m       \u001b[0;34m(\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnew_state\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcall_cell\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.pyc\u001b[0m in \u001b[0;36m_rnn_step\u001b[0;34m(time, sequence_length, min_sequence_length, max_sequence_length, zero_output, state, call_cell, state_size, skip_conditionals)\u001b[0m\n\u001b[1;32m    175\u001b[0m     \u001b[0;31m# steps.  This is faster when max_seq_len is equal to the number of unrolls\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    176\u001b[0m     \u001b[0;31m# (which is typical for dynamic_rnn).\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 177\u001b[0;31m     \u001b[0mnew_output\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnew_state\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcall_cell\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    178\u001b[0m     \u001b[0mnest\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0massert_same_structure\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstate\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnew_state\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    179\u001b[0m     \u001b[0mnew_state\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnest\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mflatten\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnew_state\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.pyc\u001b[0m in \u001b[0;36m<lambda>\u001b[0;34m()\u001b[0m\n\u001b[1;32m    689\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    690\u001b[0m     \u001b[0minput_t\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnest\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpack_sequence_as\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstructure\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mflat_sequence\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minput_t\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 691\u001b[0;31m     \u001b[0mcall_cell\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mlambda\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mcell\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_t\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstate\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    692\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    693\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0msequence_length\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.pyc\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, inputs, state, scope)\u001b[0m\n\u001b[1;32m    396\u001b[0m     with _checked_scope(self, scope or \"lstm_cell\",\n\u001b[1;32m    397\u001b[0m                         \u001b[0minitializer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_initializer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 398\u001b[0;31m                         reuse=self._reuse) as unit_scope:\n\u001b[0m\u001b[1;32m    399\u001b[0m       \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_num_unit_shards\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    400\u001b[0m         unit_scope.set_partitioner(\n",
      "\u001b[0;32m/usr/lib/python2.7/contextlib.pyc\u001b[0m in \u001b[0;36m__enter__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m     15\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0m__enter__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     16\u001b[0m         \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 17\u001b[0;31m             \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgen\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     18\u001b[0m         \u001b[0;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     19\u001b[0m             \u001b[0;32mraise\u001b[0m \u001b[0mRuntimeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"generator didn't yield\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.pyc\u001b[0m in \u001b[0;36m_checked_scope\u001b[0;34m(cell, scope, reuse, **kwargs)\u001b[0m\n\u001b[1;32m     75\u001b[0m             \u001b[0;34m\"this error will remain until then.)\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     76\u001b[0m             % (cell, cell_scope.name, scope_name, type(cell).__name__,\n\u001b[0;32m---> 77\u001b[0;31m                type(cell).__name__))\n\u001b[0m\u001b[1;32m     78\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     79\u001b[0m       \u001b[0mweights_found\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mFalse\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mValueError\u001b[0m: Attempt to reuse RNNCell <tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl.LSTMCell object at 0x7f1ecfc4f950> with a different variable scope than its first use.  First use of cell was with scope 'bidirectional_rnn/fw/lstm_cell', this attempt is with scope 'bidirectional_rnn/bw/lstm_cell'.  Please create a new instance of the cell if you would like it to use a different set of weights.  If before you were using: MultiRNNCell([LSTMCell(...)] * num_layers), change to: MultiRNNCell([LSTMCell(...) for _ in range(num_layers)]).  If before you were using the same cell instance as both the forward and reverse cell of a bidirectional RNN, simply create two instances (one for forward, one for reverse).  In May 2017, we will start transitioning this cell's behavior to use existing stored weights, if any, when it is called with scope=None (which can lead to silent model degradation, so this error will remain until then.)"
     ]
    }
   ],
   "source": [
    "tf.reset_default_graph()\n",
    "\n",
    "# Create input data\n",
    "X = np.random.randn(2, 10, 8)\n",
    "\n",
    "# The second example is of length 6 \n",
    "X[1,6:] = 0\n",
    "X_lengths = [10, 6]\n",
    "\n",
    "cell = tf.contrib.rnn.LSTMCell(num_units=64, state_is_tuple=True)\n",
    "\n",
    "outputs, states  = bidirectional_dynamic_rnn(\n",
    "    cell_fw=cell,\n",
    "    cell_bw=cell,\n",
    "    dtype=tf.float64,\n",
    "    sequence_length=X_lengths,\n",
    "    inputs=X)\n",
    "\n",
    "output_fw, output_bw = outputs\n",
    "states_fw, states_bw = states\n",
    "\n",
    "result = tf.contrib.learn.run_n(\n",
    "    {\"output_fw\": output_fw, \"output_bw\": output_bw, \"states_fw\": states_fw, \"states_bw\": states_bw},\n",
    "    n=1,\n",
    "    feed_dict=None)\n",
    "\n",
    "print(result[0][\"output_fw\"].shape)\n",
    "print(result[0][\"output_bw\"].shape)\n",
    "print(result[0][\"states_fw\"].h.shape)\n",
    "print(result[0][\"states_bw\"].h.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "def describe(func):\n",
    "    ''' wrap function,to add some descriptions for function and its running time\n",
    "    '''\n",
    "    @wraps(func)\n",
    "    def wrapper(*args, **kwargs):\n",
    "        print(func.__name__+'...')\n",
    "        start = time.time()\n",
    "        result = func(*args, **kwargs)\n",
    "        end = time.time()\n",
    "        print(str(func.__name__+' in '+ str(end-start)+' s'))\n",
    "        return result\n",
    "    return wrapper"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
   "source": [
     "# Load the vocabulary (字典表)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "def loadatcvocab(vocabfile='atcvocab.txt'):\n",
    "    char_map = {'<UNK>':0}\n",
    "    index_map = {0:'<UNK>'}\n",
    "    with open(vocabfile, 'r') as f:\n",
    "        for line in f.readlines():\n",
    "            #print(line)\n",
    "            d = json.loads(line)\n",
    "            index_map[d['key']] = d['word']\n",
    "            char_map[d['word']] = d['key']\n",
    "            #print(d['key'],d['word'])\n",
    "    return char_map,index_map"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "def output_to_sequence(lmt ,vocab):\n",
    "    ''' convert the output into sequences of characters or phonemes\n",
    "    '''\n",
    "   \n",
    "    sequences = []\n",
    "    start = 0\n",
    "    sequences.append([])\n",
    "    for i in range(len(lmt[0])):\n",
    "        if lmt[0][i][0] == start:\n",
    "            sequences[start].append(lmt[1][i])\n",
    "        else:\n",
    "            start = start + 1\n",
    "            sequences.append([])\n",
    "\n",
    "    #here, we only print the first sequence of batch\n",
    "    indexes = sequences[0] #here, we only print the first sequence of batch\n",
    "\n",
    "    if vocab != None:\n",
    "        seq = []\n",
    "        for ind in indexes:\n",
    "             seq.append(vocab[ind])\n",
    "        seq = ''.join(seq)\n",
    "        return seq\n",
    "    else:\n",
    "        raise TypeError('vocabaray should be exists!')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "@describe\n",
    "def load_batched_data(mfccPath, labelPath, batchSize,maxTimeSteps=None,npz=False):\n",
    "    '''returns 3-element tuple: batched data (list), maxTimeLength (int), and\n",
    "       total number of samples (int)\n",
    "       if maxTimeSteps not None ，then use max(maxTimeSteps,localmaxlen),temp use only,to be change per batch max\n",
    "    '''\n",
    "    if not npz:\n",
    "        return data_lists_to_batches([np.load(os.path.join(mfccPath, fn)) for fn in os.listdir(mfccPath)],\n",
    "                                 [np.load(os.path.join(labelPath, fn)) for fn in os.listdir(labelPath)],\n",
    "                                 batchSize, maxTimeSteps) + \\\n",
    "                                (len(os.listdir(mfccPath)),)\n",
    "    else:\n",
    "        inputlist = []\n",
    "        targetlist = []\n",
    "        for fn in os.listdir(mfccPath):\n",
    "            it = np.load(os.path.join(mfccPath, fn))\n",
    "            inputlist.append(it['feature'])\n",
    "            targetlist.append(it['label'])\n",
    "        return data_lists_to_batches(inputlist,targetlist, batchSize, maxTimeSteps) +  (len(os.listdir(mfccPath)),)   "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
   "source": [
     "inputList is a list of num-utterances arrays, each of shape (num-features, time-steps)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "def data_lists_to_batches(inputList, targetList, batchSize, maxTimeSteps=None):\n",
    "    ''' padding the input list to a same dimension, integrate all data into batchInputs\n",
    "    '''\n",
    "    assert len(inputList) == len(targetList)\n",
    "    # dimensions of inputList:batch*39*time_length\n",
    "    print('data_lists_to_batches',len(inputList),inputList[0].shape)\n",
    "    nFeatures = inputList[0].shape[0]\n",
    "    maxLength = 0\n",
    "    for inp in inputList:\n",
    "        # find the max time_length\n",
    "        maxLength = max(maxLength, inp.shape[1])\n",
    "\n",
    "    if(maxTimeSteps):\n",
    "        maxLength = max(maxLength, maxTimeSteps)\n",
    "        \n",
    "    # randIxs is the shuffled index from range(0,len(inputList))\n",
    "    randIxs = np.random.permutation(len(inputList))\n",
    "    start, end = (0, batchSize)\n",
    "    dataBatches = []\n",
    "\n",
    "    while end <= len(inputList):\n",
    "        # batchSeqLengths store the time-length of each sample in a mini-batch\n",
    "        batchSeqLengths = np.zeros(batchSize)\n",
    "\n",
    "        # randIxs is the shuffled index of input list\n",
    "        for batchI, origI in enumerate(randIxs[start:end]):\n",
    "            batchSeqLengths[batchI] = inputList[origI].shape[-1]\n",
    "\n",
    "        batchInputs = np.zeros((maxLength, batchSize, nFeatures))\n",
    "        batchTargetList = []\n",
    "        for batchI, origI in enumerate(randIxs[start:end]):\n",
    "            # padSecs is the length of padding\n",
    "            padSecs = maxLength - inputList[origI].shape[1]\n",
    "            # numpy.pad pad the inputList[origI] with zeos at the tail\n",
    "            batchInputs[:,batchI,:] = np.pad(inputList[origI].T, ((0,padSecs),(0,0)), 'constant', constant_values=0)\n",
    "            # target label\n",
    "            batchTargetList.append(targetList[origI])\n",
    "        dataBatches.append((batchInputs, list_to_sparse_tensor(batchTargetList), batchSeqLengths))\n",
    "        start += batchSize\n",
    "        end += batchSize\n",
    "    return (dataBatches, maxLength)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "def list_to_sparse_tensor(targetList):\n",
    "    ''' turn 2-D List to SparseTensor\n",
    "    '''\n",
    "    indices = [] #index\n",
    "    vals = [] #value     \n",
    "    for tI, target in enumerate(targetList):\n",
    "        for seqI, val in enumerate(target):\n",
    "            indices.append([tI, seqI])\n",
    "            vals.append(val)\n",
    "    shape = [len(targetList), np.asarray(indices).max(axis=0)[1]+1] #shape\n",
    "    return (np.array(indices), np.array(vals), np.array(shape))\n",
    "   "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
     "def build_multi_dynamic_brnn(args,\n",
     "                             maxTimeSteps,\n",
     "                             inputX,\n",
     "                             cell_fn,\n",
     "                             seqLengths,\n",
     "                             time_major=True):\n",
     "    '''Stack args.num_layer bidirectional dynamic RNN layers over inputX.\n",
     "\n",
     "    Each layer builds a fresh forward cell and a fresh backward cell via\n",
     "    cell_fn, sums the two directions' outputs, and applies dropout before\n",
     "    feeding the next layer.\n",
     "\n",
     "    Args:\n",
     "        args: config object providing num_layer, num_hidden, activation,\n",
     "              keep_prob and batch_size.\n",
     "        maxTimeSteps: number of time steps the final output is split into.\n",
     "        inputX: time-major input tensor; assumed shape\n",
     "                [max_time, batch_size, num_feature] -- TODO confirm caller.\n",
     "        cell_fn: RNN cell constructor, e.g. tf.contrib.rnn.GRUCell.\n",
     "        seqLengths: per-sample sequence lengths for dynamic unrolling.\n",
     "        time_major: unused; the inner call hard-codes time_major=True.\n",
     "\n",
     "    Returns:\n",
     "        fbHrs: list of maxTimeSteps tensors, each [batch_size, num_hidden].\n",
     "        NOTE(review): fbHrs is assigned only on the last loop iteration, so\n",
     "        args.num_layer must be >= 1 or a NameError is raised.\n",
     "    '''\n",
     "    hid_input = inputX\n",
     "    print(hid_input.shape)\n",
     "    # add conv \n",
     "    #with tf.variable_scope('conv1'):\n",
     "    #\tinpt = tf.reshape(inputX,[maxTimeSteps,args.batch_size,args.num_feature,1])\n",
     "    #\tconv1 = build_conv_layer(inpt,[3,3,1,1],1,name='conv1')\n",
     "    #\thid_input = tf.reshape(conv1,[maxTimeSteps,args.batch_size,args.num_feature])\n",
     "    #print(hid_input.shape)\n",
     "    # end conv\n",
     "    for i in range(args.num_layer):\n",
     "        scope = 'DBRNN_' + str(i + 1)\n",
     "        # fresh cells per layer AND per direction -- a cell instance cannot\n",
     "        # be shared between the fw and bw directions\n",
     "        forward_cell = cell_fn(args.num_hidden, activation=args.activation)\n",
     "        backward_cell = cell_fn(args.num_hidden, activation=args.activation)\n",
     "        # tensor of shape: [max_time, batch_size, input_size]\n",
     "\n",
     "        outputs, output_states = bidirectional_dynamic_rnn(forward_cell, backward_cell,\n",
     "                                                           inputs=hid_input,\n",
     "                                                           dtype=tf.float32,\n",
     "                                                           sequence_length=seqLengths,\n",
     "                                                           time_major=True,\n",
     "                                                           scope=scope)\n",
     "        # forward output, backward output\n",
     "        # tensor of shape: [max_time, batch_size, input_size]\n",
     "        output_fw, output_bw = outputs\n",
     "        # forward states, backward states\n",
     "        output_state_fw, output_state_bw = output_states\n",
     "        \n",
     "\n",
     "        # output_fb = tf.concat(2, [output_fw, output_bw])\n",
     "        output_fb = tf.concat([output_fw, output_bw], 2)\n",
     "        print(output_fb.shape)\n",
     "        shape = output_fb.get_shape().as_list()\n",
     "        print('shape:',shape[0],shape[1],shape[2])\n",
     "        # split the concatenated last axis back into (2, num_hidden) so the\n",
     "        # two directions can be summed instead of kept concatenated\n",
     "        if(shape[0]):\n",
     "            output_fb = tf.reshape(output_fb, [shape[0], shape[1], 2, int(shape[2] / 2)])\n",
     "        else:\n",
     "            # static time dimension is unknown (None): use -1 in the reshape\n",
     "            output_fb = tf.reshape(output_fb, [-1, shape[1], 2, int(shape[2] / 2)])\n",
     "        # sum forward + backward outputs -> [max_time, batch, num_hidden]\n",
     "        hidden = tf.reduce_sum(output_fb, 2)\n",
     "        # NOTE(review): is_training is hard-coded True, so dropout is also\n",
     "        # applied at inference time -- confirm this is intended\n",
     "        hidden = tf.contrib.layers.dropout(hidden, keep_prob=args.keep_prob, is_training=(True))\n",
     "        \n",
     "        print(scope)        \n",
     "        print('inputs',hid_input)\n",
     "        print('outputs',output_fw,output_bw)\n",
     "        print('outputs_state',output_state_fw,output_state_bw)\n",
     "        print('output_fb',output_fb,shape)\n",
     "        \n",
     "        if i != args.num_layer - 1:\n",
     "            hid_input = hidden\n",
     "        else:\n",
     "            # last layer: flatten time*batch, then split back into one\n",
     "            # [batch_size, num_hidden] tensor per time step\n",
     "            outputXrs = tf.reshape(hidden, [-1, args.num_hidden])\n",
     "            print('outputXrs shape:',outputXrs.shape)\n",
     "            # output_list = tf.split(0, maxTimeSteps, outputXrs)\n",
     "            output_list = tf.split(outputXrs, maxTimeSteps, 0)\n",
     "            print('output_list len:',len(output_list))\n",
     "            fbHrs = [tf.reshape(t, [args.batch_size, args.num_hidden]) for t in output_list]\n",
     "            \n",
     "    return fbHrs\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
     "class dotdict(dict):\n",
     "    '''A dict whose items can also be read/written as attributes (d.key).\n",
     "\n",
     "    NOTE: because __getattr__ is dict.get, reading a missing key yields\n",
     "    None instead of raising AttributeError.\n",
     "    '''\n",
     "    __getattr__ = dict.get\n",
     "    __setattr__ = dict.__setitem__\n",
     "    __delattr__ = dict.__delitem__"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "num_epochs = 1\n",
    "batch_size=8\n",
    "num_feature=39\n",
    "num_hidden = 512\n",
    "num_classes=686\n",
    "num_layer=3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "load_batched_data...\n",
      "data_lists_to_batches 12148 (39, 109)\n",
      "load_batched_data in 8.99689483643 s\n"
     ]
    }
   ],
   "source": [
    "batchedData, maxTimeSteps, totalN = load_batched_data('atc_test_feature/train/0/','atc_test_feature/train/0/',batch_size,npz=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "load_batched_data...\n",
      "data_lists_to_batches 1519 (39, 193)\n",
      "load_batched_data in 1.03461289406 s\n"
     ]
    }
   ],
   "source": [
    "devbatchedData, devmaxTimeSteps, devtotalN = load_batched_data('atc_test_feature/dev/0','atc_test_feature/dev/0',batch_size,maxTimeSteps,True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "_,int_map = loadatcvocab('feature/atc/atcvocab.txt')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "685\n",
      "1518 3 995 12148\n",
      "189 3 995 1519\n"
     ]
    }
   ],
   "source": [
     "# Sanity check: vocab size, batch counts, padded length, sample totals\n",
     "print(len(int_map))\n",
     "print(len(batchedData),len(batchedData[0]),maxTimeSteps,totalN)\n",
     "print(len(devbatchedData),len(devbatchedData[0]),devmaxTimeSteps,devtotalN)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(?, 8, 39)\n",
      "(?, 8, 1024)\n",
      "shape: None 8 1024\n",
      "DBRNN_1\n",
      "inputs Tensor(\"inputX:0\", shape=(?, 8, 39), dtype=float32)\n",
      "outputs Tensor(\"DBRNN_1/fw/fw/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, 8, 512), dtype=float32) Tensor(\"ReverseSequence:0\", shape=(?, 8, 512), dtype=float32)\n",
      "outputs_state Tensor(\"DBRNN_1/fw/fw/while/Exit_2:0\", shape=(?, 512), dtype=float32) Tensor(\"DBRNN_1/bw/bw/while/Exit_2:0\", shape=(?, 512), dtype=float32)\n",
      "output_fb Tensor(\"Reshape_1:0\", shape=(?, 8, 2, 512), dtype=float32) [None, 8, 1024]\n",
      "(?, 8, 1024)\n",
      "shape: None 8 1024\n",
      "DBRNN_2\n",
      "inputs Tensor(\"Dropout/dropout/mul:0\", shape=(?, 8, 512), dtype=float32)\n",
      "outputs Tensor(\"DBRNN_2/fw/fw/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, 8, 512), dtype=float32) Tensor(\"ReverseSequence_1:0\", shape=(?, 8, 512), dtype=float32)\n",
      "outputs_state Tensor(\"DBRNN_2/fw/fw/while/Exit_2:0\", shape=(?, 512), dtype=float32) Tensor(\"DBRNN_2/bw/bw/while/Exit_2:0\", shape=(?, 512), dtype=float32)\n",
      "output_fb Tensor(\"Reshape_2:0\", shape=(?, 8, 2, 512), dtype=float32) [None, 8, 1024]\n",
      "(?, 8, 1024)\n",
      "shape: None 8 1024\n",
      "DBRNN_3\n",
      "inputs Tensor(\"Dropout_1/dropout/mul:0\", shape=(?, 8, 512), dtype=float32)\n",
      "outputs Tensor(\"DBRNN_3/fw/fw/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, 8, 512), dtype=float32) Tensor(\"ReverseSequence_2:0\", shape=(?, 8, 512), dtype=float32)\n",
      "outputs_state Tensor(\"DBRNN_3/fw/fw/while/Exit_2:0\", shape=(?, 512), dtype=float32) Tensor(\"DBRNN_3/bw/bw/while/Exit_2:0\", shape=(?, 512), dtype=float32)\n",
      "output_fb Tensor(\"Reshape_3:0\", shape=(?, 8, 2, 512), dtype=float32) [None, 8, 1024]\n",
      "outputXrs shape: (?, 512)\n",
      "output_list len: 995\n"
     ]
    }
   ],
   "source": [
    "graph = tf.Graph()\n",
    "\n",
    "args ={'level': 'cha',\n",
    "              'rnncell': tf.contrib.rnn.GRUCell,\n",
    "              'batch_size': batch_size,\n",
    "              'num_hidden': num_hidden,\n",
    "              'num_feature': num_feature,\n",
    "              'num_class': num_classes,\n",
    "              'num_layer': num_layer,\n",
    "              'activation': tf.nn.relu,\n",
    "              'optimizer': tf.train.AdamOptimizer,\n",
    "              'learning_rate': 0.0001,\n",
    "              'keep_prob': 0.9,\n",
    "              'grad_clip': 1,\n",
    "              'mode':'train'\n",
    "            }\n",
    "args = dotdict(args)\n",
    "\n",
    "with graph.as_default():\n",
    "    inputX = tf.placeholder(tf.float32,shape=(None, batch_size, num_feature),name='inputX')  \n",
    "    inputXrs = tf.reshape(inputX, [-1, num_feature])\n",
    "    #inputList = tf.split(inputXrs, maxTimeSteps, 0)  # convert inputXrs from [32*maxL,39] to [32,maxL,39]\n",
    "    targetIxs = tf.placeholder(tf.int64,name='targetIxs')\n",
    "    targetVals = tf.placeholder(tf.int32,name='targetVals')\n",
    "    targetShape = tf.placeholder(tf.int64,name='targetShape')\n",
    "    targetY = tf.SparseTensor(targetIxs, targetVals, targetShape)\n",
    "    seqLengths = tf.placeholder(tf.int32, shape=(batch_size),name='seqLengths')\n",
    "    \n",
    "    fbHrs = build_multi_dynamic_brnn(args, maxTimeSteps, inputX, args.rnncell, seqLengths)  \n",
    "    \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "995\n"
     ]
    }
   ],
   "source": [
    "print(len(fbHrs))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "995 Tensor(\"fc-layer/fc/add:0\", shape=(8, 686), dtype=float32)\n",
      "Tensor(\"stack:0\", shape=(995, 8, 686), dtype=float32)\n"
     ]
    }
   ],
   "source": [
    "with graph.as_default():\n",
    "    with tf.name_scope('fc-layer'):\n",
    "                with tf.variable_scope('fc'):\n",
    "                    weightsClasses = tf.Variable(tf.truncated_normal([args.num_hidden, args.num_class], name='weightsClasses'))\n",
    "                    biasesClasses = tf.Variable(tf.zeros([args.num_class]), name='biasesClasses')\n",
    "                    logits = [tf.matmul(t, weightsClasses) + biasesClasses for t in fbHrs]\n",
    "    print(len(logits),logits[0])                    \n",
    "    logits3d = tf.stack(logits)\n",
    "    print(logits3d)\n",
    "    loss = tf.reduce_mean(tf.nn.ctc_loss(targetY, logits3d, seqLengths))\n",
    "    var_op = tf.global_variables()\n",
    "    var_trainable_op = tf.trainable_variables()    \n",
    "    \n",
    "    if args.grad_clip == -1:\n",
    "        optimizer = tf.train.AdamOptimizer(args.learning_rate).minimize(loss)\n",
    "    else:        \n",
    "        grads, _ = tf.clip_by_global_norm(tf.gradients(loss, var_trainable_op), args.grad_clip)\n",
    "        opti = tf.train.AdamOptimizer(args.learning_rate)\n",
    "        optimizer = opti.apply_gradients(zip(grads, var_trainable_op))\n",
    "    \n",
    "    \n",
    "    predictions = tf.to_int32(tf.nn.ctc_beam_search_decoder(logits3d, seqLengths, merge_repeated=False)[0][0])\n",
    "    \n",
    "    initial_op = tf.global_variables_initializer()\n",
    "    \n",
    "    errorRate = tf.reduce_sum(tf.edit_distance(predictions, targetY, normalize=True))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1...\n",
      "total:12148,batch:1/1518,epoch:1/1,usetime:6.17603707314s,train loss=3098.043,mean train CER=17.799\n",
      "total:12148,batch:11/1518,epoch:1/1,usetime:38.3824899197s,train loss=1143.410,mean train CER=3.494\n"
     ]
    }
   ],
   "source": [
    "\n",
    "with tf.Session(graph=graph) as sess:\n",
    "    sess.run(initial_op)\n",
    "    iters = 0\n",
    "    for epoch in range(num_epochs):\n",
    "        print('Epoch {}...'.format(epoch+1))\n",
    "        batchErrors = np.zeros(len(batchedData))\n",
    "        batchRandIxs = np.random.permutation(len(batchedData))\n",
    "        #print(batchErrors.shape,batchRandIxs)\n",
    "        train_start_time = time.time()\n",
    "        for batch, batchOrigI in enumerate(batchRandIxs):           \n",
    "            batchInputs, batchTargetSparse, batchSeqLengths = batchedData[batchOrigI]\n",
    "            batchTargetIxs, batchTargetVals, batchTargetShape = batchTargetSparse\n",
    "            #print(len(batchInputs),batchSeqLengths)\n",
    "            feedDict = {inputX: batchInputs,\n",
    "                        targetIxs: batchTargetIxs,\n",
    "                        targetVals: batchTargetVals,\n",
    "                        targetShape: batchTargetShape,\n",
    "                        seqLengths: batchSeqLengths}\n",
    "            #print(type(feedDict[inputX]),targetIxs,targetVals,targetShape,seqLengths)\n",
    "            _, bloss, pre, y, er = sess.run([optimizer, loss,predictions, targetY, errorRate],feed_dict=feedDict)\n",
    "            \n",
    "            batchErrors[batch] = er\n",
    "            iters +=1\n",
    "            \n",
    "            if batch % 10 == 0 and iters % 50 !=0 :  \n",
    "                train_end_time = time.time()\n",
    "                print('total:{},batch:{}/{},epoch:{}/{},usetime:{}s,train loss={:.3f},mean train CER={:.3f}'.format(\n",
    "                                    totalN, batch+1, len(batchRandIxs), epoch+1, num_epochs, str(train_end_time-train_start_time),bloss, er/batch_size))\n",
    "                train_start_time = time.time()\n",
    "                   \n",
    "            \n",
    "            if iters % 50 == 0:     \n",
    "                print('begin dev test....')\n",
    "                devbatchnum = min(len(devbatchedData),10)\n",
    "                devbatchErrors = np.zeros(devbatchnum)\n",
    "                devbatchRandIxs = np.random.permutation(devbatchnum)\n",
    "                avg_loss = 0.0                \n",
    "                for dbatch in range(devbatchnum):\n",
    "                    dbatchInputs, dbatchTargetSparse, dbatchSeqLengths = devbatchedData[devbatchRandIxs[dbatch]]\n",
    "                    dbatchTargetIxs, dbatchTargetVals, dbatchTargetShape = dbatchTargetSparse\n",
    "                    devfeedDict = {inputX: dbatchInputs,\n",
    "                                 targetIxs: dbatchTargetIxs,\n",
    "                                 targetVals: dbatchTargetVals,\n",
    "                                 targetShape: dbatchTargetShape,\n",
    "                                 seqLengths: dbatchSeqLengths}\n",
    "                    dloss, dpre, dy, der = sess.run([loss, predictions,targetY,errorRate], feed_dict=devfeedDict)\n",
    "                    devbatchErrors[dbatch] = der\n",
    "                    avg_loss += dloss\n",
    "                    if iters % 200 == 0:\n",
    "                        print('Truth :' + output_to_sequence(dy,int_map))\n",
    "                        print('Output:' + output_to_sequence(dpre,int_map))\n",
    "                avg_loss /= devbatchnum\n",
    "                avg_cer = np.mean(devbatchErrors)/ devbatchnum\n",
    "                print('after {} batch dev,avg_loss:{},CER:{}'.format(devbatchnum,avg_loss,avg_cer))\n",
    "            \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "print('ctc_lost parms:',targetY,logits3d,seqLengths)\n",
    "print(type(optimizer),type(predictions),type(initial_op))\n",
    "print(sess)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 126,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "tf.summary.FileWriter('testDBiRNNlog',graph).close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<type 'list'> 971 Tensor(\"Reshape_5:0\", shape=(16, 512), dtype=float32)\n"
     ]
    }
   ],
   "source": [
    "print(type(fbHrs),len(fbHrs),fbHrs[0])  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Tensor(\"inputX:0\", shape=(971, 16, 39), dtype=float32) Tensor(\"Reshape_3:0\", shape=(15536, 39), dtype=float32) Tensor(\"split_3:0\", shape=(16, 39), dtype=float32)\n"
     ]
    }
   ],
   "source": [
    "print(inputX,inputXrs,inputList[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Tensor(\"targetIxs:0\", dtype=int64) Tensor(\"targetVals:0\", dtype=int32) Tensor(\"targetShape:0\", dtype=int64)\n",
      "SparseTensor(indices=Tensor(\"targetIxs:0\", dtype=int64), values=Tensor(\"targetVals:0\", dtype=int32), dense_shape=Tensor(\"targetShape:0\", dtype=int64)) Tensor(\"seqLengths_1:0\", shape=(16,), dtype=int32)\n",
      "Tensor(\"seqLengths_1:0\", shape=(16,), dtype=int32)\n"
     ]
    }
   ],
   "source": [
    "print(targetIxs,targetVals,targetShape)\n",
    "print(targetY,seqLengths)\n",
    "print(seqLengths)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "a = tf.placeholder(tf.float32,shape=[971,16,512])\n",
    "b = tf.placeholder(tf.float32,shape=[971,16,512])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "ab=tf.concat([a,b],2)\n",
    "ab\n",
    "shape = ab.get_shape().as_list()\n",
    "shape\n",
    "cd = tf.reshape(ab,[shape[0], shape[1], 2, int(shape[2] / 2)])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 100,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tf.Tensor 'Reshape:0' shape=(971, 16, 2, 512) dtype=float32>"
      ]
     },
     "execution_count": 100,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "\n",
    "ef = tf.reduce_sum(cd,2)\n",
    "cd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tf.Tensor 'Sum_4:0' shape=(971, 16, 512) dtype=float32>"
      ]
     },
     "execution_count": 101,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "ef"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "aa=tf.contrib.layers.dropout(ef, keep_prob=0.9, is_training=(True))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 103,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tf.Tensor 'Dropout/dropout/mul:0' shape=(971, 16, 512) dtype=float32>"
      ]
     },
     "execution_count": 103,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "aa"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
