{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Traffic flow forecast with Tensor lstm\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### step 1: Flags for training configuration"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Flags configuration loaded ...\n"
     ]
    }
   ],
   "source": [
    "\"\"\"Functions for downloading and reading time series data.\"\"\"\n",
    "\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import numpy as np\n",
    "from six.moves import xrange  # pylint: disable=redefined-builtin\n",
    "\n",
    "from tensorflow.contrib.learn.python.learn.datasets import base\n",
    "from tensorflow.python.framework import random_seed\n",
    "\n",
    "import tensorflow as tf\n",
    "from tensorflow.contrib import rnn\n",
    "# from reader import read_data_sets\n",
    "from model_seq2seq import *\n",
    "from trnn import *\n",
    "import numpy \n",
    "from train_config import *\n",
    "\n",
    "\n",
    "flags = tf.flags\n",
    "flags.DEFINE_string(\"model\", \"TLSTM\",\n",
    "          \"Model used for learning.\")\n",
    "flags.DEFINE_string(\"data_path\", \"./data.npy\",\n",
    "          \"Data input directory.\")\n",
    "flags.DEFINE_string(\"save_path\", \"./log/tlstm/\",\n",
    "          \"Model output directory.\")\n",
    "flags.DEFINE_bool(\"use_sched_samp\", False,\n",
    "                  \"Use scheduled sampling in training\")\n",
    "flags.DEFINE_integer(\"inp_steps\", 12, \"burn in steps\")\n",
    "flags.DEFINE_integer(\"out_steps\", None, \"test steps\")\n",
    "flags.DEFINE_integer(\"hidden_size\", 8, \"hidden layer size\")\n",
    "flags.DEFINE_float(\"learning_rate\", 1e-3, \"learning rate\")\n",
    "flags.DEFINE_float(\"decay_rate\", 0.8, \"learning rate\")\n",
    "flags.DEFINE_integer(\"rank\", 1, \"rank for tt decomposition\")\n",
    "\n",
    "FLAGS = flags.FLAGS\n",
    "print('Flags configuration loaded ...')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### step 2: Read flags and data into memory"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading time series ...\n",
      "input type  <class 'numpy.ndarray'> (1000, 100, 1)\n",
      "normalize to (0-1)\n",
      "----------------------------------------------------------------------------------------------------\n",
      "|input steps| 12 |out steps| 88 | hidden size| 8 |learning rate| 0.001 |rank val| [1]\n",
      "----------------------------------------------------------------------------------------------------\n"
     ]
    }
   ],
   "source": [
    "'''\n",
    "To forecast time series using a recurrent neural network, we consider every \n",
    "row as a sequence of short time series.\n",
    "'''\n",
    "\n",
    "# Training Parameters\n",
    "# NOTE(review): the FLAGS defined in step 1 (hidden_size, learning_rate,\n",
    "# decay_rate, rank, data_path, out_steps, use_sched_samp) are never read;\n",
    "# the same default values are hard-coded below. Accessing absl-backed\n",
    "# FLAGS inside a Jupyter kernel can fail on the kernel's argv, which is\n",
    "# presumably why -- confirm before wiring the flags back in.\n",
    "config = TrainConfig()\n",
    "config.hidden_size = 8\n",
    "config.learning_rate = 1e-3\n",
    "config.decay_rate = 0.8\n",
    "config.rank_vals = [1]\n",
    "\n",
    "# Scheduled sampling [optional] -- the literal False permanently disables\n",
    "# it (mirrors the use_sched_samp flag's default).\n",
    "if False:\n",
    "    config.sample_prob = tf.get_variable(\"sample_prob\", shape=(), initializer=tf.zeros_initializer())\n",
    "sampling_burn_in = 400\n",
    "\n",
    "# Training Parameters\n",
    "training_steps = config.training_steps\n",
    "batch_size = config.batch_size\n",
    "display_step = 500  # report losses every 500 steps\n",
    "inp_steps = config.inp_steps\n",
    "out_steps = None  # None -> forecast the remainder of each sequence (resolved below)\n",
    "\n",
    "\n",
    "# Read Dataset\n",
    "dataset, stats = read_data_sets(\"./data.npy\", True, inp_steps, out_steps)\n",
    "\n",
    "# Network Parameters\n",
    "num_input = stats['num_input']  # time series dimension (1 for this dataset, per the loader's printed shape)\n",
    "num_steps = stats['num_steps']  # total sequence length\n",
    "\n",
    "if out_steps is None:\n",
    "    # Forecast for the rest if horizon is not set\n",
    "    out_steps = num_steps - inp_steps\n",
    "\n",
    "# Print training config\n",
    "print('-'*100)\n",
    "print('|input steps|', inp_steps, '|out steps|', out_steps ,'|', 'hidden size|',config.hidden_size, '|learning rate|',\n",
    "     config.learning_rate, '|rank val|', config.rank_vals)\n",
    "print('-'*100)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Step 3: Build neural network models\n",
    "Building seq2seq model for training/validation/testing,\n",
    "validation and testing models are shared,\n",
    "scheduled sampling is by default off "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training -->\n",
      "WARNING:tensorflow:At least two cells provided to MultiRNNCell are the same object and will share weights.\n",
      "          Create Encoder ...\n",
      "          Create Decoder ...\n",
      "Testing -->\n",
      "          Create Encoder ...\n",
      "          Create Decoder ...\n"
     ]
    }
   ],
   "source": [
    "# tf Graph input\n",
    "X = tf.placeholder(\"float\", [None, inp_steps, num_input])\n",
    "Y = tf.placeholder(\"float\", [None, out_steps, num_input])\n",
    "\n",
    "# Decoder output\n",
    "Z = tf.placeholder(\"float\", [None, out_steps, num_input])\n",
    "\n",
    "Model = globals()[\"TLSTM\"]\n",
    "with tf.name_scope(\"Train\"):\n",
    "    with tf.variable_scope(\"Model\", reuse=None):\n",
    "        train_pred = Model(X, Y, True,  config)\n",
    "with tf.name_scope(\"Test\"):\n",
    "    with tf.variable_scope(\"Model\", reuse=True):\n",
    "        test_pred = Model(X, Y, False,  config)\n",
    "\n",
    "\n",
    "# Define loss and optimizer\n",
    "train_loss = tf.sqrt(tf.reduce_mean(tf.squared_difference(train_pred, Z)))\n",
    "test_loss = tf.sqrt(tf.reduce_mean(tf.squared_difference(test_pred, Z)))\n",
    "\n",
    "# Exponential learning rate decay \n",
    "global_step = tf.Variable(0, trainable=False)\n",
    "starter_learning_rate = config.learning_rate\n",
    "learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,\n",
    "                                           2000, config.decay_rate, staircase=True)\n",
    "optimizer = tf.train.RMSPropOptimizer(learning_rate)\n",
    "train_op = optimizer.minimize(train_loss,global_step=global_step)\n",
    "\n",
    "# Scheduled sampling params\n",
    "eps_min = 0.1 # minimal prob\n",
    "\n",
    "# Write summary\n",
    "train_summary = tf.summary.scalar('train_loss', train_loss)\n",
    "valid_summary = tf.summary.scalar('valid_loss', test_loss)\n",
    "lr_summary = tf.summary.scalar('learning_rate', learning_rate)\n",
    "\n",
    "# Initialize the variables (i.e. assign their default value)\n",
    "init = tf.global_variables_initializer()\n",
    "\n",
    "# Saver for the model and loss\n",
    "saver = tf.train.Saver()\n",
    "hist_loss =[]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Training loop\n",
    "This may take a while, training loss is displayed at every 500 steps, \n",
    "use GPU can speed up training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "data len: 47520\n",
      "sequence len: 100\n",
      "result len: 31273\n",
      "result shape: (31273, 101)\n",
      "[[0.2618601605814073, 0.272793445356181, 0.28487653741061775, 0.3038895629816848, 0.30668394126597814, 0.3171486974028923, 0.31676974781223455, 0.34389838989255794, 0.3414001552747326, 0.3455933980764835, 0.3480323382112017, 0.36378485629099516, 0.35501214957068256, 0.37673454842987253, 0.3799627785005504, 0.3830988665584201, 0.39946166861190513, 0.39960157242014965, 0.413424977218205, 0.43111741696173594, 0.4187211270423991, 0.42257301872849795, 0.4381673079901676, 0.4281157741445601, 0.43851544558112493, 0.4289743092888701, 0.43775456740094465, 0.44317201928309996, 0.4304332683108079, 0.4463168328567445, 0.4445353375703193, 0.4508914916041944, 0.45918538606841325, 0.45668580961993427, 0.4585096049725722, 0.4473044584530228, 0.46183705182278845, 0.4651173849736635, 0.46680370004440946, 0.4634625190587769, 0.4601941311799277, 0.47486554705135553, 0.4750651794668939, 0.4917174753513908, 0.48586846660991356, 0.4830033336959108, 0.4886308910248218, 0.5003440333364073, 0.48581934493341067, 0.4878095205981101, 0.48652075797855, 0.4915987313692956, 0.4805996709758733, 0.4990557361843658, 0.4925549793016169, 0.49851182356623025, 0.5106444420056173, 0.510403396211566, 0.5124229282170062, 0.5251465180000839, 0.5244734568868548, 0.5267392598631021, 0.5310064325934094, 0.54322313346869, 0.5328384565624112, 0.5515114564975225, 0.5492158958244066, 0.5393577884706061, 0.5633347194709892, 0.5626839720527049, 0.5656684751637414, 0.5772089910316083, 0.5817792315438087, 0.5892181339583727, 0.5937749663395063, 0.5886462645746592, 0.5921837192832694, 0.5897646168311614, 0.6058739134057193, 0.5910437028189618, 0.6042731904678224, 0.6086910847047329, 0.6144763782794471, 0.6111968712541929, 0.6094775049412391, 0.6262683678359224, 0.6305338244621602, 0.611745313698703, 0.6388550021876049, 0.6507305016368781, 0.6523729283970693, 0.6704907491840512, 0.6839898362811206, 0.6975168030998239, 0.693874063392413, 0.7189242903914816, 0.7568609986329679, 0.7566852311033372, 
0.7725189167040399, 0.7954978873663654, 0.794562606761342]]\n",
      "[[0.2618601605814073, 0.272793445356181, 0.28487653741061775, 0.3038895629816848, 0.30668394126597814, 0.3171486974028923, 0.31676974781223455, 0.34389838989255794, 0.3414001552747326, 0.3455933980764835, 0.3480323382112017, 0.36378485629099516, 0.35501214957068256, 0.37673454842987253, 0.3799627785005504, 0.3830988665584201, 0.39946166861190513, 0.39960157242014965, 0.413424977218205, 0.43111741696173594, 0.4187211270423991, 0.42257301872849795, 0.4381673079901676, 0.4281157741445601, 0.43851544558112493, 0.4289743092888701, 0.43775456740094465, 0.44317201928309996, 0.4304332683108079, 0.4463168328567445, 0.4445353375703193, 0.4508914916041944, 0.45918538606841325, 0.45668580961993427, 0.4585096049725722, 0.4473044584530228, 0.46183705182278845, 0.4651173849736635, 0.46680370004440946, 0.4634625190587769, 0.4601941311799277, 0.47486554705135553, 0.4750651794668939, 0.4917174753513908, 0.48586846660991356, 0.4830033336959108, 0.4886308910248218, 0.5003440333364073, 0.48581934493341067, 0.4878095205981101, 0.48652075797855, 0.4915987313692956, 0.4805996709758733, 0.4990557361843658, 0.4925549793016169, 0.49851182356623025, 0.5106444420056173, 0.510403396211566, 0.5124229282170062, 0.5251465180000839, 0.5244734568868548, 0.5267392598631021, 0.5310064325934094, 0.54322313346869, 0.5328384565624112, 0.5515114564975225, 0.5492158958244066, 0.5393577884706061, 0.5633347194709892, 0.5626839720527049, 0.5656684751637414, 0.5772089910316083, 0.5817792315438087, 0.5892181339583727, 0.5937749663395063, 0.5886462645746592, 0.5921837192832694, 0.5897646168311614, 0.6058739134057193, 0.5910437028189618, 0.6042731904678224, 0.6086910847047329, 0.6144763782794471, 0.6111968712541929, 0.6094775049412391, 0.6262683678359224, 0.6305338244621602, 0.611745313698703, 0.6388550021876049, 0.6507305016368781, 0.6523729283970693, 0.6704907491840512, 0.6839898362811206, 0.6975168030998239, 0.693874063392413, 0.7189242903914816, 0.7568609986329679, 0.7566852311033372, 
0.7725189167040399, 0.7954978873663654, 0.794562606761342]]\n",
      "normalise_windows result shape: (31273, 101)\n",
      "finish\n"
     ]
    }
   ],
   "source": [
    "def normalise_windows(window_data):\n",
    "    record = []\n",
    "    normalised_data = []\n",
    "    for window in window_data:   #window shape (sequence_length L ,)  即(51L,)\n",
    "        record.append(window[0])\n",
    "        normalised_window = [((float(p) / float(window[0])) +0.1) for p in window]\n",
    "        normalised_data.append(normalised_window)\n",
    "    return normalised_data,record\n",
    "\n",
    "def load_data(filename, seq_len, normalise_window):\n",
    "    f = open(filename, 'rb').read()\n",
    "    data = f.split(str.encode('\\n'))\n",
    "    \n",
    "    for i in range(len(data)):\n",
    "        if i > 5:\n",
    "            data[i] = (int(data[i])+data[i-1]+data[i-2]+data[i-3]+data[i-4])/5\n",
    "        else:\n",
    "            data[i] = int(data[i])\n",
    "    \n",
    "    for i in range(len(data)):\n",
    "        data[i] = data[i]/800\n",
    "    \n",
    "    print('data len:',len(data))\n",
    "    print('sequence len:',seq_len)\n",
    "\n",
    "    sequence_length = seq_len + 1\n",
    "    result = []\n",
    "    for index in range(len(data) - sequence_length):\n",
    "        if sum( [ int(index>73 + i * 288 and index<264 + i * 288) for i in range(47520//288) ] ) > 0:\n",
    "            result.append(data[index: index + sequence_length])  #得到长度为seq_len+1的向量，最后一个作为label\n",
    "\n",
    "    print('result len:',len(result))\n",
    "    print('result shape:',np.array(result).shape)\n",
    "    print(result[:1])\n",
    "\n",
    "    record = 0\n",
    "    print(result[:1])\n",
    "    print('normalise_windows result shape:',np.array(result).shape)\n",
    "\n",
    "    result = np.array(result)\n",
    "\n",
    "    #划分train、test\n",
    "    row = round(0.9 * result.shape[0])\n",
    "    # print(row)\n",
    "    # input()\n",
    "    train = result[:row, :]\n",
    "    np.random.shuffle(train)\n",
    "    x_train = train[:, :-1]\n",
    "    y_train = train[:, -1]\n",
    "    x_test = result[row:, :-1]\n",
    "    y_test = result[row:, -1]\n",
    "\n",
    "    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\n",
    "    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))\n",
    "\n",
    "    return [x_train, y_train, x_test, y_test,record]\n",
    "\n",
    "zzl_x_train, zzl_y_train, zzl_x_test, zzl_y_test,zzl_record = load_data('sp500.csv', 100, True)\n",
    "print('finish')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "def newtdata():\n",
    "    \n",
    "    randi = np.random.randint(len(zzl_x_train))\n",
    "    batch_x = np.zeros((50,12,1))\n",
    "    batch_y = np.zeros((50,88,1))\n",
    "    batch_z = np.zeros((50,88,1))\n",
    "    \n",
    "    for i in range(50):\n",
    "        a = zzl_x_train[randi]\n",
    "        c = a[13:]\n",
    "        c = np.append(c,zzl_y_train[randi])\n",
    "        c = c.reshape((-1,1))\n",
    "        b = a[12:]\n",
    "        a = a[:12]\n",
    "        \n",
    "        batch_x[i,:,:] = a\n",
    "        batch_y[i,:,:] = b\n",
    "        batch_z[i,:,:] = c\n",
    "       \n",
    "    return batch_x,batch_y,batch_z\n",
    "def newvdata():\n",
    "    \n",
    "    randi = np.random.randint(len(zzl_x_train))\n",
    "    batch_x = np.zeros((50,12,1))\n",
    "    batch_y = np.zeros((50,88,1))\n",
    "    batch_z = np.zeros((50,88,1))\n",
    "    \n",
    "    for i in range(50):\n",
    "        a = zzl_x_train[randi]\n",
    "        c = a[13:]\n",
    "        c = np.append(c,zzl_y_train[randi])\n",
    "        c = c.reshape((-1,1))\n",
    "        b = a[12:]\n",
    "        a = a[:12]\n",
    "        \n",
    "        batch_x[i,:,:] = a\n",
    "        batch_y[i,:,:] = b\n",
    "        batch_z[i,:,:] = c\n",
    "       \n",
    "    return batch_x,batch_y,batch_z\n",
    "\n",
    "def newtrdata():\n",
    "    \n",
    "    randi = np.random.randint(len(zzl_x_train))\n",
    "    batch_x = np.zeros((100,12,1))\n",
    "    batch_y = np.zeros((100,88,1))\n",
    "    batch_z = np.zeros((100,88,1))\n",
    "    \n",
    "    for i in range(100):\n",
    "        a = zzl_x_train[randi]\n",
    "        c = a[13:]\n",
    "        c = np.append(c,zzl_y_train[randi])\n",
    "        c = c.reshape((-1,1))\n",
    "        b = a[12:]\n",
    "        a = a[:12]\n",
    "        \n",
    "        batch_x[i,:,:] = a\n",
    "        batch_y[i,:,:] = b\n",
    "        batch_z[i,:,:] = c\n",
    "    \n",
    "    \n",
    "#     print(a.shape,b.shape,c.shape)\n",
    "    return batch_x,batch_y,batch_z\n",
    "# newtraindata()\n",
    "    \n",
    "    \n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 1, Minibatch Loss= 0.3330\n",
      "Validation Loss: 0.29102433\n",
      "Step 500, Minibatch Loss= 0.2089\n",
      "Validation Loss: 0.24209599\n",
      "Step 1000, Minibatch Loss= 0.2530\n",
      "Validation Loss: 0.24490592\n",
      "Step 1500, Minibatch Loss= 0.1352\n",
      "Validation Loss: 0.27513233\n",
      "Step 2000, Minibatch Loss= 0.0779\n",
      "Validation Loss: 0.09094393\n",
      "Step 2500, Minibatch Loss= 0.0586\n",
      "Validation Loss: 0.43788236\n",
      "Step 3000, Minibatch Loss= 0.1952\n",
      "Validation Loss: 0.20032044\n",
      "Step 3500, Minibatch Loss= 0.3488\n",
      "Validation Loss: 0.04980065\n",
      "Step 4000, Minibatch Loss= 0.1190\n",
      "Validation Loss: 0.28329897\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-16-6057a38df55a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m     14\u001b[0m \u001b[0;31m#         print(batch_x.shape,batch_y.shape, batch_z.shape)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     15\u001b[0m         \u001b[0;31m# Run optimization op (backprop)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m         \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbatch_x\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mY\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbatch_y\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mZ\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mbatch_z\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     17\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mstep\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0mdisplay_step\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mstep\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     18\u001b[0m             \u001b[0;31m# Calculate batch loss\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m    927\u001b[0m     \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    928\u001b[0m       result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 929\u001b[0;31m                          run_metadata_ptr)\n\u001b[0m\u001b[1;32m    930\u001b[0m       \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    931\u001b[0m         \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m   1150\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1151\u001b[0m       results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1152\u001b[0;31m                              feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m   1153\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1154\u001b[0m       \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m   1326\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1327\u001b[0m       return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1328\u001b[0;31m                            run_metadata)\n\u001b[0m\u001b[1;32m   1329\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1330\u001b[0m       \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m   1332\u001b[0m   \u001b[0;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1333\u001b[0m     \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1334\u001b[0;31m       \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1335\u001b[0m     \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1336\u001b[0m       \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m   1317\u001b[0m       \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1318\u001b[0m       return self._call_tf_sessionrun(\n\u001b[0;32m-> 1319\u001b[0;31m           options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m   1320\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1321\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0m_prun_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m   1405\u001b[0m     return tf_session.TF_SessionRun_wrapper(\n\u001b[1;32m   1406\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1407\u001b[0;31m         run_metadata)\n\u001b[0m\u001b[1;32m   1408\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1409\u001b[0m   \u001b[0;32mdef\u001b[0m \u001b[0m_call_tf_sessionprun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# Start training\n",
    "with tf.Session() as sess:\n",
    "    # Merge all the summaries and write them out to /log/tlstm (by default)\n",
    "    merged = tf.summary.merge_all()\n",
    "    summary_writer = tf.summary.FileWriter(\"./log/tlstm/\",sess.graph)\n",
    "\n",
    "    # Run the initializer\n",
    "    sess.run(init)    \n",
    "    \n",
    "    for step in range(1, 100000+1):\n",
    "#         batch_x, batch_y, batch_z = dataset.train.next_batch(batch_size)\n",
    "        batch_x, batch_y, batch_z = newtdata()\n",
    "        \n",
    "#         print(batch_x.shape,batch_y.shape, batch_z.shape)\n",
    "        # Run optimization op (backprop)\n",
    "        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y, Z:batch_z})\n",
    "        if step % display_step == 0 or step == 1:\n",
    "            # Calculate batch loss \n",
    "            summary, loss = sess.run([merged,train_loss], feed_dict={X: batch_x,Y: batch_y, Z:batch_z})\n",
    "            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n",
    "            run_metadata = tf.RunMetadata()\n",
    "            summary_writer.add_run_metadata(run_metadata, 'step%03d' % step)\n",
    "            summary_writer.add_summary(summary, step)\n",
    "            print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n",
    "                  \"{:.4f}\".format(loss) )\n",
    "            \n",
    "            # Calculate validation\n",
    "            valid_enc_inps,valid_dec_inps,valid_dec_outs = newvdata()\n",
    "\n",
    "            va_sum, va_loss = sess.run([valid_summary,test_loss], \\\n",
    "                                       feed_dict={X: valid_enc_inps, Y: valid_dec_inps, Z: valid_dec_outs})\n",
    "            summary_writer.add_summary(va_sum, step) \n",
    "            print(\"Validation Loss:\", va_loss)\n",
    "            \n",
    "            # Overfitting\n",
    "            hist_loss.append(va_loss)\n",
    "#             if len(hist_loss)>20 and va_loss > np.mean(hist_loss):\n",
    "#                 print(\"Early stopping: step \", step)\n",
    "#                 break\n",
    "          \n",
    "            #Update sampling prob\n",
    "            if False and step > sampling_burn_in:\n",
    "                sample_prob = max(eps_min, 1.0-step/(2*training_steps))\n",
    "                sess.run(tf.assign(config.sample_prob, sample_prob))\n",
    "                print('Sampling prob:', sample_prob)\n",
    "\n",
    "    print(\"Optimization Finished!\")\n",
    "\n",
    "    # Calculate accuracy for test datasets\n",
    "    test_enc_inps,test_dec_inps,test_dec_outs = newtrdata()\n",
    "\n",
    "    # Fetch the predictions \n",
    "    fetches = {\n",
    "        \"true\":Z,\n",
    "        \"pred\":test_pred,\n",
    "        \"loss\":test_loss\n",
    "    }\n",
    "    test_vals = sess.run(fetches, feed_dict={X: test_enc_inps, Y: test_dec_inps, Z: test_dec_outs})\n",
    "    print(\"Testing Loss:\", test_vals[\"loss\"])\n",
    "\n",
    "    # Save the variables to disk.\n",
    "    save_path = saver.save(sess, \"./log/tlstm/\")\n",
    "    print(\"Model saved in file: %s\" % save_path)\n",
    "    # Save predictions \n",
    "    numpy.save(save_path+\"predict.npy\", (test_vals[\"true\"], test_vals[\"pred\"]))\n",
    "    # Save config file\n",
    "    with open(save_path+\"config.out\", 'w') as f:\n",
    "        f.write('hidden_size:'+ str(config.hidden_size)+'\\t'+ 'learning_rate:'+ str(config.learning_rate)+ '\\n')\n",
    "        f.write('train_error:'+ str(loss) +'\\t'+ 'valid_error:' + str(va_loss) + '\\t'+ 'test_error:'+ str(test_vals[\"loss\"]) +'\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_enc_inps,test_dec_inps,test_dec_outs = newtrdata()\n",
    "\n",
    "# Fetch the predictions \n",
    "fetches = {\n",
    "\"true\":Z,\n",
    "\"pred\":test_pred,\n",
    "\"loss\":test_loss\n",
    "}\n",
    "test_vals = sess.run(fetches, feed_dict={X: test_enc_inps, Y: test_dec_inps, Z: test_dec_outs})\n",
    "print(\"Testing Loss:\", test_vals[\"loss\"])\n",
    "\n",
    "# Save the variables to disk.\n",
    "save_path = saver.save(sess, \"./log/tlstm/\")\n",
    "print(\"Model saved in file: %s\" % save_path)\n",
    "# Save predictions \n",
    "numpy.save(save_path+\"predict.npy\", (test_vals[\"true\"], test_vals[\"pred\"]))\n",
    "# Save config file\n",
    "with open(save_path+\"config.out\", 'w') as f:\n",
    "    f.write('hidden_size:'+ str(config.hidden_size)+'\\t'+ 'learning_rate:'+ str(config.learning_rate)+ '\\n')\n",
    "    f.write('train_error:'+ str(loss) +'\\t'+ 'valid_error:' + str(va_loss) + '\\t'+ 'test_error:'+ str(test_vals[\"loss\"]) +'\\n')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Visualize predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "y_true = test_vals[\"true\"]\n",
    "y_pred = test_vals[\"pred\"]\n",
    "plt.plot(y_true[0,:,0].T,':')\n",
    "plt.plot(y_pred[0,:,0].T,'-')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
