{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import datetime\n",
    "import pandas as pd\n",
    "import os\n",
    "import tensorflow as tf\n",
    "from collections import namedtuple\n",
    "import numpy as np\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Read sussessful\n"
     ]
    }
   ],
   "source": [
    "# Load the 5-minute bar history for 601318.SH if the CSV is present.\n",
    "if os.path.exists('601318.SH_5min.csv'):\n",
    "    data = pd.read_csv('601318.SH_5min.csv', index_col=0)\n",
    "    print('Read successful')\n",
    "else:\n",
    "    print('File does not exist')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>ts_code</th>\n",
       "      <th>trade_time</th>\n",
       "      <th>open</th>\n",
       "      <th>close</th>\n",
       "      <th>high</th>\n",
       "      <th>low</th>\n",
       "      <th>vol</th>\n",
       "      <th>amount</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>601318.SH</td>\n",
       "      <td>2019-10-22 15:00:00</td>\n",
       "      <td>90.15</td>\n",
       "      <td>90.29</td>\n",
       "      <td>90.29</td>\n",
       "      <td>90.15</td>\n",
       "      <td>966545.0</td>\n",
       "      <td>87224082.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>601318.SH</td>\n",
       "      <td>2019-10-22 14:55:00</td>\n",
       "      <td>90.12</td>\n",
       "      <td>90.15</td>\n",
       "      <td>90.19</td>\n",
       "      <td>90.11</td>\n",
       "      <td>735530.0</td>\n",
       "      <td>66309263.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>601318.SH</td>\n",
       "      <td>2019-10-22 14:50:00</td>\n",
       "      <td>90.10</td>\n",
       "      <td>90.11</td>\n",
       "      <td>90.12</td>\n",
       "      <td>90.07</td>\n",
       "      <td>558033.0</td>\n",
       "      <td>50276229.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>601318.SH</td>\n",
       "      <td>2019-10-22 14:45:00</td>\n",
       "      <td>90.10</td>\n",
       "      <td>90.10</td>\n",
       "      <td>90.14</td>\n",
       "      <td>90.06</td>\n",
       "      <td>640738.0</td>\n",
       "      <td>57733317.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>601318.SH</td>\n",
       "      <td>2019-10-22 14:40:00</td>\n",
       "      <td>90.25</td>\n",
       "      <td>90.12</td>\n",
       "      <td>90.25</td>\n",
       "      <td>90.06</td>\n",
       "      <td>724000.0</td>\n",
       "      <td>65254508.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "     ts_code           trade_time   open  close   high    low       vol  \\\n",
       "0  601318.SH  2019-10-22 15:00:00  90.15  90.29  90.29  90.15  966545.0   \n",
       "1  601318.SH  2019-10-22 14:55:00  90.12  90.15  90.19  90.11  735530.0   \n",
       "2  601318.SH  2019-10-22 14:50:00  90.10  90.11  90.12  90.07  558033.0   \n",
       "3  601318.SH  2019-10-22 14:45:00  90.10  90.10  90.14  90.06  640738.0   \n",
       "4  601318.SH  2019-10-22 14:40:00  90.25  90.12  90.25  90.06  724000.0   \n",
       "\n",
       "       amount  \n",
       "0  87224082.0  \n",
       "1  66309263.0  \n",
       "2  50276229.0  \n",
       "3  57733317.0  \n",
       "4  65254508.0  "
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "data.head(5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_time(t):\n",
    "    \"\"\"Map a '%Y-%m-%d %H:%M:%S' timestamp to its 5-minute slot of the trading day.\n",
    "\n",
    "    Slot 0 is 09:30; afternoon times are shifted down by 18 slots so the\n",
    "    lunch break (11:30-13:00) is skipped and values stay within 0..48.\n",
    "    \"\"\"\n",
    "    time = datetime.datetime.strptime(t, '%Y-%m-%d %H:%M:%S')\n",
    "    minus_9_30 = (int(time.strftime('%H'))-9) * 12 + int(time.strftime('%M')) / 5 - 6\n",
    "    return minus_9_30 if minus_9_30 < 25 else minus_9_30 - 18\n",
    "\n",
    "class Observations:\n",
    "    \"\"\"Trader state anchored at one row of the history DataFrame.\"\"\"\n",
    "\n",
    "    def __init__(self, index, is_hold, wait_time, trade_price):\n",
    "        # index: row position in the history DataFrame (row 0 is the latest bar)\n",
    "        # is_hold: 1 when a position is held, 0 when flat\n",
    "        # wait_time: number of 5-minute bars since the last trade\n",
    "        # trade_price: price of the last trade\n",
    "        self.index = index\n",
    "        self.is_hold = is_hold\n",
    "        self.wait_time = wait_time\n",
    "        self.trade_price = trade_price\n",
    "\n",
    "    def values(self, history_data, length):\n",
    "        # history_data: DataFrame where earlier rows are later in time.\n",
    "        # Returns a (1, length * 6 + 3) array: per bar the six fields\n",
    "        # trade_time (the 0..48 slot index), open, high, low, close, vol/10000;\n",
    "        # the final three entries are is_hold * 100 (100 = holding), the\n",
    "        # pass-night flag (100 = position held overnight) and trade_price.\n",
    "        recent_data = history_data[['trade_time', 'open', 'high', 'low', 'close', 'vol']][\n",
    "            self.index: self.index+length].copy()  # copy: never mutate a slice of history_data\n",
    "        recent_data['vol'] = recent_data['vol']/10000\n",
    "        recent_data['trade_time'] = recent_data['trade_time'].apply(get_time)\n",
    "        # Held longer than a full day, or longer than the bars elapsed today,\n",
    "        # means the position was opened on a previous trading day.\n",
    "        is_pass_night = self.wait_time > 48 or self.wait_time > recent_data['trade_time'].iloc[0]\n",
    "        return np.hstack([np.array(recent_data.values).reshape(1, -1),\n",
    "                          np.array([[self.is_hold*100, 100 if is_pass_night else 0, self.trade_price]])])\n",
    "\n",
    "    def decode(self, history_data, length, log=False):\n",
    "        # Human-readable view of the same window; optionally prints the position.\n",
    "        recent_data = history_data[['trade_time', 'open', 'high', 'low', 'close', 'vol']][\n",
    "            self.index: self.index+length].copy()  # copy: never mutate a slice of history_data\n",
    "        recent_data['trade_time'] = recent_data['trade_time'].apply(get_time)\n",
    "\n",
    "        if log:\n",
    "            print('recent data is :\\n', recent_data)\n",
    "            print('')\n",
    "\n",
    "            if self.is_hold:\n",
    "                print('Hold stock for {} minutes, purchase price is {}.'.format(\n",
    "                    self.wait_time*5, self.trade_price))\n",
    "            else:\n",
    "                print('Doesn\\'t hold anything.')\n",
    "        return recent_data\n",
    "\n",
    "    def __str__(self):\n",
    "        return 'index: {}, is_hold: {}, wait_time: {}, trade_price: {}\\n'.format(\n",
    "            self.index, self.is_hold, self.wait_time, self.trade_price)\n",
    "\n",
    "    def __repr__(self):\n",
    "        return self.__str__()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Observation test\n",
    "# index， is_hold， wait_time， trade_price\n",
    "# obs = Observations(*[1, 1, 0, 89])\n",
    "# next_obs = Observations(*[0, 1, 1, 89])\n",
    "# print(obs.values(data, 3))\n",
    "# print(next_obs.values(data, 3))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Actions:\n",
    "    \"\"\"Samples a single trading action from buy/sell probabilities.\"\"\"\n",
    "\n",
    "    def __init__(self, action_prob):\n",
    "        # action_prob: sequence whose first two entries are the probabilities\n",
    "        # of buying and selling; holding gets the remaining probability mass.\n",
    "        self.p_buy = action_prob[0]\n",
    "        self.p_sell = action_prob[1]\n",
    "        self.p_hold = 1 - action_prob[0] - action_prob[1]\n",
    "\n",
    "        # Sample once at construction; choose() returns the cached draw.\n",
    "        # (A leftover debug print of the sampled action was removed here.)\n",
    "        self.action_choose = np.random.choice(['buy', 'sell', 'hold'],\n",
    "                                              p=[self.p_buy, self.p_sell, self.p_hold])\n",
    "\n",
    "    def choose(self):\n",
    "        # Return the action drawn in __init__ ('buy', 'sell' or 'hold').\n",
    "        return self.action_choose"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "+ Note: each trade is fixed at 100 shares; the transaction fee defaults to 2% of the last trade price (see `calc_reward_batch`)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def calc_reward_batch(obs, next_obs, history_data):\n",
    "    # obs and next_obs are Observations instances.\n",
    "    \n",
    "    # A fee of 2% of the last trade price is charged on the step immediately\n",
    "    # after a trade executed (next_obs.wait_time == 1 means the trade happened\n",
    "    # at obs, since wait_time resets to 0 on a trade and the env adds 1).\n",
    "    fee = obs.trade_price * 0.02 if next_obs.wait_time == 1 else 0\n",
    "    if obs.is_hold == 1:\n",
    "        # Holding 100 shares: reward is the close-to-close move times 100, net of fees.\n",
    "        delta_price = (next_obs.decode(history_data, 1).close.iloc[0]\n",
    "                       - obs.decode(history_data, 1).close.iloc[0]) * 100\n",
    "        return delta_price - fee\n",
    "    else:\n",
    "        # Flat position: only the fee (if any) is paid.\n",
    "        return -fee\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 计算 reward 测试\n",
    "# obs.decode(data, 3, log=True)\n",
    "# next_obs.decode(data, 3, log=True)\n",
    "# calc_reward_batch(obs, next_obs, data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Env:\n",
    "    \"\"\"Trading environment over the 5-minute bar history (row 0 = newest bar).\n",
    "\n",
    "    Time advances by decrementing the row index. A sell is only executed if\n",
    "    the position was held overnight (the T+1 check in step()); a same-day\n",
    "    sell attempt behaves like hold.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, hps, history_data):\n",
    "        self._hps = hps\n",
    "        self._history_data = history_data\n",
    "        \n",
    "        # hps.days bars of 6 fields each, plus 3 position fields.\n",
    "        self._observations_dim = hps.days * 6 + 3\n",
    "        self._actions_dim = 3\n",
    "        return\n",
    "    \n",
    "    def reset(self):\n",
    "        # Start at the oldest row that still leaves hps.days bars of history.\n",
    "        index = self._history_data.shape[0] - self._hps.days - 1\n",
    "        return Observations(index=index, is_hold=0, wait_time=0, trade_price=0)\n",
    "    \n",
    "    def step(self, obs, action):\n",
    "        # obs is an Observations instance, action an Actions instance.\n",
    "        # Returns (next observation, reward, done).\n",
    "        index, is_hold, wait_time, trade_price = obs.index, obs.is_hold, obs.wait_time, obs.trade_price\n",
    "        done = True if index == 0 else False\n",
    "        action_choose = action.choose()\n",
    "        \n",
    "        if is_hold == 1 and action_choose == 'sell':\n",
    "            current_time = get_time(self._history_data['trade_time'].iloc[index])\n",
    "            is_pass_night = wait_time>48 or wait_time>current_time\n",
    "            if is_pass_night:\n",
    "                is_hold = 0  # position sold\n",
    "                wait_time = 0  # reset the wait timer\n",
    "                trade_price = self._history_data['close'].iloc[index]  # execute at the current close\n",
    "            else:\n",
    "                pass # T+1: cannot sell a position opened today; behaves like hold\n",
    "        elif is_hold == 0 and action_choose == 'buy':\n",
    "            is_hold = 1\n",
    "            wait_time = 0\n",
    "            trade_price = self._history_data['close'].iloc[index]\n",
    "        else:\n",
    "            pass # no-op\n",
    "        \n",
    "        # Row 0 is the newest bar, so moving forward in time decrements index.\n",
    "        next_obs = Observations(index-1, is_hold, wait_time+1, trade_price)\n",
    "\n",
    "        return next_obs, calc_reward_batch(obs, next_obs, self._history_data), done\n",
    "    \n",
    "    @property\n",
    "    def observations_dim(self):\n",
    "        return self._observations_dim\n",
    "    \n",
    "    @property\n",
    "    def actions_dim(self):\n",
    "        return self._actions_dim"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "class DataSet:\n",
    "    \"\"\"Experience buffer of Observations backed by a shared history DataFrame.\"\"\"\n",
    "\n",
    "    def __init__(self, hps, history_data):\n",
    "        self._buffer = []\n",
    "        self._length = 0\n",
    "        self._hps = hps\n",
    "        self._history_data = history_data\n",
    "\n",
    "    def get_batch(self, nums):\n",
    "        # Sampling (i, i+1) pairs requires at least two stored observations.\n",
    "        assert self._length > 1, 'Length of data is {} which is not enough. \\\n",
    "        Data need at least {}'.format(self._length, 2)\n",
    "        \n",
    "        picks = np.random.randint(0, self._length - 1, nums)\n",
    "        days = self._hps.days\n",
    "\n",
    "        obs = np.vstack([self._buffer[i].values(self._history_data, days)\n",
    "                         for i in picks])\n",
    "        next_obs = np.vstack([self._buffer[i + 1].values(self._history_data, days)\n",
    "                              for i in picks])\n",
    "        reward = np.array([calc_reward_batch(self._buffer[i], self._buffer[i + 1],\n",
    "                                             self._history_data)\n",
    "                           for i in picks])\n",
    "\n",
    "        return obs, next_obs, reward\n",
    "\n",
    "    def add_data(self, obs):\n",
    "        # obs is an Observations instance, appended in chronological order.\n",
    "        self._buffer.append(obs)\n",
    "        self._length += 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# DataSet test\n",
    "\n",
    "# obs = Observations(*[1, 1, 0, 89])\n",
    "# next_obs = Observations(*[0, 1, 1, 89])\n",
    "# data_set = DataSet(hps, data)\n",
    "# data_set.add_data(obs)\n",
    "# data_set.add_data(obs)\n",
    "# data_set.add_data(obs)\n",
    "# print(data_set._buffer)\n",
    "# obs.trade_price = 100\n",
    "# print(data_set._buffer)\n",
    "# print(obs)\n",
    "# data_set.add_data(obs)\n",
    "# obs, next_obs, reward = data_set.get_batch(2)\n",
    "# print('obs: \\n', obs)\n",
    "# print('\\nnext_obs: \\n', next_obs)\n",
    "# print('\\nreward: \\n', reward)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Model:\n",
    "    \"\"\"Actor-critic style network: a softmax policy head and a scalar Q head\n",
    "    sharing one hidden layer, trained with separate Adam optimizers.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, env, hps):\n",
    "        self._env = env\n",
    "        self._hps = hps\n",
    "\n",
    "        self.global_step = tf.Variable(0, name='global_step', trainable=False)\n",
    "        self.action, self.Q, self.action_loss, self.Q_loss, self.action_train_opt, self.Q_train_opt = \\\n",
    "            self._build_graph()\n",
    "        self._sess, self._summary_writer = self._sess_setup()\n",
    "        return\n",
    "\n",
    "    def train(self, iteration, data_set):\n",
    "        # Run `iteration` optimization steps on batches sampled from data_set.\n",
    "        for i in range(iteration):\n",
    "            obs, next_obs, reward = data_set.get_batch(self._hps.batch_size)\n",
    "            action_loss, Q_loss = self._train_one_step(obs, next_obs, reward)\n",
    "            print('action_loss: {}, Q_loss : {}'.format(action_loss, Q_loss))\n",
    "        return\n",
    "\n",
    "    def test(self, data):\n",
    "        # Evaluation is not implemented yet.\n",
    "        return\n",
    "\n",
    "    def predict(self, obs):\n",
    "        # Return action probabilities for a batch of observation vectors.\n",
    "        action_prob = self._sess.run(self.action, {self._observations_ph: obs})\n",
    "        return action_prob\n",
    "\n",
    "    def _train_one_step(self, obs, next_obs, reward):\n",
    "        # One simultaneous update of the policy and Q heads.\n",
    "        feed_dict = {self._observations_ph: obs,\n",
    "                     self._next_observations_ph: next_obs,\n",
    "                     self._rewards_ph: reward}\n",
    "\n",
    "        action_loss, Q_loss, _, _ = self._sess.run([self.action_loss, self.Q_loss,\n",
    "                                                    self.action_train_opt, self.Q_train_opt],\n",
    "                                                   feed_dict)\n",
    "        return action_loss, Q_loss\n",
    "\n",
    "    def _sess_setup(self):\n",
    "        # Supervisor handles checkpoint restore/saving and summary writing.\n",
    "        saver = tf.train.Saver(max_to_keep=3)\n",
    "        sv = tf.train.Supervisor(logdir=self._hps.train_dir,\n",
    "                                 is_chief=True,\n",
    "                                 saver=saver,\n",
    "                                 summary_op=None,\n",
    "                                 save_summaries_secs=600,  # save summaries for tensorboard every 600 secs\n",
    "                                 save_model_secs=600,  # checkpoint every 600 secs\n",
    "                                 global_step=self.global_step,\n",
    "                                 init_feed_dict=None)\n",
    "        summary_writer = sv.summary_writer\n",
    "        sess = sv.prepare_or_wait_for_session()\n",
    "        return sess, summary_writer\n",
    "\n",
    "    def _create_placeholders(self):\n",
    "        # Inputs: current/next observation vectors and the per-step rewards.\n",
    "        observations_dim = self._env.observations_dim\n",
    "\n",
    "        self._observations_ph = tf.placeholder(\n",
    "            tf.float32,\n",
    "            shape=(None, observations_dim),\n",
    "            name='observation',\n",
    "        )\n",
    "        self._next_observations_ph = tf.placeholder(\n",
    "            tf.float32,\n",
    "            shape=(None, observations_dim),\n",
    "            name='next_observation',\n",
    "        )\n",
    "        self._rewards_ph = tf.placeholder(\n",
    "            tf.float32,\n",
    "            shape=(None, ),\n",
    "            name='rewards',\n",
    "        )\n",
    "        return\n",
    "\n",
    "    def _action_Q_output(self, state, reuse=False):\n",
    "        # Shared hidden layer feeding a softmax policy head and a scalar Q head.\n",
    "        # tf.AUTO_REUSE lets the same weights serve both obs and next_obs.\n",
    "        with tf.variable_scope('hidden_state', reuse=tf.AUTO_REUSE):\n",
    "            hidden_states = tf.layers.dense(state, self._hps.hidden_dim,\n",
    "                                            activation=tf.nn.sigmoid, name='state_hidden_layer')\n",
    "\n",
    "        with tf.variable_scope('action_output', reuse=tf.AUTO_REUSE):\n",
    "            actions = tf.nn.softmax(tf.layers.dense(hidden_states, self._env.actions_dim,\n",
    "                                                    activation=tf.nn.sigmoid, name='action_output_layer'))\n",
    "\n",
    "        with tf.variable_scope('Q_output', reuse=tf.AUTO_REUSE):\n",
    "            Q = tf.layers.dense(tf.concat([hidden_states, actions], axis=1), 1,\n",
    "                                activation=None, name='Q_output_layer')\n",
    "        return hidden_states, actions, Q\n",
    "\n",
    "    def _build_graph(self):\n",
    "        self._create_placeholders()\n",
    "        _, action, Q = self._action_Q_output(self._observations_ph)\n",
    "        _, _, next_Q = self._action_Q_output(self._next_observations_ph)\n",
    "\n",
    "        # Policy loss: push Q of the chosen actions up.\n",
    "        action_loss = -tf.reduce_sum(tf.squeeze(Q), axis=0)\n",
    "\n",
    "        # Critic loss: squared TD error. The original minimized the raw\n",
    "        # residual sum(r + gamma*Q' - Q), which is unbounded below.\n",
    "        Q_loss = tf.reduce_sum(\n",
    "            tf.square(self._rewards_ph + tf.squeeze(self._hps.gamma * next_Q - Q)), axis=0)\n",
    "\n",
    "        # Each optimizer only touches its own head (plus the shared layer).\n",
    "        t_vars = tf.trainable_variables()\n",
    "        action_vars = [var for var in t_vars\n",
    "                       if var.name.startswith('hidden_state') or var.name.startswith('action_output')]\n",
    "        Q_vars = [var for var in t_vars\n",
    "                  if var.name.startswith('hidden_state') or var.name.startswith('Q_output')]\n",
    "\n",
    "        action_train_opt = tf.train.AdamOptimizer(self._hps.learning_rate).minimize(\n",
    "            action_loss, var_list=action_vars)\n",
    "\n",
    "        # Bug fix: the critic optimizer must minimize Q_loss, not action_loss.\n",
    "        Q_train_opt = tf.train.AdamOptimizer(self._hps.learning_rate).minimize(\n",
    "            Q_loss, var_list=Q_vars)\n",
    "\n",
    "        return action, Q, action_loss, Q_loss, action_train_opt, Q_train_opt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Agent:\n",
    "    \"\"\"Ties the model to a replay buffer and picks an action per observation.\"\"\"\n",
    "\n",
    "    def __init__(self, hps, env, history_data):\n",
    "        self._hps = hps\n",
    "        self._env = env\n",
    "        self._history_data = history_data\n",
    "        self._data_set = DataSet(hps, history_data)\n",
    "        self._model = Model(env, hps)\n",
    "\n",
    "    def step(self, obs):\n",
    "        # Store the observation, sample an action from the current policy,\n",
    "        # and run one training step once enough experience has accumulated.\n",
    "        self._data_set.add_data(obs)\n",
    "        features = obs.values(self._history_data, self._hps.days)\n",
    "        action_prob = self._model.predict(features)\n",
    "        action = Actions(action_prob[0])\n",
    "\n",
    "        if self._data_set._length > 20:  # warm-up threshold before training\n",
    "            self._model.train(1, self._data_set)\n",
    "\n",
    "        return action\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyperparameters, frozen into an immutable namedtuple.\n",
    "hps = {'trunc_norm_init_std': 1e-4,  # stddev for truncated-normal init\n",
    "      'hidden_dim': 20,  # units in the shared hidden layer\n",
    "      'train_dir': './model',  # checkpoint / summary directory\n",
    "      'gamma': 0.99,  # reward discount factor\n",
    "      'learning_rate': 0.003,\n",
    "      'batch_size': 10,\n",
    "      'days': 20}  # number of 5-minute bars per observation window\n",
    "hps = namedtuple(\"HParams\", hps.keys())(**hps)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# For model test\n",
    "# env = Env(hps, data)\n",
    "# obs = env.reset()\n",
    "# obs.values(data, hps.days)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-12-8cbd254bac9a>:46: Supervisor.__init__ (from tensorflow.python.training.supervisor) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please switch to tf.train.MonitoredTrainingSession\n",
      "INFO:tensorflow:Restoring parameters from ./model/model.ckpt-0\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Starting standard services.\n",
      "INFO:tensorflow:Saving checkpoint to path ./model/model.ckpt\n",
      "INFO:tensorflow:Starting queue runners.\n",
      "INFO:tensorflow:global_step/sec: 0\n"
     ]
    }
   ],
   "source": [
    "env = Env(hps, data)\n",
    "agent = Agent(hps, env, data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Roll the environment from the oldest usable bar towards the newest,\n",
    "# letting the agent act at every step and accumulating rewards.\n",
    "obs = env.reset()\n",
    "rewards = []\n",
    "reward_sum = 0\n",
    "n = 100000\n",
    "for i in range(n):\n",
    "    print('{}/{}'.format(i, n))\n",
    "    action = agent.step(obs)\n",
    "    obs, reward, done = env.step(obs, action)\n",
    "    rewards.append(reward)\n",
    "    reward_sum += reward\n",
    "    if done:\n",
    "        # Bug fix: the original loop ignored `done`; past the newest bar\n",
    "        # obs.index is -1 and decoding it would fail on an empty slice.\n",
    "        break\n",
    "    print('reward is {:.2f}, reward sum is {:.2f}, time is {}, close is {}\\n'.format(\n",
    "        reward, reward_sum, obs.decode(data, 1)['trade_time'].iloc[0], obs.decode(data, 1)['close'].iloc[0]))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "agent._data_set._buffer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  },
  "toc": {
   "base_numbering": 1.0,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
