{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from tensorflow import keras\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyperparameters: training-loop settings plus data geometry and trading fee.\n",
    "config = {\n",
    "        'training':{\n",
    "            'steps': 2000,  # number of gradient steps\n",
    "            'batch_size': 16,  # consecutive periods per training batch\n",
    "            'buffer_bias': 5e-5  # geometric sampling bias for the replay buffer\n",
    "        },\n",
    "        'coin_no': 11,   # number of assets (cash column is added by the model)\n",
    "        'window_size': 50,   # look-back periods per input tensor\n",
    "        'feature_no': 3,  # features per asset/period\n",
    "        \"test_portion\": 0.08,  # fraction of periods held out for testing\n",
    "        \"global_period\": 1800,  # presumably the period length in seconds - TODO confirm\n",
    "        \"trading_consumption\": 0.0025  # commission ratio applied to turnover\n",
    "        }\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Layer cnn is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2.  The layer has dtype float32 because it's dtype defaults to floatx.\n",
      "\n",
      "If you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n",
      "\n",
      "To change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.\n",
      "\n",
      "['Variable:0', 'cnn/conv1/kernel:0', 'cnn/conv1/bias:0', 'cnn/conv2/kernel:0', 'cnn/conv2/bias:0', 'cnn/votes/kernel:0', 'cnn/votes/bias:0']\n"
     ]
    }
   ],
   "source": [
    "# input size is 11x50x3\n",
    "# Remember: Channels last\n",
    "\n",
    "class CNN(tf.keras.Model):\n",
    "    \"\"\"Convolutional policy network producing portfolio weights.\n",
    "\n",
    "    call() takes [price_tensor, last_weights]: price_tensor is\n",
    "    (batch, rows, cols, features) channels-last, last_weights is\n",
    "    (batch, rows, 1, 1).  Returns a softmax over (cash + rows) weights.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, rows = 11, cols = 50, features = 3, batch_size=None):\n",
    "        super(CNN, self).__init__()\n",
    "        \n",
    "        self.tensor_shape = (rows, cols, features)\n",
    "        self.batch_size = batch_size\n",
    "        \n",
    "        # 1x3 convolution along the time axis, applied per asset row.\n",
    "        self.conv1 = tf.keras.layers.Conv2D(\n",
    "                            filters = 2, \n",
    "                            kernel_size = (1,3), \n",
    "                            padding='valid', \n",
    "                            activation='relu',\n",
    "                            name = 'conv1'\n",
    "                        )    \n",
    "        \n",
    "        # Collapses the remaining time dimension (cols-2 after conv1).\n",
    "        self.conv2 =  keras.layers.Conv2D(\n",
    "                            filters = 20, \n",
    "                            kernel_size = (1, cols-2), \n",
    "                            activation=\"relu\", \n",
    "                            name = 'conv2'\n",
    "                        )\n",
    "        # 1x1 conv producing one score ('vote') per asset row.\n",
    "        self.votes = keras.layers.Conv2D(1, (1,1), name = 'votes')\n",
    "        # Learnable cash bias, tiled over the batch and prepended to the votes.\n",
    "        self.b = tf.Variable(tf.zeros((1, 1), dtype=tf.float32), trainable=True)\n",
    "        self.softmax = tf.keras.layers.Activation('softmax')\n",
    "\n",
    "    def call(self, inputs):\n",
    "        x = self.conv1(inputs[0])\n",
    "        x = self.conv2(x)\n",
    "        # Append the previous portfolio weights as an extra feature channel.\n",
    "        x = tf.concat((x, inputs[1]), axis=3)\n",
    "        x = self.votes(x)\n",
    "        # Squeeze only the singleton spatial dims: a bare tf.squeeze(x) would\n",
    "        # also drop the batch axis whenever the batch has exactly one sample.\n",
    "        x = tf.squeeze(x, axis=[2, 3])\n",
    "        cash_bias = tf.tile(self.b, [tf.shape(x)[0], 1])\n",
    "        x = tf.concat((cash_bias, x), axis = -1)\n",
    "        x = self.softmax(x)\n",
    "        return x\n",
    "\n",
    "\n",
    "model = CNN()\n",
    "\n",
    "X = np.random.randn(100, 11,50,3)\n",
    "w = np.random.randn(100, 11, 1, 1)\n",
    "with tf.GradientTape() as tape:\n",
    "    y = model([X,w])\n",
    "\n",
    "print([var.name for var in tape.watched_variables()])\n",
    "grads = tape.gradient(y, model.trainable_variables)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __future__ import division,absolute_import,print_function\n",
    "import logging\n",
    "\n",
    "class ReplayBuffer:\n",
    "    \"\"\"Holds Experience indices and samples geometrically-biased batches.\n",
    "\n",
    "    The geometric distribution makes indices near the end of the buffer\n",
    "    (recent periods) more likely to start a batch.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, start_index, end_index, batch_size, coin_no, sample_bias=1.0):\n",
    "        \"\"\"\n",
    "        :param start_index: start index of the training set on the global data matrices\n",
    "        :param end_index: end index of the training set on the global data matrices\n",
    "        :param batch_size: number of consecutive experiences per batch\n",
    "        :param coin_no: number of assets (stored, not used here)\n",
    "        :param sample_bias: geometric-distribution parameter, value in (0, 1)\n",
    "        \"\"\"\n",
    "        self.__coin_no = coin_no\n",
    "        self.__experiences = [Experience(i) for i in range(start_index, end_index)]\n",
    "        # NOTE: in order to achieve the previous w feature\n",
    "        self.__batch_size = batch_size\n",
    "        self.__sample_bias = sample_bias\n",
    "        logging.debug(\"buffer_bias is %f\" % sample_bias)\n",
    "\n",
    "    def append_experience(self, state_index):\n",
    "        \"\"\"Append a new period index (for online / rolling training).\"\"\"\n",
    "        self.__experiences.append(Experience(state_index))\n",
    "        logging.debug(\"a new experience, indexed by %d, was appended\" % state_index)\n",
    "\n",
    "    def __sample(self, start, end, bias):\n",
    "        \"\"\"Draw an index in [start, end), biased towards end.\n",
    "\n",
    "        @:param end: is excluded\n",
    "        @:param bias: value in (0, 1)\n",
    "        \"\"\"\n",
    "        # Guard: with an empty range the rejection loop below would never\n",
    "        # terminate (np.random.geometric always returns >= 1).\n",
    "        if end <= start:\n",
    "            raise ValueError(\"empty sampling range: start=%d, end=%d\" % (start, end))\n",
    "        # TODO: deal with the case when bias is 0\n",
    "        # Rejection-sample a geometric offset that fits inside the range.\n",
    "        ran = np.random.geometric(bias)\n",
    "        while ran > end - start:\n",
    "            ran = np.random.geometric(bias)\n",
    "        result = end - ran\n",
    "        return result\n",
    "\n",
    "    def next_experience_batch(self):\n",
    "        \"\"\"Return batch_size consecutive Experiences starting at a random point.\"\"\"\n",
    "        # First get a start point randomly\n",
    "        batch_start = self.__sample(0, len(self.__experiences) - self.__batch_size,\n",
    "                                    self.__sample_bias)\n",
    "        batch = self.__experiences[batch_start:batch_start+self.__batch_size]\n",
    "        return batch\n",
    "\n",
    "\n",
    "class Experience:\n",
    "    \"\"\"Lightweight wrapper tying a training sample to its period index.\"\"\"\n",
    "    def __init__(self, state_index):\n",
    "        self.state_index = int(state_index)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Agent:\n",
    "    \"\"\"Portfolio-management agent.\n",
    "\n",
    "    Owns the CNN policy, the portfolio-vector memory (PVM) and the replay\n",
    "    buffer, and runs the gradient-based training loop.\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, config):\n",
    "        \n",
    "        self.train_config = config['training']        \n",
    "        self.batch_size = self.train_config['batch_size']\n",
    "        self.buffer_bias = self.train_config['buffer_bias']\n",
    "        \n",
    "        self.coin_no = config['coin_no']\n",
    "        self.window_size = config['window_size']\n",
    "        self.global_period = config[\"global_period\"]\n",
    "        self.feature_no = config['feature_no']\n",
    "        \n",
    "        self.no_periods = 150\n",
    "        \n",
    "        self.commission_ratio = config[\"trading_consumption\"]\n",
    "        \n",
    "        # Placeholder market data with shape (features, coins, periods).\n",
    "        self.global_data = tf.random.uniform(shape = (self.feature_no, self.coin_no, self.no_periods))\n",
    "        \n",
    "        # Portfolio vector memory: one row per period, one column per asset,\n",
    "        # initialised to the uniform portfolio 1/coin_no.\n",
    "        PVM = np.ones((self.global_data.shape[2], self.global_data.shape[1]), dtype='float32')/self.coin_no\n",
    "        self.PVM = pd.DataFrame(PVM)\n",
    "        \n",
    "        # Per-batch portfolio-value vector, filled in by loss().\n",
    "        self.pv_vector = None\n",
    "        \n",
    "        self.model = CNN(\n",
    "            config['coin_no'], \n",
    "            config['window_size'], \n",
    "            config['feature_no'], \n",
    "            config['training']['batch_size']\n",
    "        )\n",
    "        \n",
    "        self.divide_data(config['test_portion']) # This gives the indekses of the training and test data\n",
    "        \n",
    "        end_index = self._train_ind[-1]\n",
    "        self.__replay_buffer = ReplayBuffer(start_index=self._train_ind[0],\n",
    "                                               end_index=end_index,\n",
    "                                               sample_bias=self.buffer_bias,\n",
    "                                               batch_size=self.batch_size,\n",
    "                                               coin_no=self.coin_no)\n",
    "\n",
    "    #@tf.function\n",
    "    def train_step(self, batch):\n",
    "        \"\"\"Run one gradient step on `batch`; returns the (negative-reward) loss.\"\"\"\n",
    "        w = batch['last_w']\n",
    "        w = tf.reshape(w, [w.shape[0], w.shape[1], 1, 1] )\n",
    "        X = tf.transpose(batch['X'], [0, 2, 3, 1])\n",
    "        y = batch['y']\n",
    "                \n",
    "        with tf.GradientTape() as tape:\n",
    "                output = self.model([X, w])\n",
    "                                \n",
    "                # Compute negative reward\n",
    "                loss = self.loss(y, output)\n",
    "\n",
    "        grads = tape.gradient(loss, self.model.trainable_weights)\n",
    "        self.optimizer.apply_gradients(zip(grads, self.model.trainable_weights))\n",
    "\n",
    "        # Save the model output in PVM (cash column at index 0 is dropped).\n",
    "        self.PVM.iloc[self.indexs, :] = output[:, 1:].numpy()\n",
    "        \n",
    "        return loss\n",
    "    \n",
    "    def train(self):\n",
    "        \"\"\"Main loop: sample a batch and apply one gradient step per iteration.\"\"\"\n",
    "        self.optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)\n",
    "        \n",
    "        for step in range(self.train_config['steps']):\n",
    "            \n",
    "            batch = self.next_batch()\n",
    "    \n",
    "            # Do a train step\n",
    "            loss_value = self.train_step(batch)\n",
    "            \n",
    "            # Diagnostics computed from the batch's portfolio-value vector.\n",
    "            portfolio_value = tf.reduce_prod(self.pv_vector)\n",
    "            mean = tf.reduce_mean(self.pv_vector)\n",
    "            standard_deviation = tf.math.sqrt(tf.reduce_mean((self.pv_vector - mean) ** 2))\n",
    "            sharp_ratio = (mean - 1) / standard_deviation\n",
    "            \n",
    "            if step % 200 == 0:\n",
    "                print('Step %2d: loss=%2.5f, cumval=%.1f' %\n",
    "                        (step, -loss_value, portfolio_value))\n",
    "            \n",
    "    def next_batch(self):\n",
    "        \"\"\"\n",
    "        @:return: the next batch of training sample. The sample is a dictionary\n",
    "        with key \"X\"(input data); \"y\"(future relative price); \"last_w\" a numpy array\n",
    "        with shape [batch_size, assets]; \"setw\" a callback writing weights back\n",
    "        into the PVM rows of this batch\n",
    "        \"\"\"\n",
    "        batch = self.pack_samples([exp.state_index for exp in self.__replay_buffer.next_experience_batch()])\n",
    "        return batch\n",
    "\n",
    "    def pack_samples(self, indexs):\n",
    "        \"\"\"Assemble X, y and last_w arrays for the given period indices.\"\"\"\n",
    "        self.indexs = indexs\n",
    "        indexs = np.array(indexs)\n",
    "        # Previous-period weights feed the network's last_w input.\n",
    "        last_w = self.PVM.values[indexs-1, :]\n",
    "\n",
    "        def setw(w):                      # Notice that this function is defined in terms of the specifik indexs\n",
    "            self.PVM.iloc[indexs, :] = w    \n",
    "        M = [self.get_submatrix(index) for index in indexs]   # For each state_index in the batch, get a input tensor\n",
    "        M = np.array(M, dtype='float32')\n",
    "        X = M[:, :, :, :-1]    # X_t tensor\n",
    "        y = M[:, :, :, -1] / M[:, 0, None, :, -2]     # y_{t+1} obtained by dividing all features by prev close price\n",
    "        return {\"X\": X, \"y\": y, \"last_w\": last_w, \"setw\": setw}\n",
    "    \n",
    "    # volume in y is the volume in next access period\n",
    "    def get_submatrix(self, ind):\n",
    "        \"\"\"Slice a (features, coins, window_size+1) panel ending at `ind`.\"\"\"\n",
    "        return self.global_data[:, :, ind-(self.window_size):ind+1]\n",
    "    \n",
    "    def divide_data(self, test_portion, portion_reversed = False):\n",
    "        \"\"\"Split the period indices into train/test index lists.\"\"\"\n",
    "        train_portion = 1 - test_portion\n",
    "        s = float(train_portion + test_portion)\n",
    "        if portion_reversed:\n",
    "            portions = np.array([test_portion]) / s\n",
    "            portion_split = (portions * self.no_periods).astype(int)\n",
    "            indices = np.arange(self.no_periods)\n",
    "            self._test_ind, self._train_ind = np.split(indices, portion_split)\n",
    "        else:\n",
    "            portions = np.array([train_portion]) / s\n",
    "            portion_split = (portions * self.no_periods).astype(int)\n",
    "            indices = np.arange(self.no_periods)\n",
    "            self._train_ind, self._test_ind = np.split(indices, portion_split)\n",
    "\n",
    "        # Drop the first window_size periods (not enough history for an input\n",
    "        # tensor) and the final period (no future price for the label).\n",
    "        self._train_ind = self._train_ind[(self.window_size):-1]\n",
    "        self._train_ind = list(self._train_ind)\n",
    "        self._num_train_samples = len(self._train_ind)\n",
    "        self._num_test_samples = len(self._test_ind)\n",
    "\n",
    "    #get a loss function, which is minus the reward function\n",
    "    def loss(self, y, output):\n",
    "        \"\"\"Negative mean log return: r_t = log(mu_t * y_t . w_{t-1}).\"\"\"\n",
    "        # Prepend the constant cash price (1.0) for every sample.  Use the\n",
    "        # actual batch length instead of a hard-coded 16 so the loss also\n",
    "        # works when the batch size differs from the configured one.\n",
    "        batch_len = tf.shape(y)[0]\n",
    "        self.future_price = tf.concat([tf.ones([batch_len, 1]), y[:, 0, :]], 1)\n",
    "        # Weights after the market moves (re-normalised by realised prices).\n",
    "        self.future_w = (self.future_price * output) / tf.reduce_sum(self.future_price * output, axis=1)[:, None]\n",
    "        self.pv_vector = tf.reduce_sum(output * self.future_price, axis=1) *\\\n",
    "                           (tf.concat([tf.ones(1), self.pure_pc(output)], axis=0))\n",
    "        \n",
    "        return -tf.reduce_mean(tf.math.log(self.pv_vector))\n",
    "        \n",
    "    # consumption vector (on each periods)\n",
    "    def pure_pc(self, output):\n",
    "        \"\"\"Linear approximation of the transaction-cost factor mu per period.\"\"\"\n",
    "        c = self.commission_ratio\n",
    "        # Slice relative to the actual batch length (not self.batch_size) so\n",
    "        # this stays consistent with loss() for any batch size.\n",
    "        w_t = self.future_w[:-1]   # rebalanced weights at the end of period t\n",
    "        w_t1 = output[1:]          # newly chosen weights for period t+1\n",
    "        # mu ~= 1 - c * turnover over the non-cash assets.\n",
    "        mu = 1 - tf.reduce_sum(tf.math.abs(w_t1[:, 1:]-w_t[:, 1:]), axis=1)*c\n",
    "        return mu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the agent (constructs the CNN, the PVM and the replay buffer).\n",
    "agent = Agent(config)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step  0: loss=0.60621, cumval=16307.5\n",
      "Step 200: loss=1.22533, cumval=326918176.0\n",
      "Step 400: loss=2.09840, cumval=381250187558912.0\n",
      "Step 600: loss=1.88675, cumval=12896217071616.0\n",
      "Step 800: loss=1.86873, cumval=9666566291456.0\n",
      "Step 1000: loss=2.03957, cumval=148735354994688.0\n",
      "Step 1200: loss=1.96895, cumval=48050126979072.0\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-10-3d92c9b1ff97>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0magent\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m<ipython-input-8-9e5f3bc134a0>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m     80\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     81\u001b[0m             \u001b[0;31m# Do a train step\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 82\u001b[0;31m             \u001b[0mloss_value\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain_step\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     83\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     84\u001b[0m             \u001b[0;31m# You can write a custom metric here. See tf.org Keras -> Train and Evaluate\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-8-9e5f3bc134a0>\u001b[0m in \u001b[0;36mtrain_step\u001b[0;34m(self, batch)\u001b[0m\n\u001b[1;32m     62\u001b[0m                 \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0my\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     63\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 64\u001b[0;31m         \u001b[0mgrads\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtape\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgradient\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrainable_weights\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     65\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply_gradients\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mzip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgrads\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrainable_weights\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     66\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/PortfolioManager/venv/lib/python3.8/site-packages/tensorflow/python/eager/backprop.py\u001b[0m in \u001b[0;36mgradient\u001b[0;34m(self, target, sources, output_gradients, unconnected_gradients)\u001b[0m\n\u001b[1;32m   1040\u001b[0m                           for x in nest.flatten(output_gradients)]\n\u001b[1;32m   1041\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1042\u001b[0;31m     flat_grad = imperative_grad.imperative_grad(\n\u001b[0m\u001b[1;32m   1043\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_tape\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1044\u001b[0m         \u001b[0mflat_targets\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/PortfolioManager/venv/lib/python3.8/site-packages/tensorflow/python/eager/imperative_grad.py\u001b[0m in \u001b[0;36mimperative_grad\u001b[0;34m(tape, target, sources, output_gradients, sources_raw, unconnected_gradients)\u001b[0m\n\u001b[1;32m     69\u001b[0m         \"Unknown value for unconnected_gradients: %r\" % unconnected_gradients)\n\u001b[1;32m     70\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 71\u001b[0;31m   return pywrap_tfe.TFE_Py_TapeGradient(\n\u001b[0m\u001b[1;32m     72\u001b[0m       \u001b[0mtape\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_tape\u001b[0m\u001b[0;34m,\u001b[0m  \u001b[0;31m# pylint: disable=protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     73\u001b[0m       \u001b[0mtarget\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/PortfolioManager/venv/lib/python3.8/site-packages/tensorflow/python/eager/backprop.py\u001b[0m in \u001b[0;36m_gradient_function\u001b[0;34m(op_name, attr_tuple, num_inputs, inputs, outputs, out_grads, skip_input_indices, forward_pass_name_scope)\u001b[0m\n\u001b[1;32m    155\u001b[0m       \u001b[0mgradient_name_scope\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"gradient_tape/\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    156\u001b[0m     \u001b[0;32mwith\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname_scope\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgradient_name_scope\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 157\u001b[0;31m       \u001b[0;32mreturn\u001b[0m \u001b[0mgrad_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmock_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mout_grads\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    158\u001b[0m   \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    159\u001b[0m     \u001b[0;32mreturn\u001b[0m \u001b[0mgrad_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmock_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mout_grads\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/PortfolioManager/venv/lib/python3.8/site-packages/tensorflow/python/ops/nn_grad.py\u001b[0m in \u001b[0;36m_Conv2DGrad\u001b[0;34m(op, grad)\u001b[0m\n\u001b[1;32m    591\u001b[0m           \u001b[0muse_cudnn_on_gpu\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0muse_cudnn_on_gpu\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    592\u001b[0m           data_format=data_format),\n\u001b[0;32m--> 593\u001b[0;31m       gen_nn_ops.conv2d_backprop_filter(\n\u001b[0m\u001b[1;32m    594\u001b[0m           \u001b[0mop\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    595\u001b[0m           \u001b[0mshape_1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/PortfolioManager/venv/lib/python3.8/site-packages/tensorflow/python/ops/gen_nn_ops.py\u001b[0m in \u001b[0;36mconv2d_backprop_filter\u001b[0;34m(input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name)\u001b[0m\n\u001b[1;32m   1074\u001b[0m   \u001b[0;32mif\u001b[0m \u001b[0mtld\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_eager\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1075\u001b[0m     \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1076\u001b[0;31m       _result = pywrap_tfe.TFE_Py_FastPathExecute(\n\u001b[0m\u001b[1;32m   1077\u001b[0m         \u001b[0m_ctx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_context_handle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtld\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdevice_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"Conv2DBackpropFilter\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1078\u001b[0m         \u001b[0mtld\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mop_callbacks\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfilter_sizes\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mout_backprop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"strides\"\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# Run the training loop (interrupt the kernel to stop early).\n",
    "agent.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch: replicate one forward pass manually, outside train_step.\n",
    "batch = agent.next_batch()\n",
    "w = batch['last_w']\n",
    "w = tf.reshape(w, [w.shape[0], w.shape[1], 1, 1] )\n",
    "X = tf.transpose(batch['X'], [0, 2, 3, 1])\n",
    "y = batch['y']\n",
    "output=agent.model([X,w])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch: rebuild a fresh uniform PVM (note: float64 here, unlike the\n",
    "# float32 PVM created inside Agent.__init__).\n",
    "PVM = np.ones((agent.global_data.shape[2], agent.global_data.shape[1]))/agent.coin_no\n",
    "PVM = pd.DataFrame(PVM)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch: write model output (cash column dropped) into the PVM rows.\n",
    "PVM.iloc[agent.indexs, :] = output[:, 1:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch: fetch inputs for a manual model call.\n",
    "# NOTE(review): each next_batch() call draws a NEW random batch, so w and\n",
    "# tensor below come from different batches - fine for shape checks only.\n",
    "w = agent.next_batch()['last_w']\n",
    "w = tf.reshape(w, [w.shape[0], w.shape[1], 1, 1] )\n",
    "tensor = tf.transpose(agent.next_batch()['X'], [0, 2, 3, 1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch: future relative prices with the cash column (1s) prepended.\n",
    "# NOTE(review): 16 is the configured batch_size - keep in sync with config.\n",
    "future = tf.concat([tf.ones([16, 1]), agent.next_batch()['y'][:, 0, :]], 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch: run the model on the manually fetched inputs.\n",
    "output = agent.model([tensor, w])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch: inspect the model output's shape.\n",
    "output.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch: re-normalise weights by realised prices (the future_w formula\n",
    "# used inside Agent.loss).\n",
    "(output * future) / np.sum(future * output, axis=1)[:,None]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch: alternative way to create the trainable cash-bias variable\n",
    "# (CNN.__init__ uses tf.zeros directly instead of an initializer object).\n",
    "b_init = tf.zeros_initializer()\n",
    "b = tf.Variable(\n",
    "                initial_value=b_init(shape=(1, 1), dtype=\"float32\"),\n",
    "                trainable=True\n",
    "                )\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
