{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\ProgramData\\Anaconda3\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "import gym\n",
    "import os \n",
    "import sys\n",
    "import itertools\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from collections import defaultdict, namedtuple\n",
    "\n",
    "import matplotlib\n",
    "from matplotlib import pyplot as plt\n",
    "%matplotlib inline\n",
    "matplotlib.style.use('ggplot')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.\u001b[0m\n",
      "env.action_sapce: 2\n",
      "env.observation_sapce: 4\n",
      "env.observation_space.high: [4.8000002e+00 3.4028235e+38 4.1887903e-01 3.4028235e+38]\n",
      "env.observation_space.low: [-4.8000002e+00 -3.4028235e+38 -4.1887903e-01 -3.4028235e+38]\n"
     ]
    }
   ],
   "source": [
    "env = gym.envs.make('CartPole-v0')\n",
    "env = env.unwrapped\n",
    "env.seed(1)\n",
    "\n",
    "# Inspect the environment: 2 discrete actions, 4-dimensional observation.\n",
    "print(\"env.action_space:\", env.action_space.n)\n",
    "print(\"env.observation_space:\", env.observation_space.shape[0])\n",
    "print(\"env.observation_space.high:\", env.observation_space.high)\n",
    "print(\"env.observation_space.low:\", env.observation_space.low)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class PolicyGradient():\n",
    "    \"\"\"\n",
    "    Policy Gradient Reinforcement Learning actor.\n",
    "    Uses a 3-layer fully connected neural network as the policy network.\n",
    "    \"\"\"\n",
    "    def __init__(self, n_x, n_y,\n",
    "                learning_rate=0.01, load_path=None, save_path=None):\n",
    "        self.n_x = n_x  # number of state features\n",
    "        self.n_y = n_y  # number of discrete actions\n",
    "        self.lr = learning_rate\n",
    "        self.episode_rewards = []\n",
    "\n",
    "        self.__build_network()\n",
    "        self.sess = tf.Session()\n",
    "\n",
    "        tf.summary.FileWriter(\"logs_actor/\", self.sess.graph)\n",
    "        self.sess.run(tf.global_variables_initializer())\n",
    "        self.saver = tf.train.Saver()\n",
    "\n",
    "    def __build_network(self):\n",
    "        \"\"\"\n",
    "        Build the policy (actor) network: 3 fully connected layers.\n",
    "        \"\"\"\n",
    "        # Create placeholders; inputs are column-major, i.e. (features, batch).\n",
    "        with tf.name_scope('actor_inputs'):\n",
    "            self.X = tf.placeholder(tf.float32, shape=(self.n_x, None), name=\"state\")\n",
    "            self.Y = tf.placeholder(tf.float32, shape=(self.n_y, None), name=\"action\")\n",
    "            self.td_error = tf.placeholder(tf.float32, name=\"td_error\")\n",
    "\n",
    "        layer1_units = 10\n",
    "        layer2_units = 10\n",
    "        layer_output_units = self.n_y\n",
    "\n",
    "        with tf.name_scope(\"actor_parameter\"):\n",
    "            W1 = self.__weight_variable([layer1_units, self.n_x], \"aW1\")\n",
    "            b1 = self.__bias_variable([layer1_units, 1], \"ab1\")\n",
    "            W2 = self.__weight_variable([layer2_units, layer1_units], \"aW2\")\n",
    "            b2 = self.__bias_variable([layer2_units, 1], \"ab2\")\n",
    "            W3 = self.__weight_variable([layer_output_units, layer2_units], \"aW3\")\n",
    "            b3 = self.__bias_variable([layer_output_units, 1], \"ab3\")\n",
    "\n",
    "        with tf.name_scope(\"actor_layer1\"):\n",
    "            z1 = tf.add(tf.matmul(W1, self.X), b1)\n",
    "            a1 = tf.nn.relu(z1)\n",
    "        with tf.name_scope(\"actor_layer2\"):\n",
    "            z2 = tf.add(tf.matmul(W2, a1), b2)\n",
    "            a2 = tf.nn.relu(z2)\n",
    "        with tf.name_scope(\"actor_layer_output\"):\n",
    "            z3 = tf.add(tf.matmul(W3, a2), b3)\n",
    "\n",
    "        # Softmax outputs; transpose as tensorflow nn functions expect (batch, classes).\n",
    "        logits = tf.transpose(z3)\n",
    "        labels = tf.transpose(self.Y)\n",
    "        self.outputs_softmax = tf.nn.softmax(logits, name='A3')\n",
    "\n",
    "        with tf.name_scope('actor_loss'):\n",
    "            # NOTE(review): this is a squared-difference surrogate, not the usual\n",
    "            # cross-entropy (negative log probability) policy-gradient loss -- confirm intended.\n",
    "            neg_log_prob = tf.squared_difference(logits, labels)\n",
    "            loss = tf.reduce_mean(neg_log_prob * self.td_error)\n",
    "\n",
    "        with tf.name_scope('actor_train'):\n",
    "            self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)\n",
    "\n",
    "    def __weight_variable(self, shape, name):\n",
    "        # Xavier-initialized weight variable (fixed seed for reproducibility).\n",
    "        initial = tf.contrib.layers.xavier_initializer(seed=1)\n",
    "        return tf.get_variable(name, shape, initializer=initial)\n",
    "\n",
    "    def __bias_variable(self, shape, name):\n",
    "        # Xavier-initialized bias variable (fixed seed for reproducibility).\n",
    "        initial = tf.contrib.layers.xavier_initializer(seed=1)\n",
    "        return tf.get_variable(name, shape, initializer=initial)\n",
    "\n",
    "    def __disc_and_norm_rewards(self):\n",
    "        \"\"\"\n",
    "        Normalize the stored TD errors to zero mean / unit variance.\n",
    "        \"\"\"\n",
    "        # Bug fix: start from the stored values instead of np.zeros_like(...),\n",
    "        # which zeroed everything and made the 0/0 division emit NaN warnings.\n",
    "        rewards = np.array(self.episode_rewards, dtype=np.float64)\n",
    "        std = np.std(rewards)\n",
    "        if std > 0:  # guard the division; constant input cannot be normalized\n",
    "            rewards = (rewards - np.mean(rewards)) / std\n",
    "        # With a single stored value (learn() is called every step) the raw\n",
    "        # TD error is returned unchanged instead of collapsing to 0 or NaN.\n",
    "        return rewards\n",
    "\n",
    "    def store_transition(self, error):\n",
    "        \"\"\"\n",
    "        Store the TD error of one step for network training.\n",
    "        \"\"\"\n",
    "        self.episode_rewards.append(error)\n",
    "\n",
    "    def predict(self, state):\n",
    "        \"\"\"\n",
    "        Sample an action from the policy for the given state.\n",
    "        \"\"\"\n",
    "        # reshape state to (num_features, 1)\n",
    "        state = state[:, np.newaxis]\n",
    "\n",
    "        # get softmax probabilities\n",
    "        prob_weights = self.sess.run(self.outputs_softmax, feed_dict={self.X: state})\n",
    "\n",
    "        # return an action sampled according to the policy's probabilities\n",
    "        action = np.random.choice(range(len(prob_weights.ravel())), p=prob_weights.ravel())\n",
    "        return action\n",
    "\n",
    "    def learn(self, state, action, reward):\n",
    "        \"\"\"\n",
    "        Train the policy network on the stored TD error(s).\n",
    "        \"\"\"\n",
    "        # normalized episode reward (raw TD error when only one step is stored)\n",
    "        disc_norm_ep_reward = self.__disc_and_norm_rewards()\n",
    "\n",
    "        # one-hot encode the taken action\n",
    "        action__ = np.zeros(self.n_y)\n",
    "        action__[action] = 1\n",
    "\n",
    "        # train on episodes\n",
    "        self.sess.run(self.train_op, feed_dict={\n",
    "            self.X: np.vstack(state),\n",
    "            self.Y: np.vstack(action__),\n",
    "            self.td_error: disc_norm_ep_reward,\n",
    "        })\n",
    "\n",
    "        # Reset the episode data\n",
    "        self.episode_rewards = []\n",
    "\n",
    "        return disc_norm_ep_reward"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ValueEstimator():\n",
    "    \"\"\"\n",
    "    Value function estimator (critic) for actor-critic learning.\n",
    "    Uses a 3-layer fully connected neural network to approximate the state value.\n",
    "    \"\"\"\n",
    "    def __init__(self, n_x, n_y,\n",
    "                learning_rate=0.01, load_path=None, save_path=None):\n",
    "        self.n_x = n_x  # number of state features\n",
    "        self.n_y = n_y  # output dimension (1 for a scalar state value)\n",
    "        self.lr = learning_rate\n",
    "        self.episode_rewards = []\n",
    "\n",
    "        self.__build_network()\n",
    "        self.sess = tf.Session()\n",
    "\n",
    "        tf.summary.FileWriter(\"critic_logs/\", self.sess.graph)\n",
    "        self.sess.run(tf.global_variables_initializer())\n",
    "        self.saver = tf.train.Saver()\n",
    "\n",
    "    def __build_network(self):\n",
    "        \"\"\"\n",
    "        Build the value (critic) network: 3 fully connected layers.\n",
    "        \"\"\"\n",
    "        # Create placeholders; inputs are column-major, i.e. (features, batch).\n",
    "        with tf.name_scope('critic_inputs'):\n",
    "            self.X = tf.placeholder(tf.float32, shape=(self.n_x, None), name=\"X\")\n",
    "            self.Y = tf.placeholder(tf.float32, name=\"Y\")\n",
    "\n",
    "        layer1_units = 10\n",
    "        layer2_units = 10\n",
    "        layer_output_units = self.n_y\n",
    "\n",
    "        with tf.name_scope(\"critic_parameter\"):\n",
    "            W1 = self.__weight_variable([layer1_units, self.n_x], \"cW1\")\n",
    "            b1 = self.__bias_variable([layer1_units, 1], \"cb1\")\n",
    "            W2 = self.__weight_variable([layer2_units, layer1_units], \"cW2\")\n",
    "            b2 = self.__bias_variable([layer2_units, 1], \"cb2\")\n",
    "            W3 = self.__weight_variable([layer_output_units, layer2_units], \"cW3\")\n",
    "            b3 = self.__bias_variable([layer_output_units, 1], \"cb3\")\n",
    "\n",
    "        with tf.name_scope(\"critic_layer1\"):\n",
    "            z1 = tf.add(tf.matmul(W1, self.X), b1)\n",
    "            a1 = tf.nn.relu(z1)\n",
    "        with tf.name_scope(\"critic_layer2\"):\n",
    "            z2 = tf.add(tf.matmul(W2, a1), b2)\n",
    "            a2 = tf.nn.relu(z2)\n",
    "        with tf.name_scope(\"critic_layer_output\"):\n",
    "            z3 = tf.add(tf.matmul(W3, a2), b3)\n",
    "\n",
    "        # transpose as tensorflow nn functions expect (batch, units)\n",
    "        logits = tf.transpose(z3)\n",
    "        labels = tf.transpose(self.Y)\n",
    "        self.outputs = tf.nn.relu(logits, name='A3')\n",
    "\n",
    "        with tf.name_scope('critic_loss'):\n",
    "            loss = tf.squared_difference(logits, labels)\n",
    "\n",
    "        with tf.name_scope('critic_train'):\n",
    "            self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)\n",
    "\n",
    "    def __weight_variable(self, shape, name):\n",
    "        # Xavier-initialized weight variable (fixed seed for reproducibility).\n",
    "        initial = tf.contrib.layers.xavier_initializer(seed=1)\n",
    "        return tf.get_variable(name, shape, initializer=initial)\n",
    "\n",
    "    def __bias_variable(self, shape, name):\n",
    "        # Xavier-initialized bias variable (fixed seed for reproducibility).\n",
    "        initial = tf.contrib.layers.xavier_initializer(seed=1)\n",
    "        return tf.get_variable(name, shape, initializer=initial)\n",
    "\n",
    "    def __disc_and_norm_rewards(self):\n",
    "        \"\"\"\n",
    "        Normalize the stored TD targets to zero mean / unit variance.\n",
    "        \"\"\"\n",
    "        # Bug fix: start from the stored values instead of np.zeros_like(...),\n",
    "        # which zeroed everything and made the 0/0 division emit NaN warnings.\n",
    "        rewards = np.array(self.episode_rewards, dtype=np.float64)\n",
    "        std = np.std(rewards)\n",
    "        if std > 0:  # guard the division; constant input cannot be normalized\n",
    "            rewards = (rewards - np.mean(rewards)) / std\n",
    "        return rewards\n",
    "\n",
    "    def store_transition(self, target):\n",
    "        \"\"\"\n",
    "        Store the TD target of one step for network training.\n",
    "        \"\"\"\n",
    "        self.episode_rewards.append(target)\n",
    "\n",
    "    def predict(self, state):\n",
    "        \"\"\"\n",
    "        Predict the estimated value of the given state.\n",
    "        \"\"\"\n",
    "        # reshape state to (num_features, 1)\n",
    "        state = state[:, np.newaxis]\n",
    "\n",
    "        # run the value head on the single-state batch\n",
    "        value = self.sess.run(self.outputs, feed_dict={self.X: state})\n",
    "\n",
    "        return value[0]\n",
    "\n",
    "    def learn(self, state, target):\n",
    "        \"\"\"\n",
    "        Update the value network towards the TD target for this state.\n",
    "        \"\"\"\n",
    "        # Bug fix: the previous __disc_and_norm_rewards() call here was dead\n",
    "        # code -- its result was never used and it only produced NaN warnings.\n",
    "        state = state[:, np.newaxis]\n",
    "\n",
    "        # train on this transition\n",
    "        self.sess.run(self.train_op, feed_dict={\n",
    "            self.X: np.vstack(state),\n",
    "            self.Y: np.vstack(target)})\n",
    "\n",
    "        # Reset the episode data\n",
    "        self.episode_rewards = []"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Actor_Critic():\n",
    "    \"\"\"\n",
    "    Actor Critic Algorithm. Optimizes the policy function approximator (actor)\n",
    "    using the TD error supplied by the value estimator (critic).\n",
    "    \"\"\"\n",
    "    def __init__(self, env, num_episodes=200, learning_rate=0.01, reward_decay=0.95):\n",
    "\n",
    "        self.nA = env.action_space.n\n",
    "        self.nS = env.observation_space.shape[0]\n",
    "        self.nR = 1  # critic outputs a single scalar state value\n",
    "        self.env = env\n",
    "        self.num_episodes = num_episodes\n",
    "        self.reward_decay = reward_decay\n",
    "        self.learning_rate = learning_rate\n",
    "        self.rewards = []\n",
    "        self.RENDER_REWARD_MIN = 50  # start rendering once any episode beats this\n",
    "        self.RENDER_ENV = False\n",
    "\n",
    "        self.actor = PolicyGradient(n_x=self.nS, n_y=self.nA, \n",
    "                                 learning_rate=self.learning_rate)\n",
    "        self.critic = ValueEstimator(n_x=self.nS, n_y=self.nR,\n",
    "                                 learning_rate=self.learning_rate)\n",
    "\n",
    "        # keep track of useful statistics\n",
    "        record_head = namedtuple(\"Stats\", [\"episode_lengths\",\"episode_rewards\"])\n",
    "        self.record = record_head(\n",
    "                                episode_lengths = np.zeros(num_episodes),\n",
    "                                episode_rewards = np.zeros(num_episodes))\n",
    "\n",
    "    def mcpg_learn(self):\n",
    "        \"\"\"\n",
    "        Actor Critic Algorithm core loop. Returns the per-episode statistics.\n",
    "        \"\"\"\n",
    "        for i_episode in range(self.num_episodes):\n",
    "            # print episode number and a simple progress bar\n",
    "            num_present = (i_episode+1) / self.num_episodes\n",
    "            print(\"Episode {}/{}\".format(i_episode + 1, self.num_episodes)) # end=\"\"\n",
    "            print(\"=\" * round(num_present*60))\n",
    "\n",
    "            # Reset the environment and pick the first state.\n",
    "            # Bug fix: use self.env, not the global `env`, so the environment\n",
    "            # passed to __init__ is actually the one being trained on.\n",
    "            state = self.env.reset()\n",
    "            reward_ = 0\n",
    "\n",
    "            # One step in the environment, replaces code(while(True))\n",
    "            for t in itertools.count():\n",
    "                if self.RENDER_ENV: self.env.render()\n",
    "\n",
    "                # step1: choose an action based on policy estimator(Actor)\n",
    "                action = self.actor.predict(state)\n",
    "\n",
    "                # step2: take a step in the environment\n",
    "                next_state, reward, done, _ = self.env.step(action)\n",
    "                print(\"state:\", next_state,\"reward:\", reward, \"action:\",action)\n",
    "                reward_ += reward\n",
    "\n",
    "                # step3: calculate TD Target and TD error\n",
    "                # NOTE(review): this bootstraps from value_next even on terminal\n",
    "                # steps; standard TD(0) would drop the bootstrap when done -- confirm.\n",
    "                value_next = self.critic.predict(next_state)\n",
    "                td_target = reward + self.reward_decay * value_next\n",
    "                td_error = td_target - self.critic.predict(state)\n",
    "                print(\"value_next\", value_next, \"td_target\",td_target, \"td_error\", td_error)\n",
    "\n",
    "                self.actor.store_transition(td_error)\n",
    "                self.critic.store_transition(td_target)\n",
    "\n",
    "                # step4: update the value estimator(Critic)\n",
    "                self.critic.learn(state, td_target)\n",
    "\n",
    "                # step5: update the policy estimator(Actor)\n",
    "                self.actor.learn(state, action, td_error)\n",
    "\n",
    "                # update statistics\n",
    "                self.record.episode_rewards[i_episode] += reward\n",
    "                self.record.episode_lengths[i_episode] = t\n",
    "\n",
    "                if done:\n",
    "                    self.rewards.append(reward_)\n",
    "                    max_reward = np.amax(self.rewards)\n",
    "\n",
    "                    print(\"reward:{}, max reward:{}, episode len:{}\\n\".format(reward_, max_reward, t+1))\n",
    "                    if max_reward > self.RENDER_REWARD_MIN: self.RENDER_ENV = True\n",
    "                    break\n",
    "\n",
    "                # step6: save new state\n",
    "                state = next_state\n",
    "\n",
    "        return self.record"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Episode 1/200\n",
      "\n",
      "state: [ 0.03076804  0.19700098 -0.03151444 -0.33357875] reward: 1.0 action: 1\n",
      "value_next [0.00828713] td_target [1.0078728] td_error [1.0036727]\n",
      "state: [ 0.03470806  0.00234142 -0.03818601 -0.05099809] reward: 1.0 action: 0\n",
      "value_next [0.11997241] td_target [1.1139737] td_error [0.99601763]\n",
      "state: [ 0.03475489 -0.19221277 -0.03920597  0.22939649] reward: 1.0 action: 0\n",
      "value_next [0.18518138] td_target [1.1759223] td_error [0.94230187]\n",
      "state: [ 0.03091064 -0.38675317 -0.03461804  0.50945924] reward: 1.0 action: 0\n",
      "value_next [0.22321147] td_target [1.2120509] td_error [0.9058835]\n",
      "state: [ 0.02317557 -0.58137075 -0.02442886  0.79103503] reward: 1.0 action: 0\n",
      "value_next [0.2956518] td_target [1.2808692] td_error [0.9110272]\n",
      "state: [ 0.01154816 -0.77614891 -0.00860816  1.07593372] reward: 1.0 action: 0\n",
      "value_next [0.4121108] td_target [1.3915052] td_error [0.92478454]\n",
      "state: [-0.00397482 -0.97115607  0.01291052  1.36590287] reward: 1.0 action: 0\n",
      "value_next [0.6398022] td_target [1.6078122] td_error [1.0064836]\n",
      "state: [-0.02339794 -1.16643728  0.04022857  1.66259585] reward: 1.0 action: 0\n",
      "value_next [0.9026236] td_target [1.8574924] td_error [1.0455098]\n",
      "state: [-0.04672669 -1.36200394  0.07348049  1.96753251] reward: 1.0 action: 0\n",
      "value_next [1.2123718] td_target [2.1517532] td_error [1.0517511]\n",
      "state: [-0.07396677 -1.55782143  0.11283114  2.28204996] reward: 1.0 action: 0\n",
      "value_next [1.5736804] td_target [2.4949963] td_error [1.0562763]\n",
      "state: [-0.10512319 -1.75379401  0.15847214  2.60724166] reward: 1.0 action: 0\n",
      "value_next [2.0067208] td_target [2.9063847] td_error [1.0724149]\n",
      "state: [-0.14019908 -1.94974718  0.21061697  2.94388438] reward: 1.0 action: 0\n",
      "value_next [2.522727] td_target [3.3965907] td_error [1.0906658]\n",
      "reward:12.0, max reward:12.0, episode len:12\n",
      "\n",
      "Episode 2/200\n",
      "=\n",
      "state: [ 0.02635582 -0.23929621 -0.04301703  0.28031374] reward: 1.0 action: 0\n",
      "value_next [1.5910732] td_target [2.5115194] td_error [1.0500785]\n",
      "state: [ 0.0215699  -0.43377897 -0.03741075  0.55912491] reward: 1.0 action: 0\n",
      "value_next [1.8777294] td_target [2.783843] td_error [1.0402901]\n",
      "state: [ 0.01289432 -0.62835639 -0.02622826  0.83979062] reward: 1.0 action: 0\n",
      "value_next [2.2022858] td_target [3.0921714] td_error [1.0278265]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\ProgramData\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:73: RuntimeWarning: invalid value encountered in true_divide\n",
      "C:\\ProgramData\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:75: RuntimeWarning: invalid value encountered in true_divide\n",
      "C:\\ProgramData\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:95: RuntimeWarning: invalid value encountered in less\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [ 3.27187955e-04 -8.23110607e-01 -9.43244338e-03  1.12411125e+00] reward: 1.0 action: 0\n",
      "value_next [2.5702085] td_target [3.441698] td_error [1.0207994]\n",
      "state: [-0.01613502 -1.01810765  0.01304978  1.4138207 ] reward: 1.0 action: 0\n",
      "value_next [3.0046556] td_target [3.8544228] td_error [1.0256937]\n",
      "state: [-0.03649718 -1.21338885  0.0413262   1.71055409] reward: 1.0 action: 0\n",
      "value_next [3.5279884] td_target [4.351589] td_error [1.0419993]\n",
      "state: [-0.06076495 -1.40896044  0.07553728  2.01580745] reward: 1.0 action: 0\n",
      "value_next [4.179515] td_target [4.970539] td_error [1.0949845]\n",
      "state: [-0.08894416 -1.60478082  0.11585343  2.33088726] reward: 1.0 action: 0\n",
      "value_next [4.9673467] td_target [5.7189794] td_error [1.1397548]\n",
      "state: [-0.12103978 -1.80074514  0.16247117  2.65684804] reward: 1.0 action: 0\n",
      "value_next [5.894919] td_target [6.600173] td_error [1.15557]\n",
      "state: [-0.15705468 -1.99666731  0.21560813  2.99441767] reward: 1.0 action: 0\n",
      "value_next [6.9752893] td_target [7.626525] td_error [1.1601834]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 3/200\n",
      "=\n",
      "state: [ 0.01877362 -0.19971191  0.04075166  0.33261762] reward: 1.0 action: 0\n",
      "value_next [3.4326897] td_target [4.261055] td_error [1.2741485]\n",
      "state: [ 0.01477938 -0.39538949  0.04740401  0.63786798] reward: 1.0 action: 0\n",
      "value_next [4.1839137] td_target [4.974718] td_error [1.2961783]\n",
      "state: [ 0.00687159 -0.59113929  0.06016137  0.9450944 ] reward: 1.0 action: 0\n",
      "value_next [5.0368376] td_target [5.7849956] td_error [1.2941351]\n",
      "state: [-0.00495119 -0.78701776  0.07906326  1.25605731] reward: 1.0 action: 0\n",
      "value_next [6.003001] td_target [6.7028513] td_error [1.2909021]\n",
      "state: [-0.02069155 -0.98305783  0.1041844   1.57241919] reward: 1.0 action: 0\n",
      "value_next [7.1054153] td_target [7.7501445] td_error [1.2901096]\n",
      "state: [-0.0403527  -1.17925702  0.13563279  1.89569705] reward: 1.0 action: 0\n",
      "value_next [8.431] td_target [9.00945] td_error [1.3424764]\n",
      "state: [-0.06393785 -1.37556368  0.17354673  2.22720659] reward: 1.0 action: 0\n",
      "value_next [9.963924] td_target [10.465728] td_error [1.347559]\n",
      "state: [-0.09144912 -1.57186093  0.21809086  2.56799647] reward: 1.0 action: 0\n",
      "value_next [11.742087] td_target [12.154983] td_error [1.3548155]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 4/200\n",
      "=\n",
      "state: [-0.03387007 -0.2427815   0.03877906  0.29609456] reward: 1.0 action: 0\n",
      "value_next [5.8605804] td_target [6.567551] td_error [1.6454868]\n",
      "state: [-0.0387257  -0.43843421  0.04470095  0.60075116] reward: 1.0 action: 0\n",
      "value_next [7.2667055] td_target [7.9033704] td_error [1.6434002]\n",
      "state: [-0.04749438 -0.63415206  0.05671597  0.90717237] reward: 1.0 action: 0\n",
      "value_next [8.855789] td_target [9.412999] td_error [1.6480665]\n",
      "state: [-0.06017742 -0.8299941   0.07485942  1.21712864] reward: 1.0 action: 0\n",
      "value_next [10.664913] td_target [11.131667] td_error [1.660182]\n",
      "state: [-0.07677731 -1.02599732  0.09920199  1.53229819] reward: 1.0 action: 0\n",
      "value_next [12.823542] td_target [13.182364] td_error [1.7474403]\n",
      "state: [-0.09729725 -1.222165    0.12984795  1.8542206 ] reward: 1.0 action: 0\n",
      "value_next [15.30101] td_target [15.535959] td_error [1.7589493]\n",
      "state: [-0.12174055 -1.41845318  0.16693237  2.18424233] reward: 1.0 action: 0\n",
      "value_next [18.153141] td_target [18.245483] td_error [1.7747402]\n",
      "state: [-0.15010962 -1.61475501  0.21061721  2.52345241] reward: 1.0 action: 0\n",
      "value_next [21.435997] td_target [21.364197] td_error [1.7930832]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 5/200\n",
      "==\n",
      "state: [-0.00283339 -0.19470528  0.03303272  0.33535122] reward: 1.0 action: 0\n",
      "value_next [9.839769] td_target [10.347781] td_error [2.2387276]\n",
      "state: [-0.00672749 -0.39028139  0.03973974  0.6382652 ] reward: 1.0 action: 0\n",
      "value_next [12.32426] td_target [12.708047] td_error [2.240631]\n",
      "state: [-0.01453312 -0.58593428  0.05250505  0.94319324] reward: 1.0 action: 0\n",
      "value_next [15.174758] td_target [15.416019] td_error [2.3008928]\n",
      "state: [-0.02625181 -0.78172284  0.07136891  1.25190075] reward: 1.0 action: 0\n",
      "value_next [18.559881] td_target [18.631887] td_error [2.4530869]\n",
      "state: [-0.04188626 -0.97768291  0.09640693  1.56605724] reward: 1.0 action: 0\n",
      "value_next [22.41097] td_target [22.290422] td_error [2.4738731]\n",
      "state: [-0.06143992 -1.17381564  0.12772807  1.88718995] reward: 1.0 action: 0\n",
      "value_next [26.808361] td_target [26.467943] td_error [2.5016232]\n",
      "state: [-0.08491623 -1.37007393  0.16547187  2.21662929] reward: 1.0 action: 0\n",
      "value_next [31.834734] td_target [31.242996] td_error [2.5349865]\n",
      "state: [-0.11231771 -1.56634666  0.20980446  2.555444  ] reward: 1.0 action: 0\n",
      "value_next [37.57127] td_target [36.692707] td_error [2.5709038]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 6/200\n",
      "==\n",
      "state: [ 0.00740251 -0.24089143  0.03135807  0.33412478] reward: 1.0 action: 0\n",
      "value_next [16.21286] td_target [16.402218] td_error [3.2370157]\n",
      "state: [ 0.00258468 -0.43644533  0.03804057  0.63652918] reward: 1.0 action: 0\n",
      "value_next [20.486235] td_target [20.461924] td_error [3.2763252]\n",
      "state: [-0.00614422 -0.63207657  0.05077115  0.94094498] reward: 1.0 action: 0\n",
      "value_next [25.610298] td_target [25.329782] td_error [3.5720577]\n",
      "state: [-0.01878576 -0.82784468  0.06959005  1.24913905] reward: 1.0 action: 0\n",
      "value_next [31.401283] td_target [30.831219] td_error [3.6078663]\n",
      "state: [-0.03534265 -1.02378626  0.09457283  1.562783  ] reward: 1.0 action: 0\n",
      "value_next [37.974037] td_target [37.075336] td_error [3.661663]\n",
      "state: [-0.05581837 -1.21990348  0.12582849  1.88340724] reward: 1.0 action: 0\n",
      "value_next [45.42867] td_target [44.157234] td_error [3.7107697]\n",
      "state: [-0.08021644 -1.41615055  0.16349664  2.21234659] reward: 1.0 action: 0\n",
      "value_next [53.899857] td_target [52.204865] td_error [3.7622757]\n",
      "state: [-0.10853946 -1.612418    0.20774357  2.55067583] reward: 1.0 action: 0\n",
      "value_next [63.529713] td_target [61.353226] td_error [3.8287277]\n",
      "state: [-0.14078782 -1.80851479  0.25875709  2.89913478] reward: 1.0 action: 0\n",
      "value_next [74.43411] td_target [71.7124] td_error [3.8924713]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 7/200\n",
      "==\n",
      "state: [-0.04315166 -0.18208625 -0.02922762  0.30871037] reward: 1.0 action: 0\n",
      "value_next [26.2555] td_target [25.942726] td_error [4.943224]\n",
      "state: [-0.04679338 -0.37677983 -0.02305341  0.59203431] reward: 1.0 action: 0\n",
      "value_next [33.873657] td_target [33.179974] td_error [5.4640923]\n",
      "state: [-0.05432898 -0.57157159 -0.01121273  0.8773672 ] reward: 1.0 action: 0\n",
      "value_next [42.507122] td_target [41.381767] td_error [5.565956]\n",
      "state: [-0.06576041 -0.76653937  0.00633462  1.16650402] reward: 1.0 action: 0\n",
      "value_next [52.182255] td_target [50.573143] td_error [5.6148453]\n",
      "state: [-0.0810912  -0.96174318  0.0296647   1.46116624] reward: 1.0 action: 0\n",
      "value_next [63.065765] td_target [60.912476] td_error [5.680809]\n",
      "state: [-0.10032606 -1.15721596  0.05888802  1.76296654] reward: 1.0 action: 0\n",
      "value_next [75.34702] td_target [72.57967] td_error [5.765747]\n",
      "state: [-0.12347038 -1.35295243  0.09414735  2.07336538] reward: 1.0 action: 0\n",
      "value_next [89.2223] td_target [85.761185] td_error [5.8675385]\n",
      "state: [-0.15052943 -1.54889565  0.13561466  2.39361704] reward: 1.0 action: 0\n",
      "value_next [104.889435] td_target [100.64496] td_error [5.980995]\n",
      "state: [-0.18150734 -1.74492086  0.183487    2.72470373] reward: 1.0 action: 0\n",
      "value_next [122.55027] td_target [117.42275] td_error [6.0981216]\n",
      "state: [-0.21640576 -1.94081688  0.23798108  3.06725812] reward: 1.0 action: 0\n",
      "value_next [142.41199] td_target [136.29138] td_error [6.2072754]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 8/200\n",
      "==\n",
      "state: [ 0.02649051 -0.17793561  0.04949688  0.33605504] reward: 1.0 action: 0\n",
      "value_next [45.899834] td_target [44.604843] td_error [9.520901]\n",
      "state: [ 0.0229318  -0.37372574  0.05621798  0.64392669] reward: 1.0 action: 0\n",
      "value_next [60.38508] td_target [58.365826] td_error [10.012733]\n",
      "state: [ 0.01545729 -0.56958427  0.06909651  0.95376973] reward: 1.0 action: 0\n",
      "value_next [76.49327] td_target [73.66861] td_error [10.070145]\n",
      "state: [ 0.0040656  -0.76556438  0.08817191  1.26733664] reward: 1.0 action: 0\n",
      "value_next [94.4448] td_target [90.72256] td_error [10.145195]\n",
      "state: [-0.01124569 -0.96169504  0.11351864  1.58627875] reward: 1.0 action: 0\n",
      "value_next [114.51495] td_target [109.78921] td_error [10.245407]\n",
      "state: [-0.03047959 -1.15796871  0.14524422  1.91209718] reward: 1.0 action: 0\n",
      "value_next [137.00056] td_target [131.15053] td_error [10.369484]\n",
      "state: [-0.05363896 -1.35432727  0.18348616  2.24608554] reward: 1.0 action: 0\n",
      "value_next [162.19618] td_target [155.08636] td_error [10.509003]\n",
      "state: [-0.08072551 -1.55064577  0.22840787  2.5892628 ] reward: 1.0 action: 0\n",
      "value_next [190.38998] td_target [181.87048] td_error [10.649933]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 9/200\n",
      "===\n",
      "state: [-0.03329182 -0.21446098  0.01012827  0.32868126] reward: 1.0 action: 0\n",
      "value_next [70.270874] td_target [67.75733] td_error [14.506241]\n",
      "state: [-0.03758104 -0.40972564  0.0167019   0.62454089] reward: 1.0 action: 0\n",
      "value_next [91.93051] td_target [88.333984] td_error [14.545273]\n",
      "state: [-0.04577556 -0.60507674  0.02919271  0.92243679] reward: 1.0 action: 0\n",
      "value_next [115.87296] td_target [111.079315] td_error [14.593315]\n",
      "state: [-0.05787709 -0.8005807   0.04764145  1.22414922] reward: 1.0 action: 0\n",
      "value_next [142.40718] td_target [136.28682] td_error [14.6719055]\n",
      "state: [-0.07388871 -0.99628276  0.07212443  1.53137053] reward: 1.0 action: 0\n",
      "value_next [171.9151] td_target [164.31934] td_error [14.79129]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.09381436 -1.19219619  0.10275184  1.84566285] reward: 1.0 action: 0\n",
      "value_next [204.80891] td_target [195.56847] td_error [14.95076]\n",
      "state: [-0.11765828 -1.38828956  0.1396651   2.16840744] reward: 1.0 action: 0\n",
      "value_next [241.5013] td_target [230.42622] td_error [15.140091]\n",
      "state: [-0.14542408 -1.58447177  0.18303325  2.50074349] reward: 1.0 action: 0\n",
      "value_next [282.39685] td_target [269.277] td_error [15.341324]\n",
      "state: [-0.17711351 -1.78057459  0.23304812  2.84349603] reward: 1.0 action: 0\n",
      "value_next [327.89514] td_target [312.50037] td_error [15.528656]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 10/200\n",
      "===\n",
      "state: [ 0.01945795 -0.22768138  0.034387    0.31788179] reward: 1.0 action: 0\n",
      "value_next [107.01589] td_target [102.66509] td_error [23.002487]\n",
      "state: [ 0.01490432 -0.4232758   0.04074464  0.62120776] reward: 1.0 action: 0\n",
      "value_next [141.02441] td_target [134.97319] td_error [23.011726]\n",
      "state: [ 0.0064388  -0.61894235  0.05316879  0.92643961] reward: 1.0 action: 0\n",
      "value_next [178.40915] td_target [170.4887] td_error [23.031525]\n",
      "state: [-0.00594004 -0.81474042  0.07169758  1.23534594] reward: 1.0 action: 0\n",
      "value_next [219.59634] td_target [209.61653] td_error [23.09105]\n",
      "state: [-0.02223485 -1.01070686  0.0964045   1.54960143] reward: 1.0 action: 0\n",
      "value_next [265.11264] td_target [252.85701] td_error [23.203217]\n",
      "state: [-0.04244899 -1.20684438  0.12739653  1.87074065] reward: 1.0 action: 0\n",
      "value_next [315.5274] td_target [300.75104] td_error [23.365204]\n",
      "state: [-0.06658588 -1.40310807  0.16481134  2.20010376] reward: 1.0 action: 0\n",
      "value_next [371.41077] td_target [353.8402] td_error [23.5607]\n",
      "state: [-0.09464804 -1.59938971  0.20881342  2.53877207] reward: 1.0 action: 0\n",
      "value_next [433.3193] td_target [412.65332] td_error [23.761566]\n",
      "state: [-0.12663583 -1.79549988  0.25958886  2.88749332] reward: 1.0 action: 0\n",
      "value_next [501.79736] td_target [477.7075] td_error [23.928192]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 11/200\n",
      "===\n",
      "state: [-0.04025113 -0.20675065 -0.02555368  0.26435873] reward: 1.0 action: 0\n",
      "value_next [152.44032] td_target [145.8183] td_error [33.143127]\n",
      "state: [-0.04438614 -0.40149872 -0.0202665   0.54887351] reward: 1.0 action: 0\n",
      "value_next [200.99559] td_target [191.9458] td_error [33.040955]\n",
      "state: [-0.05241612 -0.5963302  -0.00928903  0.83510277] reward: 1.0 action: 0\n",
      "value_next [254.00645] td_target [242.30612] td_error [32.95404]\n",
      "state: [-0.06434272 -0.79132403  0.00741302  1.12484996] reward: 1.0 action: 0\n",
      "value_next [311.98804] td_target [297.38864] td_error [32.92325]\n",
      "state: [-0.0801692  -0.98654236  0.02991002  1.41984881] reward: 1.0 action: 0\n",
      "value_next [375.60797] td_target [357.82758] td_error [32.971436]\n",
      "state: [-0.09990005 -1.1820214   0.058307    1.72172846] reward: 1.0 action: 0\n",
      "value_next [445.62692] td_target [424.34558] td_error [33.103577]\n",
      "state: [-0.12354047 -1.37776049  0.09274157  2.0319707 ] reward: 1.0 action: 0\n",
      "value_next [522.8427] td_target [497.70056] td_error [33.30774]\n",
      "state: [-0.15109568 -1.57370887  0.13338098  2.35185711] reward: 1.0 action: 0\n",
      "value_next [608.05536] td_target [578.6526] td_error [33.556763]\n",
      "state: [-0.18256986 -1.76974977  0.18041812  2.68240443] reward: 1.0 action: 0\n",
      "value_next [702.0526] td_target [667.94995] td_error [33.807617]\n",
      "state: [-0.21796486 -1.96568214  0.23406621  3.02428827] reward: 1.0 action: 0\n",
      "value_next [805.5997] td_target [766.3197] td_error [33.999207]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 12/200\n",
      "====\n",
      "state: [ 0.00272239 -0.23873736 -0.04365897  0.26557157] reward: 1.0 action: 0\n",
      "value_next [226.3583] td_target [216.04039] td_error [49.497177]\n",
      "state: [-0.00205235 -0.43320987 -0.03834754  0.5441707 ] reward: 1.0 action: 0\n",
      "value_next [298.26318] td_target [284.35] td_error [49.203476]\n",
      "state: [-0.01071655 -0.62777255 -0.02746412  0.82452876] reward: 1.0 action: 0\n",
      "value_next [376.34412] td_target [358.52692] td_error [48.933807]\n",
      "state: [-0.023272   -0.82250829 -0.01097355  1.10844878] reward: 1.0 action: 0\n",
      "value_next [461.2646] td_target [439.20135] td_error [48.738617]\n",
      "state: [-0.03972217 -1.01748431  0.01119543  1.39766904] reward: 1.0 action: 0\n",
      "value_next [553.8863] td_target [527.19196] td_error [48.64627]\n",
      "state: [-0.06007185 -1.21274368  0.03914881  1.69383113] reward: 1.0 action: 0\n",
      "value_next [655.2017] td_target [623.44165] td_error [48.663696]\n",
      "state: [-0.08432673 -1.40829504  0.07302543  1.99844022] reward: 1.0 action: 0\n",
      "value_next [766.2713] td_target [728.9577] td_error [48.77777]\n",
      "state: [-0.11249263 -1.60410002  0.11299424  2.31281531] reward: 1.0 action: 0\n",
      "value_next [888.1784] td_target [844.7695] td_error [48.956055]\n",
      "state: [-0.14457463 -1.800058    0.15925054  2.63802748] reward: 1.0 action: 0\n",
      "value_next [1022.00635] td_target [971.906] td_error [49.145325]\n",
      "state: [-0.18057579 -1.99598824  0.21201109  2.97482603] reward: 1.0 action: 0\n",
      "value_next [1168.8132] td_target [1111.3726] td_error [49.266846]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 13/200\n",
      "====\n",
      "state: [-0.02039175 -0.18435916 -0.02985443  0.31558031] reward: 1.0 action: 0\n",
      "value_next [333.08957] td_target [317.4351] td_error [72.976456]\n",
      "state: [-0.02407893 -0.37904342 -0.02354282  0.59870066] reward: 1.0 action: 0\n",
      "value_next [438.2729] td_target [417.35925] td_error [72.34442]\n",
      "state: [-0.0316598  -0.57382819 -0.01156881  0.8838759 ] reward: 1.0 action: 0\n",
      "value_next [551.95325] td_target [525.3556] td_error [71.75687]\n",
      "state: [-0.04313636 -0.76879115  0.00610871  1.17289962] reward: 1.0 action: 0\n",
      "value_next [675.0309] td_target [642.27936] td_error [71.28125]\n",
      "state: [-0.05851218 -0.96399198  0.0295667   1.46749134] reward: 1.0 action: 0\n",
      "value_next [808.67114] td_target [769.23755] td_error [70.95349]\n",
      "state: [-0.07779202 -1.1594631   0.05891653  1.76926122] reward: 1.0 action: 0\n",
      "value_next [954.2146] td_target [907.50385] td_error [70.77893]\n",
      "state: [-0.10098129 -1.35519857  0.09430175  2.07966644] reward: 1.0 action: 0\n",
      "value_next [1113.0913] td_target [1058.4368] td_error [70.73364]\n",
      "state: [-0.12808526 -1.5511405   0.13589508  2.39995709] reward: 1.0 action: 0\n",
      "value_next [1286.7539] td_target [1223.4161] td_error [70.76355]\n",
      "state: [-0.15910807 -1.74716294  0.18389422  2.73111019] reward: 1.0 action: 0\n",
      "value_next [1476.6238] td_target [1403.7926] td_error [70.78064]\n",
      "state: [-0.19405133 -1.94305319  0.23851643  3.07375192] reward: 1.0 action: 0\n",
      "value_next [1684.0388] td_target [1600.8369] td_error [70.65698]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 14/200\n",
      "====\n",
      "state: [ 0.03104218 -0.17560692 -0.02752977  0.2361049 ] reward: 1.0 action: 0\n",
      "value_next [434.34332] td_target [413.62616] td_error [105.45218]\n",
      "state: [ 0.02753004 -0.37032496 -0.02280767  0.51997855] reward: 1.0 action: 0\n",
      "value_next [580.82904] td_target [552.7876] td_error [104.16287]\n",
      "state: [ 0.02012354 -0.56511853 -0.0124081   0.80538804] reward: 1.0 action: 0\n",
      "value_next [738.28076] td_target [702.3667] td_error [102.93207]\n",
      "state: [ 0.00882117 -0.7600682   0.00369966  1.09414215] reward: 1.0 action: 0\n",
      "value_next [907.7756] td_target [863.3868] td_error [101.851685]\n",
      "state: [-0.00638019 -0.9552387   0.0255825   1.38798358] reward: 1.0 action: 0\n",
      "value_next [1090.7491] td_target [1037.2117] td_error [100.97168]\n",
      "state: [-0.02548496 -1.15066994  0.05334217  1.68855497] reward: 1.0 action: 0\n",
      "value_next [1288.885] td_target [1225.4408] td_error [100.302]\n",
      "state: [-0.04849836 -1.34636643  0.08711327  1.99735734] reward: 1.0 action: 0\n",
      "value_next [1504.005] td_target [1429.8047] td_error [99.81433]\n",
      "state: [-0.07542569 -1.54228426  0.12706042  2.31569864] reward: 1.0 action: 0\n",
      "value_next [1737.9814] td_target [1652.0824] td_error [99.44348]\n",
      "state: [-0.10627138 -1.73831562  0.17337439  2.6446306 ] reward: 1.0 action: 0\n",
      "value_next [1992.6691] td_target [1894.0356] td_error [99.08191]\n",
      "state: [-0.14103769 -1.9342707   0.226267    2.98487375] reward: 1.0 action: 0\n",
      "value_next [2269.8372] td_target [2157.3452] td_error [98.57178]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 15/200\n",
      "====\n",
      "state: [-0.00750196 -0.23431448  0.00760987  0.335387  ] reward: 1.0 action: 0\n",
      "value_next [670.72766] td_target [638.1913] td_error [146.52911]\n",
      "state: [-0.01218825 -0.4295439   0.01431761  0.63045994] reward: 1.0 action: 0\n",
      "value_next [878.7119] td_target [835.7763] td_error [144.4206]\n",
      "state: [-0.02077913 -0.62486268  0.02692681  0.92761731] reward: 1.0 action: 0\n",
      "value_next [1101.5992] td_target [1047.5193] td_error [142.44061]\n",
      "state: [-0.03327638 -0.82033764  0.04547915  1.22863899] reward: 1.0 action: 0\n",
      "value_next [1340.9695] td_target [1274.921] td_error [140.69531]\n",
      "state: [-0.04968314 -1.01601439  0.07005193  1.53521687] reward: 1.0 action: 0\n",
      "value_next [1598.8262] td_target [1519.8849] td_error [139.22986]\n",
      "state: [-0.07000343 -1.21190653  0.10075627  1.84891283] reward: 1.0 action: 0\n",
      "value_next [1877.4425] td_target [1784.5703] td_error [138.03284]\n",
      "state: [-0.09424156 -1.40798309  0.13773453  2.17110829] reward: 1.0 action: 0\n",
      "value_next [2179.207] td_target [2071.2466] td_error [137.03455]\n",
      "state: [-0.12240122 -1.60415348  0.18115669  2.50294318] reward: 1.0 action: 0\n",
      "value_next [2506.4836] td_target [2382.1594] td_error [136.10278]\n",
      "state: [-0.15448429 -1.80025008  0.23121556  2.84524392] reward: 1.0 action: 0\n",
      "value_next [2861.4946] td_target [2719.42] td_error [135.03662]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 16/200\n",
      "=====\n",
      "state: [ 0.00393041 -0.14714254  0.03251686  0.34646768] reward: 1.0 action: 0\n",
      "value_next [845.4368] td_target [804.165] td_error [197.1399]\n",
      "state: [ 0.00098756 -0.34271157  0.03944621  0.64922442] reward: 1.0 action: 0\n",
      "value_next [1118.7588] td_target [1063.8208] td_error [194.0426]\n",
      "state: [-0.00586667 -0.53836017  0.0524307   0.95406321] reward: 1.0 action: 0\n",
      "value_next [1410.7992] td_target [1341.2593] td_error [191.12524]\n",
      "state: [-0.01663388 -0.73414679  0.07151197  1.26274714] reward: 1.0 action: 0\n",
      "value_next [1723.5131] td_target [1638.3374] td_error [188.52075]\n",
      "state: [-0.03131681 -0.93010648  0.09676691  1.57694201] reward: 1.0 action: 0\n",
      "value_next [2059.3962] td_target [1957.4264] td_error [186.28015]\n",
      "state: [-0.04991894 -1.12623921  0.12830575  1.89816984] reward: 1.0 action: 0\n",
      "value_next [2421.2698] td_target [2301.2063] td_error [184.37329]\n",
      "state: [-0.07244373 -1.32249623  0.16626915  2.22775403] reward: 1.0 action: 0\n",
      "value_next [2812.0686] td_target [2672.465] td_error [182.68945]\n",
      "state: [-0.09889365 -1.5187643   0.21082423  2.56675428] reward: 1.0 action: 0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "value_next [3234.6562] td_target [3073.9233] td_error [181.03589]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 17/200\n",
      "=====\n",
      "state: [ 0.00243437 -0.20865194  0.01336222  0.25249524] reward: 1.0 action: 0\n",
      "value_next [1022.5593] td_target [972.43134] td_error [244.54315]\n",
      "state: [-0.00173867 -0.40396212  0.01841212  0.54936273] reward: 1.0 action: 0\n",
      "value_next [1356.9601] td_target [1290.112] td_error [239.9862]\n",
      "state: [-0.00981792 -0.59933781  0.02939938  0.84778944] reward: 1.0 action: 0\n",
      "value_next [1712.8478] td_target [1628.2053] td_error [235.7096]\n",
      "state: [-0.02180467 -0.79484817  0.04635517  1.14957043] reward: 1.0 action: 0\n",
      "value_next [2092.5264] td_target [1988.9] td_error [231.87732]\n",
      "state: [-0.03770164 -0.99054348  0.06934657  1.45642161] reward: 1.0 action: 0\n",
      "value_next [2498.9521] td_target [2375.0044] td_error [228.5581]\n",
      "state: [-0.0575125  -1.18644456  0.09847501  1.76993851] reward: 1.0 action: 0\n",
      "value_next [2935.4963] td_target [2789.7214] td_error [225.73096]\n",
      "state: [-0.0812414  -1.38253051  0.13387378  2.09154734] reward: 1.0 action: 0\n",
      "value_next [3405.7097] td_target [3236.424] td_error [223.28833]\n",
      "state: [-0.10889201 -1.57872415  0.17570472  2.42244599] reward: 1.0 action: 0\n",
      "value_next [3913.1157] td_target [3718.46] td_error [221.0293]\n",
      "state: [-0.14046649 -1.77487511  0.22415364  2.7635342 ] reward: 1.0 action: 0\n",
      "value_next [4461.025] td_target [4238.9736] td_error [218.6543]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 18/200\n",
      "=====\n",
      "state: [-0.02046753 -0.24037197 -0.02753805  0.29476189] reward: 1.0 action: 0\n",
      "value_next [1338.3807] td_target [1272.4617] td_error [297.4355]\n",
      "state: [-0.02527497 -0.43509072 -0.02164281  0.57863411] reward: 1.0 action: 0\n",
      "value_next [1750.8622] td_target [1664.3191] td_error [291.7704]\n",
      "state: [-0.03397679 -0.62990277 -0.01007013  0.86442126] reward: 1.0 action: 0\n",
      "value_next [2189.3] td_target [2080.835] td_error [286.50793]\n",
      "state: [-0.04657484 -0.8248862   0.00721829  1.15392101] reward: 1.0 action: 0\n",
      "value_next [2656.406] td_target [2524.5857] td_error [281.8164]\n",
      "state: [-0.06307257 -1.02010155  0.03029671  1.44885857] reward: 1.0 action: 0\n",
      "value_next [3155.6155] td_target [2998.8347] td_error [277.76685]\n",
      "state: [-0.0834746  -1.21558257  0.05927388  1.75085141] reward: 1.0 action: 0\n",
      "value_next [3690.8555] td_target [3507.3127] td_error [274.3369]\n",
      "state: [-0.10778625 -1.41132518  0.09429091  2.061366  ] reward: 1.0 action: 0\n",
      "value_next [4266.3105] td_target [4053.9949] td_error [271.41016]\n",
      "state: [-0.13601275 -1.60727399  0.13551823  2.38166405] reward: 1.0 action: 0\n",
      "value_next [4886.206] td_target [4642.8955] td_error [268.7705]\n",
      "state: [-0.16815823 -1.80330628  0.18315151  2.71273701] reward: 1.0 action: 0\n",
      "value_next [5554.616] td_target [5277.8853] td_error [266.09326]\n",
      "state: [-0.20422436 -1.99921346  0.23740625  3.05522884] reward: 1.0 action: 0\n",
      "value_next [6275.2764] td_target [5962.5127] td_error [262.92188]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 19/200\n",
      "======\n",
      "state: [ 0.01810185 -0.24170952 -0.01958497  0.24345417] reward: 1.0 action: 0\n",
      "value_next [1636.8279] td_target [1555.9865] td_error [388.4099]\n",
      "state: [ 0.01326766 -0.43654633 -0.01471589  0.52989573] reward: 1.0 action: 0\n",
      "value_next [2163.7507] td_target [2056.5632] td_error [380.42517]\n",
      "state: [ 0.00453673 -0.63145821 -0.00411797  0.81790557] reward: 1.0 action: 0\n",
      "value_next [2722.2742] td_target [2587.1604] td_error [372.91675]\n",
      "state: [-0.00809243 -0.82652355  0.01224014  1.10929042] reward: 1.0 action: 0\n",
      "value_next [3315.473] td_target [3150.6992] td_error [366.09595]\n",
      "state: [-0.0246229  -1.02180419  0.03442595  1.40578795] reward: 1.0 action: 0\n",
      "value_next [3947.3154] td_target [3750.9497] td_error [360.05396]\n",
      "state: [-0.04505899 -1.21733616  0.06254171  1.70903135] reward: 1.0 action: 0\n",
      "value_next [4622.399] td_target [4392.279] td_error [354.7683]\n",
      "state: [-0.06940571 -1.41311865  0.09672233  2.0205062 ] reward: 1.0 action: 0\n",
      "value_next [5345.6934] td_target [5079.4087] td_error [350.10596]\n",
      "state: [-0.09766808 -1.60910074  0.13713246  2.34149736] reward: 1.0 action: 0\n",
      "value_next [6122.288] td_target [5817.174] td_error [345.81348]\n",
      "state: [-0.1298501  -1.8051655   0.1839624   2.67302424] reward: 1.0 action: 0\n",
      "value_next [6957.151] td_target [6610.2935] td_error [341.50293]\n",
      "state: [-0.16595341 -2.00111171  0.23742289  3.01576454] reward: 1.0 action: 0\n",
      "value_next [7854.8716] td_target [7463.128] td_error [336.62793]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 20/200\n",
      "======\n",
      "state: [ 0.02654765 -0.22007076 -0.02356204  0.30487869] reward: 1.0 action: 0\n",
      "value_next [2101.9397] td_target [1997.8427] td_error [487.84863]\n",
      "state: [ 0.02214623 -0.41484915 -0.01746446  0.59003857] reward: 1.0 action: 0\n",
      "value_next [2764.09] td_target [2626.8855] td_error [477.45996]\n",
      "state: [ 0.01384925 -0.60972227 -0.00566369  0.87716938] reward: 1.0 action: 0\n",
      "value_next [3464.6724] td_target [3292.4387] td_error [467.72192]\n",
      "state: [ 0.0016548  -0.80476679  0.0118797   1.16806636] reward: 1.0 action: 0\n",
      "value_next [4207.405] td_target [3998.0344] td_error [458.85645]\n",
      "state: [-0.01444053 -1.00004127  0.03524102  1.46444996] reward: 1.0 action: 0\n",
      "value_next [4997.0225] td_target [4748.1714] td_error [450.9453]\n",
      "state: [-0.03444136 -1.19557672  0.06453002  1.76792975] reward: 1.0 action: 0\n",
      "value_next [5838.9897] td_target [5548.04] td_error [443.93994]\n",
      "state: [-0.05835289 -1.39136535  0.09988862  2.0799601 ] reward: 1.0 action: 0\n",
      "value_next [6739.195] td_target [6403.235] td_error [437.6538]\n",
      "state: [-0.0861802  -1.58734684  0.14148782  2.4017854 ] reward: 1.0 action: 0\n",
      "value_next [7703.6694] td_target [7319.486] td_error [431.75488]\n",
      "state: [-0.11792714 -1.78339208  0.18952353  2.7343735 ] reward: 1.0 action: 0\n",
      "value_next [8738.281] td_target [8302.367] td_error [425.7456]\n",
      "state: [-0.15359498 -1.97928454  0.244211    3.07833768] reward: 1.0 action: 0\n",
      "value_next [9848.416] td_target [9356.995] td_error [418.92773]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 21/200\n",
      "======\n",
      "state: [ 0.01274766 -0.14845676 -0.03914667  0.24679756] reward: 1.0 action: 0\n",
      "value_next [2412.6975] td_target [2293.0625] td_error [607.77124]\n",
      "state: [ 0.00977853 -0.34299838 -0.03421072  0.52688025] reward: 1.0 action: 0\n",
      "value_next [3217.2827] td_target [3057.4185] td_error [593.6428]\n",
      "state: [ 0.00291856 -0.53762269 -0.02367311  0.8085899 ] reward: 1.0 action: 0\n",
      "value_next [4065.8308] td_target [3863.5393] td_error [580.3545]\n",
      "state: [-0.0078339  -0.73241237 -0.00750131  1.09373331] reward: 1.0 action: 0\n",
      "value_next [4962.43] td_target [4715.3086] td_error [568.1919]\n",
      "state: [-0.02248214 -0.9274347   0.01437335  1.38405321] reward: 1.0 action: 0\n",
      "value_next [5912.4272] td_target [5617.8057] td_error [557.2798]\n",
      "state: [-0.04103084 -1.12273294  0.04205442  1.68119593] reward: 1.0 action: 0\n",
      "value_next [6922.1094] td_target [6577.004] td_error [547.5918]\n",
      "state: [-0.0634855  -1.31831608  0.07567833  1.9866716 ] reward: 1.0 action: 0\n",
      "value_next [7998.385] td_target [7599.4653] td_error [538.9453]\n",
      "state: [-0.08985182 -1.51414619  0.11541177  2.3018042 ] reward: 1.0 action: 0\n",
      "value_next [9148.449] td_target [8692.026] td_error [530.9922]\n",
      "state: [-0.12013474 -1.71012325  0.16144785  2.62766996] reward: 1.0 action: 0\n",
      "value_next [10379.453] td_target [9861.48] td_error [523.1992]\n",
      "state: [-0.15433721 -1.90606727  0.21400125  2.96502351] reward: 1.0 action: 0\n",
      "value_next [11698.116] td_target [11114.21] td_error [514.80273]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 22/200\n",
      "=======\n",
      "state: [ 0.02956165 -0.24182208 -0.0279458   0.24149321] reward: 1.0 action: 0\n",
      "value_next [3101.718] td_target [2947.632] td_error [747.9292]\n",
      "state: [ 0.02472521 -0.43653392 -0.02311594  0.52523194] reward: 1.0 action: 0\n",
      "value_next [4096.218] td_target [3892.4067] td_error [728.8628]\n",
      "state: [ 0.01599453 -0.63132308 -0.0126113   0.81054214] reward: 1.0 action: 0\n",
      "value_next [5142.415] td_target [4886.2944] td_error [710.9668]\n",
      "state: [ 0.00336807 -0.82627     0.00359954  1.09923164] reward: 1.0 action: 0\n",
      "value_next [6245.424] td_target [5934.1523] td_error [694.5425]\n",
      "state: [-0.01315733 -1.02143915  0.02558418  1.39304173] reward: 1.0 action: 0\n",
      "value_next [7411.743] td_target [7042.156] td_error [679.6963]\n",
      "state: [-0.03358612 -1.21687006  0.05344501  1.69361309] reward: 1.0 action: 0\n",
      "value_next [8648.881] td_target [8217.437] td_error [666.3511]\n",
      "state: [-0.05792352 -1.41256668  0.08731727  2.00244424] reward: 1.0 action: 0\n",
      "value_next [9964.974] td_target [9467.725] td_error [654.25]\n",
      "state: [-0.08617485 -1.60848437  0.12736616  2.32083985] reward: 1.0 action: 0\n",
      "value_next [11368.415] td_target [10800.994] td_error [642.9365]\n",
      "state: [-0.11834454 -1.80451432  0.17378295  2.64984748] reward: 1.0 action: 0\n",
      "value_next [12867.46] td_target [12225.087] td_error [631.7256]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.15443482 -2.00046553  0.2267799   2.99018248] reward: 1.0 action: 0\n",
      "value_next [14469.768] td_target [13747.279] td_error [619.66016]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 23/200\n",
      "=======\n",
      "state: [-0.0143084  -0.17477768 -0.04214624  0.31189632] reward: 1.0 action: 0\n",
      "value_next [3826.9438] td_target [3636.5967] td_error [891.9851]\n",
      "state: [-0.01780395 -0.36927464 -0.03590831  0.59099536] reward: 1.0 action: 0\n",
      "value_next [5018.5186] td_target [4768.593] td_error [869.1096]\n",
      "state: [-0.02518945 -0.56387594 -0.02408841  0.87215436] reward: 1.0 action: 0\n",
      "value_next [6270.9683] td_target [5958.42] td_error [847.7549]\n",
      "state: [-0.03646696 -0.75866217 -0.00664532  1.15716763] reward: 1.0 action: 0\n",
      "value_next [7590.336] td_target [7211.819] td_error [828.23926]\n",
      "state: [-0.05164021 -0.95369687  0.01649803  1.44775952] reward: 1.0 action: 0\n",
      "value_next [8984.235] td_target [8536.023] td_error [810.6577]\n",
      "state: [-0.07071414 -1.14901776  0.04545322  1.74555118] reward: 1.0 action: 0\n",
      "value_next [10461.443] td_target [9939.371] td_error [794.9004]\n",
      "state: [-0.0936945  -1.34462606  0.08036425  2.05201924] reward: 1.0 action: 0\n",
      "value_next [12031.46] td_target [11430.887] td_error [780.63086]\n",
      "state: [-0.12058702 -1.54047346  0.12140463  2.36844386] reward: 1.0 action: 0\n",
      "value_next [13704.065] td_target [13019.862] td_error [767.2881]\n",
      "state: [-0.15139649 -1.73644637  0.16877351  2.6958449 ] reward: 1.0 action: 0\n",
      "value_next [15488.812] td_target [14715.371] td_error [754.01074]\n",
      "state: [-0.18612542 -1.93234765  0.22269041  3.03490581] reward: 1.0 action: 0\n",
      "value_next [17394.49] td_target [16525.766] td_error [739.6299]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 24/200\n",
      "=======\n",
      "state: [-0.00192453 -0.17869637  0.04545907  0.35561058] reward: 1.0 action: 0\n",
      "value_next [4858.6284] td_target [4616.697] td_error [1140.596]\n",
      "state: [-0.00549845 -0.37443419  0.05257128  0.66227435] reward: 1.0 action: 0\n",
      "value_next [6374.523] td_target [6056.797] td_error [1110.2705]\n",
      "state: [-0.01298714 -0.57024666  0.06581677  0.9710361 ] reward: 1.0 action: 0\n",
      "value_next [7965.7456] td_target [7568.458] td_error [1081.7451]\n",
      "state: [-0.02439207 -0.76618728  0.08523749  1.28364683] reward: 1.0 action: 0\n",
      "value_next [9640.108] td_target [9159.103] td_error [1055.3628]\n",
      "state: [-0.03971582 -0.96228489  0.11091043  1.60175484] reward: 1.0 action: 0\n",
      "value_next [11407.236] td_target [10837.874] td_error [1031.125]\n",
      "state: [-0.05896151 -1.15853149  0.14294553  1.9268569 ] reward: 1.0 action: 0\n",
      "value_next [13277.865] td_target [12614.972] td_error [1008.6953]\n",
      "state: [-0.08213214 -1.3548681   0.18148266  2.26024108] reward: 1.0 action: 0\n",
      "value_next [15263.121] td_target [14500.965] td_error [987.3906]\n",
      "state: [-0.10922951 -1.55116853  0.22668748  2.60291951] reward: 1.0 action: 0\n",
      "value_next [17373.758] td_target [16506.07] td_error [966.15234]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 25/200\n",
      "========\n",
      "state: [ 0.0023256  -0.21640829  0.04020124  0.33003789] reward: 1.0 action: 0\n",
      "value_next [5601.4243] td_target [5322.353] td_error [1308.5569]\n",
      "state: [-0.00200256 -0.41207877  0.046802    0.63512245] reward: 1.0 action: 0\n",
      "value_next [7336.472] td_target [6970.6484] td_error [1272.1577]\n",
      "state: [-0.01024414 -0.60782119  0.05950444  0.94216933] reward: 1.0 action: 0\n",
      "value_next [9154.538] td_target [8697.812] td_error [1238.0879]\n",
      "state: [-0.02240056 -0.80369232  0.07834783  1.25294001] reward: 1.0 action: 0\n",
      "value_next [11064.688] td_target [10512.454] td_error [1206.6992]\n",
      "state: [-0.03847441 -0.99972556  0.10340663  1.56909854] reward: 1.0 action: 0\n",
      "value_next [13077.959] td_target [12425.061] td_error [1177.9609]\n",
      "state: [-0.05846892 -1.19591906  0.1347886   1.89216419] reward: 1.0 action: 0\n",
      "value_next [15206.597] td_target [14447.267] td_error [1151.4912]\n",
      "state: [-0.0823873  -1.39222198  0.17263189  2.22345576] reward: 1.0 action: 0\n",
      "value_next [17463.264] td_target [16591.1] td_error [1126.5078]\n",
      "state: [-0.11023174 -1.58851852  0.217101    2.56402602] reward: 1.0 action: 0\n",
      "value_next [19860.252] td_target [18868.238] td_error [1101.8496]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 26/200\n",
      "========\n",
      "state: [ 0.00945776 -0.21609747  0.00187296  0.28761283] reward: 1.0 action: 0\n",
      "value_next [6134.994] td_target [5829.244] td_error [1462.9722]\n",
      "state: [ 0.00513581 -0.41124608  0.00762522  0.58088588] reward: 1.0 action: 0\n",
      "value_next [8059.6343] td_target [7657.6523] td_error [1421.2534]\n",
      "state: [-0.00308911 -0.60647404  0.01924294  0.8759611 ] reward: 1.0 action: 0\n",
      "value_next [10073.585] td_target [9570.905] td_error [1382.398]\n",
      "state: [-0.01521859 -0.8018522   0.03676216  1.17463116] reward: 1.0 action: 0\n",
      "value_next [12186.651] td_target [11578.318] td_error [1346.7988]\n",
      "state: [-0.03125563 -0.9974321   0.06025478  1.47860842] reward: 1.0 action: 0\n",
      "value_next [14410.83] td_target [13691.288] td_error [1314.4727]\n",
      "state: [-0.05120428 -1.1932357   0.08982695  1.78948501] reward: 1.0 action: 0\n",
      "value_next [16759.594] td_target [15922.614] td_error [1285.0898]\n",
      "state: [-0.07506899 -1.38924331  0.12561665  2.10868468] reward: 1.0 action: 0\n",
      "value_next [19247.145] td_target [18285.787] td_error [1257.9336]\n",
      "state: [-0.10285386 -1.58537913  0.16779034  2.43740449] reward: 1.0 action: 0\n",
      "value_next [21887.664] td_target [20794.281] td_error [1231.9023]\n",
      "state: [-0.13456144 -1.78149434  0.21653843  2.77654522] reward: 1.0 action: 0\n",
      "value_next [24694.473] td_target [23460.748] td_error [1205.4297]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 27/200\n",
      "========\n",
      "state: [-0.02964884 -0.18629518 -0.03300911  0.25948475] reward: 1.0 action: 0\n",
      "value_next [6862.0576] td_target [6519.9546] td_error [1649.1909]\n",
      "state: [-0.03337475 -0.38093073 -0.02781942  0.54157612] reward: 1.0 action: 0\n",
      "value_next [9022.314] td_target [8572.198] td_error [1601.3408]\n",
      "state: [-0.04099336 -0.57565085 -0.01698789  0.82536543] reward: 1.0 action: 0\n",
      "value_next [11280.614] td_target [10717.583] td_error [1556.874]\n",
      "state: [-5.25063786e-02 -7.70536393e-01 -4.80584007e-04  1.11265736e+00] reward: 1.0 action: 0\n",
      "value_next [13647.483] td_target [12966.109] td_error [1516.2529]\n",
      "state: [-0.06791711 -0.96565203  0.02177256  1.40518949] reward: 1.0 action: 0\n",
      "value_next [16135.917] td_target [15330.121] td_error [1479.5586]\n",
      "state: [-0.08723015 -1.16103742  0.04987635  1.70459874] reward: 1.0 action: 0\n",
      "value_next [18760.682] td_target [17823.646] td_error [1446.5029]\n",
      "state: [-0.1104509  -1.35669657  0.08396833  2.01238006] reward: 1.0 action: 0\n",
      "value_next [21537.59] td_target [20461.71] td_error [1416.4375]\n",
      "state: [-0.13758483 -1.55258491  0.12421593  2.32983502] reward: 1.0 action: 0\n",
      "value_next [24482.71] td_target [23259.574] td_error [1388.2734]\n",
      "state: [-0.16863652 -1.74859375  0.17081263  2.65800858] reward: 1.0 action: 0\n",
      "value_next [27611.574] td_target [26231.996] td_error [1360.4805]\n",
      "state: [-0.2036084  -1.94453213  0.2239728   2.99761397] reward: 1.0 action: 0\n",
      "value_next [30938.242] td_target [29392.33] td_error [1330.959]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 28/200\n",
      "========\n",
      "state: [-0.04289566 -0.17053617 -0.02162267  0.23665108] reward: 1.0 action: 0\n",
      "value_next [7896.603] td_target [7502.773] td_error [1947.3662]\n",
      "state: [-0.04630639 -0.36534264 -0.01688965  0.5224359 ] reward: 1.0 action: 0\n",
      "value_next [10427.213] td_target [9906.853] td_error [1889.6499]\n",
      "state: [-0.05361324 -0.56022284 -0.00644093  0.80974919] reward: 1.0 action: 0\n",
      "value_next [13069.659] td_target [12417.176] td_error [1835.8682]\n",
      "state: [-0.0648177  -0.75525595  0.00975405  1.10039917] reward: 1.0 action: 0\n",
      "value_next [15835.759] td_target [15044.971] td_error [1786.5684]\n",
      "state: [-0.07992282 -0.9505049   0.03176204  1.3961263 ] reward: 1.0 action: 0\n",
      "value_next [18740.148] td_target [17804.14] td_error [1741.8281]\n",
      "state: [-0.09893291 -1.14600724  0.05968456  1.69856838] reward: 1.0 action: 0\n",
      "value_next [21799.47] td_target [20710.496] td_error [1701.2969]\n",
      "state: [-0.12185306 -1.34176427  0.09365593  2.00921791] reward: 1.0 action: 0\n",
      "value_next [25031.55] td_target [23780.973] td_error [1664.1914]\n",
      "state: [-0.14868834 -1.53772781  0.13384029  2.32936961] reward: 1.0 action: 0\n",
      "value_next [28454.512] td_target [27032.785] td_error [1629.2363]\n",
      "state: [-0.1794429  -1.73378455  0.18042768  2.66005633] reward: 1.0 action: 0\n",
      "value_next [32085.812] td_target [30482.521] td_error [1594.6133]\n",
      "state: [-0.21411859 -1.92973779  0.23362881  3.00197322] reward: 1.0 action: 0\n",
      "value_next [35941.18] td_target [34145.12] td_error [1557.8594]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 29/200\n",
      "=========\n",
      "state: [ 0.03930997 -0.14834046  0.01775279  0.30489129] reward: 1.0 action: 0\n",
      "value_next [9219.919] td_target [8759.923] td_error [2331.3223]\n",
      "state: [ 0.03634316 -0.34371084  0.02385062  0.60311974] reward: 1.0 action: 0\n",
      "value_next [12228.393] td_target [11617.973] td_error [2263.5938]\n",
      "state: [ 0.02946894 -0.53915812  0.03591301  0.90321869] reward: 1.0 action: 0\n",
      "value_next [15369.297] td_target [14601.832] td_error [2200.293]\n",
      "state: [ 0.01868578 -0.73474765  0.05397739  1.20696996] reward: 1.0 action: 0\n",
      "value_next [18656.4] td_target [17724.58] td_error [2141.9102]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [ 0.00399083 -0.93052384  0.07811679  1.51606819] reward: 1.0 action: 0\n",
      "value_next [22106.54] td_target [21002.21] td_error [2088.379]\n",
      "state: [-0.01461965 -1.12649923  0.10843815  1.8320778 ] reward: 1.0 action: 0\n",
      "value_next [25738.574] td_target [24452.645] td_error [2039.0684]\n",
      "state: [-0.03714963 -1.32264168  0.14507971  2.15638174] reward: 1.0 action: 0\n",
      "value_next [29572.3] td_target [28094.686] td_error [1992.7793]\n",
      "state: [-0.06360247 -1.51885927  0.18820734  2.49012001] reward: 1.0 action: 0\n",
      "value_next [33627.266] td_target [31946.902] td_error [1947.666]\n",
      "state: [-0.09397965 -1.71498291  0.23800974  2.83411738] reward: 1.0 action: 0\n",
      "value_next [37921.53] td_target [36026.453] td_error [1901.1914]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 30/200\n",
      "=========\n",
      "state: [ 0.00568963 -0.18677275 -0.00570989  0.2588671 ] reward: 1.0 action: 0\n",
      "value_next [10469.005] td_target [9946.555] td_error [2602.7358]\n",
      "state: [ 1.95417437e-03 -3.81812723e-01 -5.32551524e-04  5.49743587e-01] reward: 1.0 action: 0\n",
      "value_next [13829.611] td_target [13139.131] td_error [2523.3457]\n",
      "state: [-0.00568208 -0.57692719  0.01046232  0.84225868] reward: 1.0 action: 0\n",
      "value_next [17331.89] td_target [16466.295] td_error [2449.3545]\n",
      "state: [-0.01722062 -0.77219037  0.02730749  1.13821328] reward: 1.0 action: 0\n",
      "value_next [20991.145] td_target [19942.588] td_error [2381.3223]\n",
      "state: [-0.03266443 -0.96765858  0.05007176  1.43933369] reward: 1.0 action: 0\n",
      "value_next [24826.016] td_target [23585.715] td_error [2319.2148]\n",
      "state: [-0.0520176  -1.1633604   0.07885843  1.74723345] reward: 1.0 action: 0\n",
      "value_next [28857.467] td_target [27415.594] td_error [2262.4492]\n",
      "state: [-0.07528481 -1.35928503  0.1138031   2.06336743] reward: 1.0 action: 0\n",
      "value_next [33107.703] td_target [31453.318] td_error [2209.834]\n",
      "state: [-0.10247051 -1.55536833  0.15507045  2.38897575] reward: 1.0 action: 0\n",
      "value_next [37599.055] td_target [35720.1] td_error [2159.5586]\n",
      "state: [-0.13357788 -1.75147646  0.20284997  2.7250164 ] reward: 1.0 action: 0\n",
      "value_next [42352.703] td_target [40236.066] td_error [2109.0508]\n",
      "state: [-0.16860741 -1.94738711  0.25735029  3.0720868 ] reward: 1.0 action: 0\n",
      "value_next [47387.31] td_target [45018.94] td_error [2054.9414]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 31/200\n",
      "=========\n",
      "state: [ 0.03871757 -0.17584808 -0.02218885  0.2970195 ] reward: 1.0 action: 0\n",
      "value_next [11931.829] td_target [11336.237] td_error [2946.9912]\n",
      "state: [ 0.03520061 -0.3706468  -0.01624846  0.58262268] reward: 1.0 action: 0\n",
      "value_next [15737.38] td_target [14951.511] td_error [2858.079]\n",
      "state: [ 0.02778767 -0.56553739 -0.004596    0.87014313] reward: 1.0 action: 0\n",
      "value_next [19703.188] td_target [18719.027] td_error [2775.4053]\n",
      "state: [ 0.01647692 -0.76059652  0.01280686  1.16137752] reward: 1.0 action: 0\n",
      "value_next [23846.072] td_target [22654.768] td_error [2699.4746]\n",
      "state: [ 1.26499105e-03 -9.55882923e-01  3.60344096e-02  1.45804822e+00] reward: 1.0 action: 0\n",
      "value_next [28186.344] td_target [26778.025] td_error [2630.1758]\n",
      "state: [-0.01785267 -1.15142792  0.06519537  1.76176712] reward: 1.0 action: 0\n",
      "value_next [32746.775] td_target [31110.436] td_error [2566.8164]\n",
      "state: [-0.04088123 -1.34722414  0.10043072  2.07399137] reward: 1.0 action: 0\n",
      "value_next [37551.523] td_target [35674.945] td_error [2508.0703]\n",
      "state: [-0.06782571 -1.54321183  0.14191054  2.39596858] reward: 1.0 action: 0\n",
      "value_next [42624.96] td_target [40494.71] td_error [2451.9102]\n",
      "state: [-0.09868995 -1.73926269  0.18982992  2.7286705 ] reward: 1.0 action: 0\n",
      "value_next [47990.37] td_target [45591.85] td_error [2395.5156]\n",
      "state: [-0.1334752  -1.93516113  0.24440333  3.07271498] reward: 1.0 action: 0\n",
      "value_next [53668.496] td_target [50986.07] td_error [2335.1562]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 32/200\n",
      "==========\n",
      "state: [ 0.04624609 -0.20358515  0.02493885  0.30253923] reward: 1.0 action: 0\n",
      "value_next [14102.708] td_target [13398.572] td_error [3473.168]\n",
      "state: [ 0.04217438 -0.39905349  0.03098964  0.60298183] reward: 1.0 action: 0\n",
      "value_next [18579.91] td_target [17651.914] td_error [3364.9834]\n",
      "state: [ 0.03419331 -0.59459487  0.04304927  0.90526256] reward: 1.0 action: 0\n",
      "value_next [23239.852] td_target [22078.86] td_error [3263.7598]\n",
      "state: [ 0.02230142 -0.79027252  0.06115452  1.21115968] reward: 1.0 action: 0\n",
      "value_next [28101.963] td_target [26697.865] td_error [3170.0273]\n",
      "state: [ 0.00649597 -0.98612834  0.08537772  1.52236245] reward: 1.0 action: 0\n",
      "value_next [33189.523] td_target [31531.047] td_error [3083.5215]\n",
      "state: [-0.0132266  -1.18217167  0.11582497  1.84042689] reward: 1.0 action: 0\n",
      "value_next [38528.293] td_target [36602.88] td_error [3003.1875]\n",
      "state: [-0.03687003 -1.37836628  0.15263351  2.16672344] reward: 1.0 action: 0\n",
      "value_next [44145.07] td_target [41938.816] td_error [2927.1797]\n",
      "state: [-0.06443736 -1.57461506  0.19596797  2.50237446] reward: 1.0 action: 0\n",
      "value_next [50066.12] td_target [47563.812] td_error [2852.7188]\n",
      "state: [-0.09592966 -1.77074242  0.24601546  2.84818111] reward: 1.0 action: 0\n",
      "value_next [56315.504] td_target [53500.727] td_error [2776.0547]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 33/200\n",
      "==========\n",
      "state: [-0.02785029 -0.19730559 -0.0286503   0.26606653] reward: 1.0 action: 0\n",
      "value_next [15529.042] td_target [14753.59] td_error [3735.2979]\n",
      "state: [-0.0317964  -0.39200717 -0.02332897  0.549577  ] reward: 1.0 action: 0\n",
      "value_next [20353.064] td_target [19336.41] td_error [3614.416]\n",
      "state: [-0.03963655 -0.58679379 -0.01233743  0.83481933] reward: 1.0 action: 0\n",
      "value_next [25366.965] td_target [24099.617] td_error [3502.1816]\n",
      "state: [-0.05137242 -0.78174503  0.00435896  1.12359684] reward: 1.0 action: 0\n",
      "value_next [30591.994] td_target [29063.395] td_error [3399.1855]\n",
      "state: [-0.06700732 -0.97692386  0.02683089  1.41764382] reward: 1.0 action: 0\n",
      "value_next [36053.594] td_target [34251.914] td_error [3305.289]\n",
      "state: [-0.0865458  -1.17236753  0.05518377  1.71859114] reward: 1.0 action: 0\n",
      "value_next [41780.164] td_target [39692.156] td_error [3219.6328]\n",
      "state: [-0.10999315 -1.36807679  0.08955559  2.02792396] reward: 1.0 action: 0\n",
      "value_next [47801.8] td_target [45412.71] td_error [3140.5977]\n",
      "state: [-0.13735469 -1.56400274  0.13011407  2.34692931] reward: 1.0 action: 0\n",
      "value_next [54148.88] td_target [51442.434] td_error [3065.707]\n",
      "state: [-0.16863474 -1.76003104  0.17705266  2.67663193] reward: 1.0 action: 0\n",
      "value_next [60850.465] td_target [57808.94] td_error [2991.5195]\n",
      "state: [-0.20383536 -1.95596363  0.2305853   3.01771828] reward: 1.0 action: 0\n",
      "value_next [67932.516] td_target [64536.89] td_error [2913.4531]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 34/200\n",
      "==========\n",
      "state: [-0.01834445 -0.21313667 -0.04232397  0.27842736] reward: 1.0 action: 0\n",
      "value_next [17632.547] td_target [16751.92] td_error [4161.8037]\n",
      "state: [-0.02260718 -0.40763008 -0.03675542  0.55746668] reward: 1.0 action: 0\n",
      "value_next [23019.943] td_target [21869.945] td_error [4025.9277]\n",
      "state: [-0.03075979 -0.60221729 -0.02560609  0.8383467 ] reward: 1.0 action: 0\n",
      "value_next [28616.822] td_target [27186.98] td_error [3900.1465]\n",
      "state: [-0.04280413 -0.79698039 -0.00883916  1.12286821] reward: 1.0 action: 0\n",
      "value_next [34446.89] td_target [32725.545] td_error [3784.9648]\n",
      "state: [-0.05874374 -0.99198534  0.01361821  1.41276553] reward: 1.0 action: 0\n",
      "value_next [40538.21] td_target [38512.3] td_error [3680.164]\n",
      "state: [-0.07858345 -1.1872734   0.04187352  1.70967409] reward: 1.0 action: 0\n",
      "value_next [46921.938] td_target [44576.84] td_error [3584.7266]\n",
      "state: [-0.10232891 -1.38285073  0.076067    2.01509014] reward: 1.0 action: 0\n",
      "value_next [53631.008] td_target [50950.457] td_error [3496.8516]\n",
      "state: [-0.12998593 -1.57867559  0.1163688   2.33032017] reward: 1.0 action: 0\n",
      "value_next [60698.754] td_target [57664.816] td_error [3413.8594]\n",
      "state: [-0.16155944 -1.77464299  0.16297521  2.65641848] reward: 1.0 action: 0\n",
      "value_next [68157.234] td_target [64750.37] td_error [3332.0117]\n",
      "state: [-0.1970523  -1.97056662  0.21610358  2.99411251] reward: 1.0 action: 0\n",
      "value_next [76035.34] td_target [72234.58] td_error [3246.3594]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 35/200\n",
      "==========\n",
      "state: [-0.04062252 -0.22036132 -0.04338265  0.26475311] reward: 1.0 action: 0\n",
      "value_next [19891.12] td_target [18897.562] td_error [4666.5654]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.04502974 -0.41483808 -0.03808759  0.54344328] reward: 1.0 action: 0\n",
      "value_next [25927.262] td_target [24631.898] td_error [4510.6484]\n",
      "state: [-0.0533265  -0.60940465 -0.02721872  0.82388644] reward: 1.0 action: 0\n",
      "value_next [32192.135] td_target [30583.527] td_error [4366.3164]\n",
      "state: [-0.0655146  -0.80414391 -0.010741    1.10788581] reward: 1.0 action: 0\n",
      "value_next [38712.113] td_target [36777.508] td_error [4234.1055]\n",
      "state: [-0.08159748 -0.99912305  0.01141672  1.39717985] reward: 1.0 action: 0\n",
      "value_next [45518.297] td_target [43243.383] td_error [4113.7305]\n",
      "state: [-0.10157994 -1.19438511  0.03936032  1.6934103 ] reward: 1.0 action: 0\n",
      "value_next [52645.09] td_target [50013.836] td_error [4004.0625]\n",
      "state: [-0.12546764 -1.38993872  0.07322852  1.99808247] reward: 1.0 action: 0\n",
      "value_next [60128.83] td_target [57123.387] td_error [3903.082]\n",
      "state: [-0.15326641 -1.58574547  0.11319017  2.31251539] reward: 1.0 action: 0\n",
      "value_next [68006.22] td_target [64606.906] td_error [3807.8086]\n",
      "state: [-0.18498132 -1.78170469  0.15944048  2.63778016] reward: 1.0 action: 0\n",
      "value_next [76312.56] td_target [72497.93] td_error [3714.1094]\n",
      "state: [-0.22061542 -1.97763559  0.21219608  2.97462596] reward: 1.0 action: 0\n",
      "value_next [85079.69] td_target [80826.7] td_error [3616.5234]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 36/200\n",
      "===========\n",
      "state: [ 0.02090695 -0.17754108 -0.03934247  0.28995961] reward: 1.0 action: 0\n",
      "value_next [21465.176] td_target [20392.916] td_error [5271.3135]\n",
      "state: [ 0.01735613 -0.37208059 -0.03354328  0.56997936] reward: 1.0 action: 0\n",
      "value_next [28209.49] td_target [26800.016] td_error [5096.9043]\n",
      "state: [ 0.00991452 -0.56671645 -0.02214369  0.85190905] reward: 1.0 action: 0\n",
      "value_next [35206.754] td_target [33447.414] td_error [4935.4355]\n",
      "state: [-0.00141981 -0.76152962 -0.00510551  1.13754746] reward: 1.0 action: 0\n",
      "value_next [42485.824] td_target [40362.53] td_error [4787.383]\n",
      "state: [-0.0166504  -0.95658443  0.01764544  1.42862484] reward: 1.0 action: 0\n",
      "value_next [50080.582] td_target [47577.55] td_error [4652.3086]\n",
      "state: [-0.03578209 -1.1519198   0.04621794  1.72676978] reward: 1.0 action: 0\n",
      "value_next [58028.43] td_target [55128.008] td_error [4528.832]\n",
      "state: [-0.05882049 -1.34753869  0.08075333  2.03346801] reward: 1.0 action: 0\n",
      "value_next [66368.734] td_target [63051.297] td_error [4414.5938]\n",
      "state: [-0.08577126 -1.54339507  0.12142269  2.35001088] reward: 1.0 action: 0\n",
      "value_next [75141.14] td_target [71385.086] td_error [4306.133]\n",
      "state: [-0.11663916 -1.73937834  0.16842291  2.67743196] reward: 1.0 action: 0\n",
      "value_next [84383.61] td_target [80165.43] td_error [4198.703]\n",
      "state: [-0.15142673 -1.93529514  0.22197155  3.0164315 ] reward: 1.0 action: 0\n",
      "value_next [94130.07] td_target [89424.56] td_error [4086.0547]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 37/200\n",
      "===========\n",
      "state: [-0.02608908 -0.19539326 -0.02035389  0.25837935] reward: 1.0 action: 0\n",
      "value_next [24380.46] td_target [23162.438] td_error [5945.8125]\n",
      "state: [-0.02999694 -0.39021881 -0.0151863   0.54457351] reward: 1.0 action: 0\n",
      "value_next [31982.855] td_target [30384.713] td_error [5741.918]\n",
      "state: [-0.03780132 -0.58512411 -0.00429483  0.83243314] reward: 1.0 action: 0\n",
      "value_next [39860.28] td_target [37868.266] td_error [5552.662]\n",
      "state: [-0.0495038  -0.7801871   0.01235383  1.12376226] reward: 1.0 action: 0\n",
      "value_next [48045.035] td_target [45643.78] td_error [5378.6523]\n",
      "state: [-0.06510754 -0.97546881  0.03482908  1.4202944 ] reward: 1.0 action: 0\n",
      "value_next [56575.027] td_target [53747.273] td_error [5219.375]\n",
      "state: [-0.08461692 -1.17100398  0.06323497  1.72365695] reward: 1.0 action: 0\n",
      "value_next [65491.97] td_target [62218.37] td_error [5073.2266]\n",
      "state: [-0.108037   -1.36679002  0.0977081   2.03532774] reward: 1.0 action: 0\n",
      "value_next [74839.61] td_target [71098.625] td_error [4937.453]\n",
      "state: [-0.1353728  -1.56277362  0.13841466  2.35658148] reward: 1.0 action: 0\n",
      "value_next [84661.62] td_target [80429.54] td_error [4808.008]\n",
      "state: [-0.16662827 -1.75883474  0.18554629  2.68842456] reward: 1.0 action: 0\n",
      "value_next [94999.37] td_target [90250.4] td_error [4679.383]\n",
      "state: [-0.20180497 -1.95476825  0.23931478  3.03151835] reward: 1.0 action: 0\n",
      "value_next [105889.13] td_target [100595.67] td_error [4544.336]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 38/200\n",
      "===========\n",
      "state: [ 0.00945677 -0.22762986  0.03471603  0.26100407] reward: 1.0 action: 0\n",
      "value_next [27805.887] td_target [26416.592] td_error [6874.0664]\n",
      "state: [ 0.00490417 -0.42322974  0.03993611  0.56443157] reward: 1.0 action: 0\n",
      "value_next [36560.73] td_target [34733.695] td_error [6635.5723]\n",
      "state: [-0.00356042 -0.61888859  0.05122475  0.86942393] reward: 1.0 action: 0\n",
      "value_next [45626.406] td_target [43346.086] td_error [6412.9727]\n",
      "state: [-0.01593819 -0.81466858  0.06861322  1.1777622 ] reward: 1.0 action: 0\n",
      "value_next [55039.465] td_target [52288.492] td_error [6206.836]\n",
      "state: [-0.03223156 -1.01061129  0.09216847  1.49114116] reward: 1.0 action: 0\n",
      "value_next [64842.5] td_target [61601.375] td_error [6016.2734]\n",
      "state: [-0.05244379 -1.20672638  0.12199129  1.81112436] reward: 1.0 action: 0\n",
      "value_next [75081.72] td_target [71328.63] td_error [5838.9297]\n",
      "state: [-0.07657832 -1.40297849  0.15821378  2.13909134] reward: 1.0 action: 0\n",
      "value_next [85804.54] td_target [81515.31] td_error [5671.0234]\n",
      "state: [-0.10463789 -1.59927191  0.20099561  2.47617503] reward: 1.0 action: 0\n",
      "value_next [97056.82] td_target [92204.98] td_error [5507.0547]\n",
      "state: [-0.13662333 -1.79543307  0.25051911  2.82318881] reward: 1.0 action: 0\n",
      "value_next [108879.82] td_target [103436.83] td_error [5339.7344]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 39/200\n",
      "============\n",
      "state: [ 0.0438654  -0.20978439  0.01822731  0.34034393] reward: 1.0 action: 0\n",
      "value_next [30970.416] td_target [29422.895] td_error [7423.291]\n",
      "state: [ 0.03966972 -0.40516088  0.02503419  0.63871851] reward: 1.0 action: 0\n",
      "value_next [40482.453] td_target [38459.33] td_error [7171.6836]\n",
      "state: [ 0.0315665  -0.60062278  0.03780856  0.93917878] reward: 1.0 action: 0\n",
      "value_next [50337.332] td_target [47821.465] td_error [6937.875]\n",
      "state: [ 0.01955404 -0.79623347  0.05659213  1.24349819] reward: 1.0 action: 0\n",
      "value_next [60575.246] td_target [57547.484] td_error [6722.0312]\n",
      "state: [ 0.00362937 -0.99203414  0.0814621   1.55335821] reward: 1.0 action: 0\n",
      "value_next [71242.27] td_target [67681.16] td_error [6522.8555]\n",
      "state: [-0.01621131 -1.1880327   0.11252926  1.87030447] reward: 1.0 action: 0\n",
      "value_next [82387.93] td_target [78269.53] td_error [6337.5938]\n",
      "state: [-0.03997196 -1.38419069  0.14993535  2.19569431] reward: 1.0 action: 0\n",
      "value_next [94062.65] td_target [89360.516] td_error [6161.883]\n",
      "state: [-0.06765578 -1.58040792  0.19384924  2.53063405] reward: 1.0 action: 0\n",
      "value_next [106315.01] td_target [101000.26] td_error [5989.6406]\n",
      "state: [-0.09926394 -1.77650479  0.24446192  2.87590542] reward: 1.0 action: 0\n",
      "value_next [119188.484] td_target [113230.06] td_error [5812.7656]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 40/200\n",
      "============\n",
      "state: [-0.04822869 -0.16354503  0.04028266  0.33686448] reward: 1.0 action: 0\n",
      "value_next [34526.58] td_target [32801.25] td_error [8210.732]\n",
      "state: [-0.05149959 -0.35921639  0.04701995  0.64197331] reward: 1.0 action: 0\n",
      "value_next [45048.92] td_target [42797.477] td_error [7928.3633]\n",
      "state: [-0.05868392 -0.55496114  0.05985942  0.9490847 ] reward: 1.0 action: 0\n",
      "value_next [55943.215] td_target [53147.055] td_error [7665.2617]\n",
      "state: [-0.06978314 -0.75083565  0.07884111  1.25995839] reward: 1.0 action: 0\n",
      "value_next [67253.44] td_target [63891.766] td_error [7421.703]\n",
      "state: [-0.08479985 -0.94687266  0.10404028  1.57625575] reward: 1.0 action: 0\n",
      "value_next [79030.15] td_target [75079.64] td_error [7196.242]\n",
      "state: [-0.1037373  -1.14306939  0.1355654   1.89949218] reward: 1.0 action: 0\n",
      "value_next [91327.516] td_target [86762.14] td_error [6985.664]\n",
      "state: [-0.12659869 -1.33937371  0.17355524  2.23098131] reward: 1.0 action: 0\n",
      "value_next [104200.37] td_target [98991.34] td_error [6784.867]\n",
      "state: [-0.15338617 -1.53566817  0.21817487  2.57176909] reward: 1.0 action: 0\n",
      "value_next [117700.75] td_target [111816.71] td_error [6586.789]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 41/200\n",
      "============\n",
      "state: [-0.04791072 -0.16501608 -0.00125365  0.25219395] reward: 1.0 action: 0\n",
      "value_next [34862.39] td_target [33120.27] td_error [8683.076]\n",
      "state: [-0.05121104 -0.36012011  0.00379023  0.54448119] reward: 1.0 action: 0\n",
      "value_next [45860.63] td_target [43568.598] td_error [8374.793]\n",
      "state: [-0.05841344 -0.55529512  0.01467985  0.83835592] reward: 1.0 action: 0\n",
      "value_next [57230.004] td_target [54369.504] td_error [8089.3047]\n",
      "state: [-0.06951935 -0.75061442  0.03144697  1.13561903] reward: 1.0 action: 0\n",
      "value_next [69017.6] td_target [65567.72] td_error [7827.0977]\n",
      "state: [-0.08453163 -0.94613337  0.05415935  1.43799634] reward: 1.0 action: 0\n",
      "value_next [81277.59] td_target [77214.71] td_error [7587.0156]\n",
      "state: [-0.1034543  -1.14187935  0.08291928  1.74709985] reward: 1.0 action: 0\n",
      "value_next [94068.7] td_target [89366.266] td_error [7366.2734]\n",
      "state: [-0.12629189 -1.33784004  0.11786127  2.06438129] reward: 1.0 action: 0\n",
      "value_next [107451.516] td_target [102079.94] td_error [7160.461]\n",
      "state: [-0.15304869 -1.53394937  0.1591489   2.39107554] reward: 1.0 action: 0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "value_next [121485.55] td_target [115412.266] td_error [6963.2344]\n",
      "state: [-0.18372768 -1.73007103  0.20697041  2.72813282] reward: 1.0 action: 0\n",
      "value_next [136225.7] td_target [129415.414] td_error [6766.1094]\n",
      "state: [-0.2183291  -1.92597972  0.26153307  3.07614001] reward: 1.0 action: 0\n",
      "value_next [151718.62] td_target [144133.69] td_error [6558.2188]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 42/200\n",
      "=============\n",
      "state: [ 0.03297389 -0.2195791   0.0467155   0.27423845] reward: 1.0 action: 0\n",
      "value_next [39433.703] td_target [37463.016] td_error [9843.248]\n",
      "state: [ 0.0285823  -0.41533538  0.05220027  0.58128189] reward: 1.0 action: 0\n",
      "value_next [51896.332] td_target [49302.516] td_error [9498.645]\n",
      "state: [ 0.0202756  -0.61114839  0.06382591  0.88994124] reward: 1.0 action: 0\n",
      "value_next [64784.13] td_target [61545.92] td_error [9177.3125]\n",
      "state: [ 0.00805263 -0.80707561  0.08162473  1.20198573] reward: 1.0 action: 0\n",
      "value_next [78147.914] td_target [74241.516] td_error [8879.566]\n",
      "state: [-0.00808888 -1.00315269  0.10566444  1.51909313] reward: 1.0 action: 0\n",
      "value_next [92045.9] td_target [87444.6] td_error [8603.719]\n",
      "state: [-0.02815194 -1.19938161  0.13604631  1.84280245] reward: 1.0 action: 0\n",
      "value_next [106540.35] td_target [101214.336] td_error [8346.047]\n",
      "state: [-0.05213957 -1.39571707  0.17290236  2.17445877] reward: 1.0 action: 0\n",
      "value_next [121694.234] td_target [115610.52] td_error [8100.6797]\n",
      "state: [-0.08005391 -1.59205074  0.21639153  2.51514854] reward: 1.0 action: 0\n",
      "value_next [137567.5] td_target [130690.125] td_error [7859.5234]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 43/200\n",
      "=============\n",
      "state: [-0.01226338 -0.21663248  0.03051083  0.34013284] reward: 1.0 action: 0\n",
      "value_next [44575.793] td_target [42348.004] td_error [10359.936]\n",
      "state: [-0.01659603 -0.41217497  0.03731349  0.64227886] reward: 1.0 action: 0\n",
      "value_next [57868.137] td_target [54975.73] td_error [9997.883]\n",
      "state: [-0.02483953 -0.60779662  0.05015907  0.94647471] reward: 1.0 action: 0\n",
      "value_next [71614.65] td_target [68034.914] td_error [9661.887]\n",
      "state: [-0.03699546 -0.80355689  0.06908856  1.25448639] reward: 1.0 action: 0\n",
      "value_next [85871.85] td_target [81579.26] td_error [9351.859]\n",
      "state: [-0.0530666  -0.99949221  0.09417829  1.56798409] reward: 1.0 action: 0\n",
      "value_next [100703.45] td_target [95669.28] td_error [9065.625]\n",
      "state: [-0.07305644 -1.19560438  0.12553797  1.88849625] reward: 1.0 action: 0\n",
      "value_next [116177.02] td_target [110369.17] td_error [8798.992]\n",
      "state: [-0.09696853 -1.39184711  0.1633079   2.21735512] reward: 1.0 action: 0\n",
      "value_next [132360.53] td_target [125743.5] td_error [8545.492]\n",
      "state: [-0.12480547 -1.58811024  0.207655    2.55563226] reward: 1.0 action: 0\n",
      "value_next [149318.39] td_target [141853.47] td_error [8296.3125]\n",
      "state: [-0.15656768 -1.78420186  0.25876764  2.9040635 ] reward: 1.0 action: 0\n",
      "value_next [167107.16] td_target [158752.8] td_error [8039.75]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 44/200\n",
      "=============\n",
      "state: [ 0.00825928 -0.21151216  0.0272412   0.34571055] reward: 1.0 action: 0\n",
      "value_next [47815.207] td_target [45425.445] td_error [11209.52]\n",
      "state: [ 0.00402903 -0.40701081  0.03415541  0.64685764] reward: 1.0 action: 0\n",
      "value_next [62171.656] td_target [59064.074] td_error [10821.59]\n",
      "state: [-0.00411118 -0.60259159  0.04709257  0.95009745] reward: 1.0 action: 0\n",
      "value_next [77021.07] td_target [73171.016] td_error [10461.586]\n",
      "state: [-0.01616302 -0.79831473  0.06609451  1.25719676] reward: 1.0 action: 0\n",
      "value_next [92423.27] td_target [87803.11] td_error [10129.359]\n",
      "state: [-0.03212931 -0.99421744  0.09123845  1.56982686] reward: 1.0 action: 0\n",
      "value_next [108445.58] td_target [103024.3] td_error [9822.531]\n",
      "state: [-0.05201366 -1.19030247  0.12263499  1.88951794] reward: 1.0 action: 0\n",
      "value_next [125159.36] td_target [118902.39] td_error [9536.5625]\n",
      "state: [-0.07581971 -1.38652467  0.16042535  2.21760509] reward: 1.0 action: 0\n",
      "value_next [142636.47] td_target [135505.64] td_error [9264.578]\n",
      "state: [-0.1035502  -1.58277528  0.20477745  2.55516406] reward: 1.0 action: 0\n",
      "value_next [160945.03] td_target [152898.78] td_error [8997.0625]\n",
      "state: [-0.13520571 -1.77886406  0.25588073  2.90293638] reward: 1.0 action: 0\n",
      "value_next [180144.94] td_target [171138.69] td_error [8721.516]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 45/200\n",
      "==============\n",
      "state: [ 0.02111888 -0.23694861 -0.02041782  0.30510771] reward: 1.0 action: 0\n",
      "value_next [49923.17] td_target [47428.01] td_error [11754.285]\n",
      "state: [ 0.01637991 -0.43177373 -0.01431567  0.59128206] reward: 1.0 action: 0\n",
      "value_next [64945.56] td_target [61699.28] td_error [11342.832]\n",
      "state: [ 0.00774444 -0.62669236 -0.00249003  0.87942139] reward: 1.0 action: 0\n",
      "value_next [80476.06] td_target [76453.26] td_error [10962.887]\n",
      "state: [-0.00478941 -0.8217804   0.0150984   1.17132046] reward: 1.0 action: 0\n",
      "value_next [96577.47] td_target [91749.59] td_error [10614.391]\n",
      "state: [-0.02122502 -1.01709538  0.03852481  1.4686983 ] reward: 1.0 action: 0\n",
      "value_next [113320.516] td_target [107655.49] td_error [10295.344]\n",
      "state: [-0.04156693 -1.21266701  0.06789877  1.77316157] reward: 1.0 action: 0\n",
      "value_next [130781.01] td_target [124242.95] td_error [10001.773]\n",
      "state: [-0.06582027 -1.40848567  0.10336201  2.08615964] reward: 1.0 action: 0\n",
      "value_next [149036.64] td_target [141585.81] td_error [9727.625]\n",
      "state: [-0.09398998 -1.60448868  0.1450852   2.40892938] reward: 1.0 action: 0\n",
      "value_next [168163.38] td_target [159756.2] td_error [9464.328]\n",
      "state: [-0.12607975 -1.80054393  0.19326379  2.74242806] reward: 1.0 action: 0\n",
      "value_next [188231.42] td_target [178820.84] td_error [9200.625]\n",
      "state: [-0.16209063 -1.99643108  0.24811235  3.08725492] reward: 1.0 action: 0\n",
      "value_next [209300.42] td_target [198836.39] td_error [8921.9375]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 46/200\n",
      "==============\n",
      "state: [-0.04168137 -0.19436387 -0.00841778  0.27435201] reward: 1.0 action: 0\n",
      "value_next [53693.84] td_target [51010.15] td_error [12932.1875]\n",
      "state: [-0.04556864 -0.38936471 -0.00293074  0.56436811] reward: 1.0 action: 0\n",
      "value_next [70124.52] td_target [66619.3] td_error [12472.254]\n",
      "state: [-0.05335594 -0.58444542  0.00835662  0.85612628] reward: 1.0 action: 0\n",
      "value_next [87097.95] td_target [82744.055] td_error [12046.539]\n",
      "state: [-0.06504485 -0.77968024  0.02547915  1.1514251 ] reward: 1.0 action: 0\n",
      "value_next [104681.35] td_target [99448.28] td_error [11655.266]\n",
      "state: [-0.08063845 -0.9751252   0.04850765  1.45198746] reward: 1.0 action: 0\n",
      "value_next [122950.93] td_target [116804.38] td_error [11296.398]\n",
      "state: [-0.10014096 -1.17080834  0.0775474   1.75942255] reward: 1.0 action: 0\n",
      "value_next [141988.56] td_target [134890.12] td_error [10965.672]\n",
      "state: [-0.12355712 -1.36671798  0.11273585  2.07517996] reward: 1.0 action: 0\n",
      "value_next [161878.11] td_target [153785.2] td_error [10656.422]\n",
      "state: [-0.15089148 -1.56278886  0.15423945  2.40049348] reward: 1.0 action: 0\n",
      "value_next [182701.48] td_target [173567.4] td_error [10359.203]\n",
      "state: [-0.18214726 -1.75888559  0.20224932  2.7363135 ] reward: 1.0 action: 0\n",
      "value_next [204534.] td_target [194308.3] td_error [10061.625]\n",
      "state: [-0.21732497 -1.95478393  0.25697559  3.08322832] reward: 1.0 action: 0\n",
      "value_next [227438.88] td_target [216067.92] td_error [9747.594]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 47/200\n",
      "==============\n",
      "state: [-0.02764676 -0.2152132   0.01810969  0.3022694 ] reward: 1.0 action: 0\n",
      "value_next [60300.363] td_target [57286.344] td_error [14273.789]\n",
      "state: [-0.03195102 -0.41058852  0.02415508  0.60060827] reward: 1.0 action: 0\n",
      "value_next [78487.914] td_target [74564.516] td_error [13765.859]\n",
      "state: [-0.04016279 -0.60603991  0.03616725  0.90080074] reward: 1.0 action: 0\n",
      "value_next [97273.6] td_target [92410.92] td_error [13294.891]\n",
      "state: [-0.05228359 -0.80163276  0.05418326  1.204629  ] reward: 1.0 action: 0\n",
      "value_next [116731.48] td_target [110895.9] td_error [12860.695]\n",
      "state: [-0.06831624 -0.99741157  0.07827584  1.51378833] reward: 1.0 action: 0\n",
      "value_next [136944.64] td_target [130098.41] td_error [12460.477]\n",
      "state: [-0.08826448 -1.19338903  0.10855161  1.829844  ] reward: 1.0 action: 0\n",
      "value_next [158001.3] td_target [150102.23] td_error [12088.797]\n",
      "state: [-0.11213226 -1.38953321  0.14514849  2.15418007] reward: 1.0 action: 0\n",
      "value_next [179990.45] td_target [170991.92] td_error [11737.344]\n",
      "state: [-0.13992292 -1.5857525   0.18823209  2.48793793] reward: 1.0 action: 0\n",
      "value_next [202997.28] td_target [192848.42] td_error [11394.797]\n",
      "state: [-0.17163797 -1.78187816  0.23799085  2.83194407] reward: 1.0 action: 0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "value_next [227097.62] td_target [215743.73] td_error [11046.125]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 48/200\n",
      "==============\n",
      "state: [-0.00556177 -0.16300494 -0.02882413  0.2446606 ] reward: 1.0 action: 0\n",
      "value_next [58769.246] td_target [55831.78] td_error [15018.117]\n",
      "state: [-0.00882187 -0.35770359 -0.02393092  0.52811414] reward: 1.0 action: 0\n",
      "value_next [77591.73] td_target [73713.14] td_error [14478.352]\n",
      "state: [-0.01597594 -0.55248082 -0.01336864  0.81316131] reward: 1.0 action: 0\n",
      "value_next [97014.04] td_target [92164.336] td_error [13980.328]\n",
      "state: [-0.02702555 -0.74741714  0.00289459  1.1016094 ] reward: 1.0 action: 0\n",
      "value_next [117113.055] td_target [111258.4] td_error [13524.039]\n",
      "state: [-0.0419739  -0.94257706  0.02492677  1.39519905] reward: 1.0 action: 0\n",
      "value_next [137975.7] td_target [131077.92] td_error [13107.297]\n",
      "state: [-0.06082544 -1.13800013  0.05283076  1.69557031] reward: 1.0 action: 0\n",
      "value_next [159695.28] td_target [151711.52] td_error [12725.4375]\n",
      "state: [-0.08358544 -1.3336903   0.08674216  2.00422104] reward: 1.0 action: 0\n",
      "value_next [182367.94] td_target [173250.53] td_error [12371.3125]\n",
      "state: [-0.11025925 -1.52960293  0.12682658  2.32245534] reward: 1.0 action: 0\n",
      "value_next [206088.56] td_target [195785.12] td_error [12035.031]\n",
      "state: [-0.14085131 -1.72562924  0.17327569  2.65132029] reward: 1.0 action: 0\n",
      "value_next [230946.06] td_target [219399.75] td_error [11703.234]\n",
      "state: [-0.17536389 -1.92157819  0.2263021   2.99153081] reward: 1.0 action: 0\n",
      "value_next [257017.61] td_target [244167.72] td_error [11358.781]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 49/200\n",
      "===============\n",
      "state: [ 0.02291268 -0.21772805 -0.0157587   0.27420155] reward: 1.0 action: 0\n",
      "value_next [66363.72] td_target [63046.53] td_error [16286.426]\n",
      "state: [ 0.01855812 -0.41262165 -0.01027467  0.56187279] reward: 1.0 action: 0\n",
      "value_next [86918.01] td_target [82573.11] td_error [15699.531]\n",
      "state: [ 0.01030569 -0.60759792  0.00096279  0.85130105] reward: 1.0 action: 0\n",
      "value_next [108126.32] td_target [102721.] td_error [15157.414]\n",
      "state: [-0.00184627 -0.80273298  0.01798881  1.14428657] reward: 1.0 action: 0\n",
      "value_next [130072.53] td_target [123569.91] td_error [14659.742]\n",
      "state: [-0.01790093 -0.99808526  0.04087454  1.442556  ] reward: 1.0 action: 0\n",
      "value_next [152850.45] td_target [145208.92] td_error [14203.57]\n",
      "state: [-0.03786263 -1.1936859   0.06972566  1.74772566] reward: 1.0 action: 0\n",
      "value_next [176559.97] td_target [167732.97] td_error [13783.406]\n",
      "state: [-0.06173635 -1.38952741  0.10468017  2.06125687] reward: 1.0 action: 0\n",
      "value_next [201302.95] td_target [191238.8] td_error [13390.844]\n",
      "state: [-0.0895269  -1.58554996  0.14590531  2.38440096] reward: 1.0 action: 0\n",
      "value_next [227178.7] td_target [215820.77] td_error [13014.3125]\n",
      "state: [-0.1212379  -1.78162515  0.19359333  2.71813283] reward: 1.0 action: 0\n",
      "value_next [254278.64] td_target [241565.7] td_error [12638.5625]\n",
      "state: [-0.1568704  -1.97753736  0.24795598  3.06307304] reward: 1.0 action: 0\n",
      "value_next [282679.75] td_target [268546.75] td_error [12243.922]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 50/200\n",
      "===============\n",
      "state: [-0.00877098 -0.20718916  0.049194    0.33022686] reward: 1.0 action: 0\n",
      "value_next [76647.016] td_target [72815.664] td_error [18239.402]\n",
      "state: [-0.01291476 -0.40297562  0.05579853  0.63800846] reward: 1.0 action: 0\n",
      "value_next [99804.375] td_target [94815.16] td_error [17584.625]\n",
      "state: [-0.02097427 -0.59882942  0.0685587   0.9477282 ] reward: 1.0 action: 0\n",
      "value_next [123700.984] td_target [117516.94] td_error [16975.281]\n",
      "state: [-0.03295086 -0.7948042   0.08751327  1.26114026] reward: 1.0 action: 0\n",
      "value_next [148428.17] td_target [141007.77] td_error [16410.5]\n",
      "state: [-0.04884694 -0.99092958  0.11273607  1.57989859] reward: 1.0 action: 0\n",
      "value_next [174087.38] td_target [165384.] td_error [15885.984]\n",
      "state: [-0.06866554 -1.18719896  0.14433404  1.90550803] reward: 1.0 action: 0\n",
      "value_next [200784.6] td_target [190746.36] td_error [15393.797]\n",
      "state: [-0.09240951 -1.38355549  0.1824442   2.23926726] reward: 1.0 action: 0\n",
      "value_next [228624.81] td_target [217194.56] td_error [14922.266]\n",
      "state: [-0.12008062 -1.57987584  0.22722955  2.58220186] reward: 1.0 action: 0\n",
      "value_next [257705.66] td_target [244821.38] td_error [14455.469]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 51/200\n",
      "===============\n",
      "state: [-0.03703201 -0.22073033  0.027597    0.29101132] reward: 1.0 action: 0\n",
      "value_next [80343.42] td_target [76327.25] td_error [19081.953]\n",
      "state: [-0.04144662 -0.41623469  0.03341722  0.59226868] reward: 1.0 action: 0\n",
      "value_next [104541.87] td_target [99315.77] td_error [18380.906]\n",
      "state: [-0.04977131 -0.61180813  0.0452626   0.89528789] reward: 1.0 action: 0\n",
      "value_next [129489.76] td_target [123016.266] td_error [17731.29]\n",
      "state: [-0.06200748 -0.80751363  0.06316835  1.20184814] reward: 1.0 action: 0\n",
      "value_next [155285.97] td_target [147522.67] td_error [17132.336]\n",
      "state: [-0.07815775 -1.00339302  0.08720532  1.51364037] reward: 1.0 action: 0\n",
      "value_next [182039.88] td_target [172938.88] td_error [16579.766]\n",
      "state: [-0.09822561 -1.1994558   0.11747812  1.83222286] reward: 1.0 action: 0\n",
      "value_next [209866.28] td_target [199373.97] td_error [16065.891]\n",
      "state: [-0.12221473 -1.39566599  0.15412258  2.15896873] reward: 1.0 action: 0\n",
      "value_next [238879.92] td_target [226936.92] td_error [15579.266]\n",
      "state: [-0.15012805 -1.59192693  0.19730196  2.49500346] reward: 1.0 action: 0\n",
      "value_next [269189.28] td_target [255730.81] td_error [15104.391]\n",
      "state: [-0.18196658 -1.78806365  0.24720203  2.84113184] reward: 1.0 action: 0\n",
      "value_next [300889.4] td_target [285845.94] td_error [14621.0625]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 52/200\n",
      "================\n",
      "state: [-0.04645744 -0.17497867  0.00251738  0.2488442 ] reward: 1.0 action: 0\n",
      "value_next [80810.85] td_target [76771.305] td_error [20137.45]\n",
      "state: [-0.04995701 -0.37013648  0.00749426  0.5423201 ] reward: 1.0 action: 0\n",
      "value_next [106097.53] td_target [100793.66] td_error [19396.68]\n",
      "state: [-0.05735974 -0.56536295  0.01834067  0.83735488] reward: 1.0 action: 0\n",
      "value_next [132157.52] td_target [125550.64] td_error [18712.04]\n",
      "state: [-0.068667   -0.76073052  0.03508776  1.1357488 ] reward: 1.0 action: 0\n",
      "value_next [159093.6] td_target [151139.9] td_error [18083.047]\n",
      "state: [-0.08388161 -0.95629351  0.05780274  1.43922642] reward: 1.0 action: 0\n",
      "value_next [187020.3] td_target [177670.28] td_error [17505.875]\n",
      "state: [-0.10300748 -1.15207804  0.08658727  1.74939728] reward: 1.0 action: 0\n",
      "value_next [216058.81] td_target [205256.88] td_error [16973.375]\n",
      "state: [-0.12604904 -1.34807012  0.12157521  2.06770889] reward: 1.0 action: 0\n",
      "value_next [246331.98] td_target [234016.38] td_error [16474.719]\n",
      "state: [-0.15301044 -1.54420158  0.16292939  2.39538968] reward: 1.0 action: 0\n",
      "value_next [277958.22] td_target [264061.3] td_error [15995.172]\n",
      "state: [-0.18389447 -1.7403334   0.21083718  2.73338074] reward: 1.0 action: 0\n",
      "value_next [311044.84] td_target [295493.6] td_error [15515.25]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 53/200\n",
      "================\n",
      "state: [ 0.03169228 -0.16241587 -0.00240388  0.28158112] reward: 1.0 action: 0\n",
      "value_next [83857.18] td_target [79665.32] td_error [21460.508]\n",
      "state: [ 0.02844396 -0.35750345  0.00322775  0.57350491] reward: 1.0 action: 0\n",
      "value_next [110654.875] td_target [105123.13] td_error [20683.93]\n",
      "state: [ 0.02129389 -0.55267051  0.01469784  0.86720292] reward: 1.0 action: 0\n",
      "value_next [138275.94] td_target [131363.14] td_error [19967.07]\n",
      "state: [ 0.01024048 -0.74798934  0.0320419   1.16447055] reward: 1.0 action: 0\n",
      "value_next [166829.17] td_target [158488.72] td_error [19308.953]\n",
      "state: [-0.0047193  -0.94351344  0.05533131  1.46702486] reward: 1.0 action: 0\n",
      "value_next [196434.98] td_target [186614.23] td_error [18705.062]\n",
      "state: [-0.02358957 -1.13926738  0.08467181  1.77646551] reward: 1.0 action: 0\n",
      "value_next [227220.34] td_target [215860.33] td_error [18147.547]\n",
      "state: [-0.04637492 -1.33523492  0.12020112  2.0942275 ] reward: 1.0 action: 0\n",
      "value_next [259313.44] td_target [246348.77] td_error [17624.656]\n",
      "state: [-0.07307962 -1.53134476  0.16208567  2.42152383] reward: 1.0 action: 0\n",
      "value_next [292837.62] td_target [278196.75] td_error [17120.61]\n",
      "state: [-0.10370651 -1.72745387  0.21051615  2.75927664] reward: 1.0 action: 0\n",
      "value_next [327904.16] td_target [311509.94] td_error [16614.438]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 54/200\n",
      "================\n",
      "state: [-0.00700441 -0.17241478 -0.01870458  0.239996  ] reward: 1.0 action: 0\n",
      "value_next [88272.516] td_target [83859.89] td_error [22521.84]\n",
      "state: [-0.01045271 -0.36726461 -0.01390466  0.52672079] reward: 1.0 action: 0\n",
      "value_next [116377.77] td_target [110559.88] td_error [21689.57]\n",
      "state: [-0.017798   -0.56218818 -0.00337024  0.81499004] reward: 1.0 action: 0\n",
      "value_next [145324.06] td_target [138058.86] td_error [20921.852]\n",
      "state: [-0.02904176 -0.75726382  0.01292956  1.10661099] reward: 1.0 action: 0\n",
      "value_next [175225.73] td_target [166465.45] td_error [20218.016]\n",
      "state: [-0.04418704 -0.95255334  0.03506178  1.40332198] reward: 1.0 action: 0\n",
      "value_next [206209.73] td_target [195900.25] td_error [19573.953]\n",
      "state: [-0.0632381  -1.14809279  0.06312822  1.70675693] reward: 1.0 action: 0\n",
      "value_next [238410.67] td_target [226491.14] td_error [18982.188]\n",
      "state: [-0.08619996 -1.34388138  0.09726336  2.01840219] reward: 1.0 action: 0\n",
      "value_next [271965.7] td_target [258368.4] td_error [18431.516]\n",
      "state: [-0.11307759 -1.53986824  0.1376314   2.33954339] reward: 1.0 action: 0\n",
      "value_next [307008.25] td_target [291658.84] td_error [17906.438]\n",
      "state: [-0.14387495 -1.73593652  0.18442227  2.67120071] reward: 1.0 action: 0\n",
      "value_next [343661.6] td_target [326479.5] td_error [17386.656]\n",
      "state: [-0.17859368 -1.9318851   0.23784628  3.01405266] reward: 1.0 action: 0\n",
      "value_next [382030.1] td_target [362929.6] td_error [16846.219]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 55/200\n",
      "================\n",
      "state: [ 0.04487854 -0.17334603 -0.01718529  0.29737973] reward: 1.0 action: 0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "value_next [95553.805] td_target [90777.12] td_error [24054.312]\n",
      "state: [ 0.04141162 -0.36821884 -0.01123769  0.58459355] reward: 1.0 action: 0\n",
      "value_next [125663.45] td_target [119381.28] td_error [23185.164]\n",
      "state: [ 3.40472453e-02 -5.63181585e-01  4.54176934e-04  8.73715394e-01] reward: 1.0 action: 0\n",
      "value_next [156697.88] td_target [148863.98] td_error [22383.633]\n",
      "state: [ 0.02278361 -0.75830971  0.01792848  1.16654108] reward: 1.0 action: 0\n",
      "value_next [188778.] td_target [179340.1] td_error [21648.36]\n",
      "state: [ 0.00761742 -0.95366033  0.04125931  1.46479061] reward: 1.0 action: 0\n",
      "value_next [222037.] td_target [210936.14] td_error [20974.297]\n",
      "state: [-0.01145579 -1.14926264  0.07055512  1.77007116] reward: 1.0 action: 0\n",
      "value_next [256615.2] td_target [243785.44] td_error [20352.828]\n",
      "state: [-0.03444104 -1.34510639  0.10595654  2.08383191] reward: 1.0 action: 0\n",
      "value_next [292654.34] td_target [278022.62] td_error [19771.078]\n",
      "state: [-0.06134317 -1.5411281   0.14763318  2.40730847] reward: 1.0 action: 0\n",
      "value_next [330291.22] td_target [313777.66] td_error [19211.781]\n",
      "state: [-0.09216573 -1.73719462  0.19577935  2.74145568] reward: 1.0 action: 0\n",
      "value_next [369649.97] td_target [351168.47] td_error [18652.125]\n",
      "state: [-0.12690962 -1.93308437  0.25060846  3.08686913] reward: 1.0 action: 0\n",
      "value_next [410833.4] td_target [390292.72] td_error [18063.312]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 56/200\n",
      "=================\n",
      "state: [-0.01008136 -0.19572338 -0.02658085  0.2450301 ] reward: 1.0 action: 0\n",
      "value_next [102237.16] td_target [97126.3] td_error [25479.86]\n",
      "state: [-0.01399582 -0.39045579 -0.02168025  0.52921169] reward: 1.0 action: 0\n",
      "value_next [134153.23] td_target [127446.57] td_error [24532.758]\n",
      "state: [-0.02180494 -0.58526614 -0.01109602  0.81498504] reward: 1.0 action: 0\n",
      "value_next [167020.3] td_target [158670.28] td_error [23659.047]\n",
      "state: [-0.03351026 -0.7802344   0.00520368  1.10415727] reward: 1.0 action: 0\n",
      "value_next [200966.08] td_target [190918.77] td_error [22857.86]\n",
      "state: [-0.04911495 -0.97542441  0.02728683  1.39846819] reward: 1.0 action: 0\n",
      "value_next [236131.77] td_target [224326.17] td_error [22124.516]\n",
      "state: [-0.06862344 -1.17087479  0.05525619  1.69955596] reward: 1.0 action: 0\n",
      "value_next [272666.9] td_target [259034.56] td_error [21450.703]\n",
      "state: [-0.09204093 -1.36658827  0.08924731  2.00891518] reward: 1.0 action: 0\n",
      "value_next [310723.56] td_target [295188.38] td_error [20823.75]\n",
      "state: [-0.1193727  -1.56251856  0.12942562  2.32784483] reward: 1.0 action: 0\n",
      "value_next [350450.16] td_target [332928.66] td_error [20226.719]\n",
      "state: [-0.15062307 -1.75855474  0.17598251  2.65738467] reward: 1.0 action: 0\n",
      "value_next [391983.5] td_target [372385.3] td_error [19636.812]\n",
      "state: [-0.18579417 -1.9545031   0.22913021  2.9982398 ] reward: 1.0 action: 0\n",
      "value_next [435440.25] td_target [413669.22] td_error [19025.188]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 57/200\n",
      "=================\n",
      "state: [-0.04938629 -0.23428245  0.02883161  0.27791173] reward: 1.0 action: 0\n",
      "value_next [118625.36] td_target [112695.09] td_error [28003.836]\n",
      "state: [-0.05407194 -0.4298036   0.03438985  0.57954688] reward: 1.0 action: 0\n",
      "value_next [154048.56] td_target [146347.12] td_error [26951.188]\n",
      "state: [-0.06266801 -0.62539018  0.04598078  0.88286191] reward: 1.0 action: 0\n",
      "value_next [190519.48] td_target [180994.52] td_error [25975.375]\n",
      "state: [-0.07517581 -0.82110542  0.06363802  1.18963808] reward: 1.0 action: 0\n",
      "value_next [228178.36] td_target [216770.44] td_error [25074.688]\n",
      "state: [-0.09159792 -1.0169918   0.08743078  1.50156982] reward: 1.0 action: 0\n",
      "value_next [267178.84] td_target [253820.89] td_error [24242.61]\n",
      "state: [-0.11193776 -1.21305979  0.11746218  1.82022036] reward: 1.0 action: 0\n",
      "value_next [307680.7] td_target [292297.66] td_error [23467.625]\n",
      "state: [-0.13619895 -1.40927487  0.15386659  2.1469694 ] reward: 1.0 action: 0\n",
      "value_next [349842.56] td_target [332351.44] td_error [22733.094]\n",
      "state: [-0.16438445 -1.60554229  0.19680598  2.48295093] reward: 1.0 action: 0\n",
      "value_next [393813.3] td_target [374123.66] td_error [22016.375]\n",
      "state: [-0.1964953  -1.80168955  0.24646499  2.82898038] reward: 1.0 action: 0\n",
      "value_next [439722.06] td_target [417736.97] td_error [21288.312]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 58/200\n",
      "=================\n",
      "state: [-0.00218435 -0.23864123  0.04983639  0.339165  ] reward: 1.0 action: 0\n",
      "value_next [128966.305] td_target [122518.984] td_error [30012.305]\n",
      "state: [-0.00695718 -0.43443558  0.05661969  0.64713772] reward: 1.0 action: 0\n",
      "value_next [167047.84] td_target [158696.45] td_error [28902.328]\n",
      "state: [-0.01564589 -0.63029879  0.06956244  0.95709904] reward: 1.0 action: 0\n",
      "value_next [206271.84] td_target [195959.25] td_error [27872.156]\n",
      "state: [-0.02825186 -0.82628376  0.08870442  1.27080037] reward: 1.0 action: 0\n",
      "value_next [246788.02] td_target [234449.61] td_error [26918.703]\n",
      "state: [-0.04477754 -1.02241899  0.11412043  1.58989144] reward: 1.0 action: 0\n",
      "value_next [288758.06] td_target [274321.16] td_error [26033.453]\n",
      "state: [-0.06522592 -1.21869638  0.14591826  1.91587106] reward: 1.0 action: 0\n",
      "value_next [332348.] td_target [315731.6] td_error [25202.281]\n",
      "state: [-0.08959985 -1.41505699  0.18423568  2.25002972] reward: 1.0 action: 0\n",
      "value_next [377719.4] td_target [358834.44] td_error [24405.312]\n",
      "state: [-0.11790099 -1.61137485  0.22923628  2.59338234] reward: 1.0 action: 0\n",
      "value_next [425019.9] td_target [403769.9] td_error [23615.906]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 59/200\n",
      "==================\n",
      "state: [ 0.04629307 -0.15604974 -0.04301977  0.23673871] reward: 1.0 action: 0\n",
      "value_next [113522.68] td_target [107847.55] td_error [30181.273]\n",
      "state: [ 0.04317207 -0.35053151 -0.038285    0.51554756] reward: 1.0 action: 0\n",
      "value_next [150817.03] td_target [143277.17] td_error [29057.14]\n",
      "state: [ 0.03616144 -0.54509401 -0.02797405  0.7959244 ] reward: 1.0 action: 0\n",
      "value_next [189190.11] td_target [179731.61] td_error [28023.547]\n",
      "state: [ 0.02525956 -0.73982112 -0.01205556  1.07967749] reward: 1.0 action: 0\n",
      "value_next [228792.45] td_target [217353.83] td_error [27078.625]\n",
      "state: [ 0.01046314 -0.93478183  0.00953799  1.36855306] reward: 1.0 action: 0\n",
      "value_next [269789.5] td_target [256301.02] td_error [26216.734]\n",
      "state: [-0.0082325  -1.13002183  0.03690905  1.66420387] reward: 1.0 action: 0\n",
      "value_next [312355.6] td_target [296738.8] td_error [25428.188]\n",
      "state: [-0.03083293 -1.32555345  0.07019313  1.96815033] reward: 1.0 action: 0\n",
      "value_next [356668.12] td_target [338835.72] td_error [24698.688]\n",
      "state: [-0.057344   -1.52134325  0.10955613  2.28173166] reward: 1.0 action: 0\n",
      "value_next [402900.6] td_target [382756.56] td_error [24008.938]\n",
      "state: [-0.08777087 -1.71729709  0.15519077  2.60604534] reward: 1.0 action: 0\n",
      "value_next [451214.66] td_target [428654.9] td_error [23333.75]\n",
      "state: [-0.12211681 -1.91324245  0.20731167  2.94187425] reward: 1.0 action: 0\n",
      "value_next [501749.94] td_target [476663.44] td_error [22640.5]\n",
      "state: [-0.16038166 -2.10890861  0.26614916  3.28960301] reward: 1.0 action: 0\n",
      "value_next [554612.9] td_target [526883.25] td_error [21888.938]\n",
      "reward:11.0, max reward:12.0, episode len:11\n",
      "\n",
      "Episode 60/200\n",
      "==================\n",
      "state: [ 0.00344432 -0.19887993  0.04135786  0.29390121] reward: 1.0 action: 0\n",
      "value_next [137057.84] td_target [130205.95] td_error [33779.203]\n",
      "state: [-5.33282632e-04 -3.94566378e-01  4.72358826e-02  5.99335623e-01] reward: 1.0 action: 0\n",
      "value_next [179393.08] td_target [170424.42] td_error [32522.156]\n",
      "state: [-0.00842461 -0.59031627  0.0592226   0.90651514] reward: 1.0 action: 0\n",
      "value_next [222967.11] td_target [211819.75] td_error [31354.688]\n",
      "state: [-0.02023094 -0.78618792  0.0773529   1.21720878] reward: 1.0 action: 0\n",
      "value_next [267941.84] td_target [254545.75] td_error [30274.234]\n",
      "state: [-0.03595469 -0.98221745  0.10169707  1.5330929 ] reward: 1.0 action: 0\n",
      "value_next [314493.72] td_target [298770.03] td_error [29272.406]\n",
      "state: [-0.05559904 -1.1784071   0.13235893  1.85570433] reward: 1.0 action: 0\n",
      "value_next [362805.22] td_target [344665.97] td_error [28334.812]\n",
      "state: [-0.07916718 -1.37471163  0.16947302  2.18638561] reward: 1.0 action: 0\n",
      "value_next [413055.72] td_target [392403.94] td_error [27440.562]\n",
      "state: [-0.10666142 -1.57102258  0.21320073  2.52622036] reward: 1.0 action: 0\n",
      "value_next [465411.3] td_target [442141.75] td_error [26561.719]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 61/200\n",
      "==================\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [ 0.02863637 -0.16733863 -0.02605543  0.29451193] reward: 1.0 action: 0\n",
      "value_next [135280.78] td_target [128517.74] td_error [33957.82]\n",
      "state: [ 0.0252896  -0.36207961 -0.02016519  0.57886482] reward: 1.0 action: 0\n",
      "value_next [177654.3] td_target [168772.58] td_error [32695.688]\n",
      "state: [ 0.018048   -0.55691323 -0.00858789  0.86512768] reward: 1.0 action: 0\n",
      "value_next [221256.97] td_target [210195.12] td_error [31534.516]\n",
      "state: [ 0.00690974 -0.75191725  0.00871466  1.1550981 ] reward: 1.0 action: 0\n",
      "value_next [266259.94] td_target [252947.94] td_error [30471.25]\n",
      "state: [-0.00812861 -0.94715174  0.03181662  1.4505008 ] reward: 1.0 action: 0\n",
      "value_next [312849.06] td_target [297207.6] td_error [29498.281]\n",
      "state: [-0.02707164 -1.14264991  0.06082664  1.75295215] reward: 1.0 action: 0\n",
      "value_next [361218.22] td_target [343158.3] td_error [28603.062]\n",
      "state: [-0.04992464 -1.33840689  0.09588568  2.06391658] reward: 1.0 action: 0\n",
      "value_next [411561.94] td_target [390984.84] td_error [27767.594]\n",
      "state: [-0.07669278 -1.53436626  0.13716401  2.38465264] reward: 1.0 action: 0\n",
      "value_next [464067.12] td_target [440864.75] td_error [26967.812]\n",
      "state: [-0.1073801  -1.730404    0.18485707  2.71614728] reward: 1.0 action: 0\n",
      "value_next [518902.94] td_target [492958.78] td_error [26172.125]\n",
      "state: [-0.14198818 -1.92630985  0.23918001  3.05903842] reward: 1.0 action: 0\n",
      "value_next [576209.56] td_target [547400.06] td_error [25341.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 62/200\n",
      "===================\n",
      "state: [ 0.04223145 -0.14879007  0.00183346  0.2852885 ] reward: 1.0 action: 0\n",
      "value_next [141778.31] td_target [134690.39] td_error [36830.953]\n",
      "state: [ 0.03925565 -0.34393812  0.00753923  0.57854912] reward: 1.0 action: 0\n",
      "value_next [187436.56] td_target [178065.73] td_error [35459.28]\n",
      "state: [ 0.03237689 -0.53916491  0.01911021  0.87359751] reward: 1.0 action: 0\n",
      "value_next [234405.17] td_target [222685.9] td_error [34192.094]\n",
      "state: [ 0.02159359 -0.73454142  0.03658216  1.17222681] reward: 1.0 action: 0\n",
      "value_next [282862.28] td_target [268720.16] td_error [33026.406]\n",
      "state: [ 0.00690276 -0.93011937  0.0600267   1.47615022] reward: 1.0 action: 0\n",
      "value_next [333001.84] td_target [316352.75] td_error [31953.719]\n",
      "state: [-0.01169963 -1.12592103  0.0895497   1.78696099] reward: 1.0 action: 0\n",
      "value_next [385025.34] td_target [365775.06] td_error [30959.656]\n",
      "state: [-0.03421805 -1.32192709  0.12528892  2.10608445] reward: 1.0 action: 0\n",
      "value_next [439133.47] td_target [417177.78] td_error [30023.719]\n",
      "state: [-0.06065659 -1.51806227  0.16741061  2.43471979] reward: 1.0 action: 0\n",
      "value_next [495516.72] td_target [470741.88] td_error [29118.562]\n",
      "state: [-0.09101784 -1.71417842  0.21610501  2.77377047] reward: 1.0 action: 0\n",
      "value_next [554343.2] td_target [526627.] td_error [28208.219]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 63/200\n",
      "===================\n",
      "state: [-0.01715327 -0.21329049 -0.02243336  0.28557752] reward: 1.0 action: 0\n",
      "value_next [157597.5] td_target [149718.62] td_error [37653.156]\n",
      "state: [-0.02141908 -0.40808543 -0.01672181  0.57110156] reward: 1.0 action: 0\n",
      "value_next [204954.2] td_target [194707.48] td_error [36219.594]\n",
      "state: [-0.02958079 -0.60296895 -0.00529978  0.85846998] reward: 1.0 action: 0\n",
      "value_next [253653.28] td_target [240971.61] td_error [34899.656]\n",
      "state: [-0.04164017 -0.7980183   0.01186962  1.14948178] reward: 1.0 action: 0\n",
      "value_next [303886.22] td_target [288692.9] td_error [33690.03]\n",
      "state: [-0.05760054 -0.99329315  0.03485926  1.44586301] reward: 1.0 action: 0\n",
      "value_next [355860.16] td_target [338068.16] td_error [32582.031]\n",
      "state: [-0.0774664  -1.18882617  0.06377652  1.7492309 ] reward: 1.0 action: 0\n",
      "value_next [409790.5] td_target [389301.97] td_error [31561.5]\n",
      "state: [-0.10124292 -1.38461185  0.09876113  2.06104994] reward: 1.0 action: 0\n",
      "value_next [465892.9] td_target [442599.25] td_error [30608.438]\n",
      "state: [-0.12893516 -1.58059294  0.13998213  2.38257767] reward: 1.0 action: 0\n",
      "value_next [524373.56] td_target [498155.88] td_error [29695.625]\n",
      "state: [-0.16054702 -1.77664433  0.18763369  2.71479871] reward: 1.0 action: 0\n",
      "value_next [585418.5] td_target [556148.56] td_error [28788.]\n",
      "state: [-0.1960799  -1.97255447  0.24192966  3.0583473 ] reward: 1.0 action: 0\n",
      "value_next [649180.06] td_target [616722.06] td_error [27841.125]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 64/200\n",
      "===================\n",
      "state: [ 0.02491973 -0.22454904  0.03436533  0.28530268] reward: 1.0 action: 0\n",
      "value_next [168889.03] td_target [160445.58] td_error [41559.97]\n",
      "state: [ 0.02042875 -0.42014383  0.04007139  0.58862305] reward: 1.0 action: 0\n",
      "value_next [220861.62] td_target [209819.55] td_error [39976.047]\n",
      "state: [ 0.01202587 -0.61580333  0.05184385  0.89365454] reward: 1.0 action: 0\n",
      "value_next [274293.47] td_target [260579.8] td_error [38507.75]\n",
      "state: [-2.90194079e-04 -8.11588621e-01  6.97169373e-02  1.20217316e+00] reward: 1.0 action: 0\n",
      "value_next [329385.1] td_target [312916.84] td_error [37151.062]\n",
      "state: [-0.01652197 -1.00753938  0.0937604   1.51586546] reward: 1.0 action: 0\n",
      "value_next [386352.94] td_target [367036.28] td_error [35895.188]\n",
      "state: [-0.03667275 -1.20366246  0.12407771  1.83628312] reward: 1.0 action: 0\n",
      "value_next [445419.16] td_target [423149.2] td_error [34722.188]\n",
      "state: [-0.060746   -1.39991858  0.16080337  2.16478946] reward: 1.0 action: 0\n",
      "value_next [506801.5] td_target [481462.4] td_error [33606.656]\n",
      "state: [-0.08874437 -1.59620691  0.20409916  2.50249619] reward: 1.0 action: 0\n",
      "value_next [570701.56] td_target [542167.5] td_error [32515.094]\n",
      "state: [-0.12066851 -1.79234739  0.25414909  2.85018968] reward: 1.0 action: 0\n",
      "value_next [637290.75] td_target [605427.2] td_error [31403.812]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 65/200\n",
      "====================\n",
      "state: [-0.00210947 -0.2241747  -0.00470379  0.32524264] reward: 1.0 action: 0\n",
      "value_next [180657.08] td_target [171625.22] td_error [42244.133]\n",
      "state: [-0.00659296 -0.41922937  0.00180106  0.61643848] reward: 1.0 action: 0\n",
      "value_next [234011.67] td_target [222312.08] td_error [40647.484]\n",
      "state: [-0.01497755 -0.61437643  0.01412983  0.90968812] reward: 1.0 action: 0\n",
      "value_next [288892.34] td_target [274448.72] td_error [39175.92]\n",
      "state: [-0.02726508 -0.80968674  0.0323236   1.20677837] reward: 1.0 action: 0\n",
      "value_next [345512.16] td_target [328237.53] td_error [37824.625]\n",
      "state: [-0.04345881 -1.00521108  0.05645916  1.50941316] reward: 1.0 action: 0\n",
      "value_next [404099.4] td_target [383895.44] td_error [36582.406]\n",
      "state: [-0.06356304 -1.20096998  0.08664743  1.8191738 ] reward: 1.0 action: 0\n",
      "value_next [464889.22] td_target [441645.75] td_error [35431.688]\n",
      "state: [-0.08758243 -1.39694157  0.1230309   2.13747088] reward: 1.0 action: 0\n",
      "value_next [528113.5] td_target [501708.8] td_error [34347.625]\n",
      "state: [-0.11552127 -1.5930471   0.16578032  2.46548561] reward: 1.0 action: 0\n",
      "value_next [593990.4] td_target [564291.9] td_error [33297.438]\n",
      "state: [-0.14738221 -1.7891339   0.21509003  2.80409972] reward: 1.0 action: 0\n",
      "value_next [662710.44] td_target [629575.94] td_error [32238.75]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 66/200\n",
      "====================\n",
      "state: [-0.00292557 -0.22243189  0.03359742  0.29442467] reward: 1.0 action: 0\n",
      "value_next [189290.92] td_target [179827.38] td_error [45716.344]\n",
      "state: [-0.0073742  -0.41801633  0.03948591  0.59751154] reward: 1.0 action: 0\n",
      "value_next [246609.39] td_target [234279.92] td_error [43961.734]\n",
      "state: [-0.01573453 -0.61366792  0.05143614  0.90236601] reward: 1.0 action: 0\n",
      "value_next [305516.03] td_target [290241.22] td_error [42336.78]\n",
      "state: [-0.02800789 -0.80944753  0.06948346  1.21076259] reward: 1.0 action: 0\n",
      "value_next [366234.12] td_target [347923.4] td_error [40836.625]\n",
      "state: [-0.04419684 -1.00539437  0.09369871  1.52438532] reward: 1.0 action: 0\n",
      "value_next [429003.03] td_target [407553.88] td_error [39448.656]\n",
      "state: [-0.06430473 -1.20151451  0.12418642  1.84478225] reward: 1.0 action: 0\n",
      "value_next [494067.9] td_target [469365.5] td_error [38152.938]\n",
      "state: [-0.08833502 -1.39776763  0.16108207  2.17331187] reward: 1.0 action: 0\n",
      "value_next [561668.] td_target [533585.56] td_error [36921.156]\n",
      "state: [-0.11629037 -1.59405145  0.2045483   2.51107965] reward: 1.0 action: 0\n",
      "value_next [632023.44] td_target [600423.25] td_error [35715.312]\n",
      "state: [-0.1481714  -1.79018409  0.2547699   2.85886411] reward: 1.0 action: 0\n",
      "value_next [705321.06] td_target [670056.] td_error [34487.75]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 67/200\n",
      "====================\n",
      "state: [-0.02593005 -0.19966913  0.00722331  0.31656457] reward: 1.0 action: 0\n",
      "value_next [198141.53] td_target [188235.45] td_error [46981.875]\n",
      "state: [-0.02992343 -0.39489322  0.0135546   0.6115167 ] reward: 1.0 action: 0\n",
      "value_next [257266.95] td_target [244404.61] td_error [45195.61]\n",
      "state: [-0.03782129 -0.59020197  0.02578493  0.90843784] reward: 1.0 action: 0\n",
      "value_next [318055.28] td_target [302153.5] td_error [43547.938]\n",
      "state: [-0.04962533 -0.78566329  0.04395369  1.20911228] reward: 1.0 action: 0\n",
      "value_next [380741.2] td_target [361705.12] td_error [42033.375]\n",
      "state: [-0.0653386  -0.98132448  0.06813593  1.51523883] reward: 1.0 action: 0\n",
      "value_next [445575.75] td_target [423297.97] td_error [40639.188]\n",
      "state: [-0.08496509 -1.17720164  0.09844071  1.82838928] reward: 1.0 action: 0\n",
      "value_next [512816.53] td_target [487176.7] td_error [39345.156]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.10850912 -1.37326715  0.1350085   2.14995856] reward: 1.0 action: 0\n",
      "value_next [582716.44] td_target [553581.6] td_error [38123.344]\n",
      "state: [-0.13597446 -1.56943485  0.17800767  2.4811045 ] reward: 1.0 action: 0\n",
      "value_next [655510.94] td_target [622736.4] td_error [36935.938]\n",
      "state: [-0.16736316 -1.76554277  0.22762976  2.82267647] reward: 1.0 action: 0\n",
      "value_next [731403.7] td_target [694834.5] td_error [35735.25]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 68/200\n",
      "====================\n",
      "state: [ 0.05041559 -0.16808035  0.0186823   0.25383258] reward: 1.0 action: 0\n",
      "value_next [190628.75] td_target [181098.31] td_error [50428.344]\n",
      "state: [ 0.04705398 -0.36346401  0.02375895  0.5523492 ] reward: 1.0 action: 0\n",
      "value_next [252759.38] td_target [240122.4] td_error [48498.594]\n",
      "state: [ 0.0397847  -0.55891143  0.03480594  0.85242206] reward: 1.0 action: 0\n",
      "value_next [316576.34] td_target [300748.53] td_error [46714.297]\n",
      "state: [ 0.02860647 -0.75449014  0.05185438  1.15584334] reward: 1.0 action: 0\n",
      "value_next [382320.06] td_target [363205.06] td_error [45070.562]\n",
      "state: [ 0.01351667 -0.95024839  0.07497124  1.46432438] reward: 1.0 action: 0\n",
      "value_next [450249.38] td_target [427737.9] td_error [43554.812]\n",
      "state: [-0.0054883  -1.14620439  0.10425773  1.7794536 ] reward: 1.0 action: 0\n",
      "value_next [520630.75] td_target [494600.22] td_error [42146.906]\n",
      "state: [-0.02841238 -1.34233382  0.1398468   2.10264657] reward: 1.0 action: 0\n",
      "value_next [593726.4] td_target [564041.06] td_error [40817.688]\n",
      "state: [-0.05525906 -1.53855513  0.18189973  2.43508616] reward: 1.0 action: 0\n",
      "value_next [669781.8] td_target [636293.7] td_error [39529.062]\n",
      "state: [-0.08603016 -1.73471243  0.23060146  2.77765185] reward: 1.0 action: 0\n",
      "value_next [749009.9] td_target [711560.4] td_error [38231.312]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 69/200\n",
      "=====================\n",
      "state: [-0.03901767 -0.16741901 -0.03050384  0.26668601] reward: 1.0 action: 0\n",
      "value_next [204485.67] td_target [194262.39] td_error [50471.453]\n",
      "state: [-0.04236605 -0.36209263 -0.02517012  0.54959385] reward: 1.0 action: 0\n",
      "value_next [267425.] td_target [254054.75] td_error [48528.64]\n",
      "state: [-0.0496079  -0.55685214 -0.01417825  0.83424136] reward: 1.0 action: 0\n",
      "value_next [332082.6] td_target [315479.47] td_error [46742.594]\n",
      "state: [-0.06074494 -0.75177754  0.00250658  1.12243181] reward: 1.0 action: 0\n",
      "value_next [398711.84] td_target [378777.25] td_error [45108.47]\n",
      "state: [-0.07578049 -0.94693227  0.02495522  1.41589991] reward: 1.0 action: 0\n",
      "value_next [467585.47] td_target [444207.2] td_error [43614.562]\n",
      "state: [-0.09471914 -1.14235426  0.05327321  1.7162777 ] reward: 1.0 action: 0\n",
      "value_next [538985.6] td_target [512037.34] td_error [42242.312]\n",
      "state: [-0.11756622 -1.33804516  0.08759877  2.02505253] reward: 1.0 action: 0\n",
      "value_next [613194.6] td_target [582535.9] td_error [40965.625]\n",
      "state: [-0.14432713 -1.53395725  0.12809982  2.343515  ] reward: 1.0 action: 0\n",
      "value_next [690482.3] td_target [655959.2] td_error [39749.5]\n",
      "state: [-0.17500627 -1.72997774  0.17497012  2.67269499] reward: 1.0 action: 0\n",
      "value_next [771093.2] td_target [732539.5] td_error [38548.375]\n",
      "state: [-0.20960583 -1.9259105   0.22842402  3.01328595] reward: 1.0 action: 0\n",
      "value_next [855228.25] td_target [812467.8] td_error [37304.875]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 70/200\n",
      "=====================\n",
      "state: [ 0.04295091 -0.20032784  0.04218793  0.34717982] reward: 1.0 action: 0\n",
      "value_next [229652.47] td_target [218170.84] td_error [55623.17]\n",
      "state: [ 0.03894435 -0.39602368  0.04913152  0.65286196] reward: 1.0 action: 0\n",
      "value_next [299296.3] td_target [284332.5] td_error [53520.344]\n",
      "state: [ 0.03102388 -0.59179415  0.06218876  0.96060216] reward: 1.0 action: 0\n",
      "value_next [370880.4] td_target [352337.38] td_error [51572.344]\n",
      "state: [ 0.01918799 -0.78769436  0.08140081  1.27215615] reward: 1.0 action: 0\n",
      "value_next [444670.94] td_target [422438.38] td_error [49771.125]\n",
      "state: [ 0.00343411 -0.98375534  0.10684393  1.58917924] reward: 1.0 action: 0\n",
      "value_next [520950.] td_target [494903.5] td_error [48099.594]\n",
      "state: [-0.016241   -1.17997193  0.13862751  1.91317815] reward: 1.0 action: 0\n",
      "value_next [600002.7] td_target [570003.56] td_error [46531.094]\n",
      "state: [-0.03984044 -1.37628889  0.17689108  2.24545463] reward: 1.0 action: 0\n",
      "value_next [682102.9] td_target [647998.75] td_error [45028.812]\n",
      "state: [-0.06736622 -1.57258475  0.22180017  2.58703898] reward: 1.0 action: 0\n",
      "value_next [767497.25] td_target [729123.4] td_error [43544.25]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 71/200\n",
      "=====================\n",
      "state: [-0.02550638 -0.18573635  0.04872998  0.30519274] reward: 1.0 action: 0\n",
      "value_next [238521.6] td_target [226596.52] td_error [58096.17]\n",
      "state: [-0.02922111 -0.38151764  0.05483384  0.612837  ] reward: 1.0 action: 0\n",
      "value_next [311111.3] td_target [295556.75] td_error [55854.42]\n",
      "state: [-0.03685146 -0.57736129  0.06709058  0.92227402] reward: 1.0 action: 0\n",
      "value_next [385658.5] td_target [366376.56] td_error [53776.312]\n",
      "state: [-0.04839869 -0.77332246  0.08553606  1.23526432] reward: 1.0 action: 0\n",
      "value_next [462442.1] td_target [439320.97] td_error [51854.5]\n",
      "state: [-0.06386514 -0.96943323  0.11024135  1.55347197] reward: 1.0 action: 0\n",
      "value_next [541758.6] td_target [514671.7] td_error [50071.875]\n",
      "state: [-0.0832538  -1.16569058  0.14131079  1.87841629] reward: 1.0 action: 0\n",
      "value_next [623908.8] td_target [592714.4] td_error [48401.5]\n",
      "state: [-0.10656761 -1.36204245  0.17887911  2.21141554] reward: 1.0 action: 0\n",
      "value_next [709182.4] td_target [673724.25] td_error [46805.438]\n",
      "state: [-0.13380846 -1.55837172  0.22310742  2.55352088] reward: 1.0 action: 0\n",
      "value_next [797841.5] td_target [757950.44] td_error [45234.312]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 72/200\n",
      "======================\n",
      "state: [ 0.01470368 -0.23836162 -0.01282732  0.28229708] reward: 1.0 action: 0\n",
      "value_next [241217.03] td_target [229157.17] td_error [57908.125]\n",
      "state: [ 0.00993645 -0.43329828 -0.00718138  0.57090688] reward: 1.0 action: 0\n",
      "value_next [313727.94] td_target [298042.53] td_error [55659.094]\n",
      "state: [ 0.00127048 -0.62831879  0.00423676  0.8613188 ] reward: 1.0 action: 0\n",
      "value_next [388188.47] td_target [368780.03] td_error [53590.406]\n",
      "state: [-0.01129589 -0.82349818  0.02146314  1.15533086] reward: 1.0 action: 0\n",
      "value_next [464891.6] td_target [441648.] td_error [51694.72]\n",
      "state: [-0.02776586 -1.01889332  0.04456975  1.45466573] reward: 1.0 action: 0\n",
      "value_next [544148.9] td_target [516942.44] td_error [49956.75]\n",
      "state: [-0.04814372 -1.21453322  0.07366307  1.76093333] reward: 1.0 action: 0\n",
      "value_next [626279.4] td_target [594966.4] td_error [48353.125]\n",
      "state: [-0.07243439 -1.41040751  0.10888173  2.07558534] reward: 1.0 action: 0\n",
      "value_next [711597.56] td_target [676018.7] td_error [46851.312]\n",
      "state: [-0.10064254 -1.60645256  0.15039344  2.39985954] reward: 1.0 action: 0\n",
      "value_next [800399.7] td_target [760380.7] td_error [45408.25]\n",
      "state: [-0.13277159 -1.80253506  0.19839063  2.73471247] reward: 1.0 action: 0\n",
      "value_next [892947.2] td_target [848300.8] td_error [43969.062]\n",
      "state: [-0.16882229 -1.99843332  0.25308488  3.08074103] reward: 1.0 action: 0\n",
      "value_next [989446.94] td_target [939975.56] td_error [42464.312]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 73/200\n",
      "======================\n",
      "state: [ 0.02329195 -0.15735408  0.0352931   0.29793656] reward: 1.0 action: 0\n",
      "value_next [247883.64] td_target [235490.45] td_error [63185.234]\n",
      "state: [ 0.02014487 -0.35296089  0.04125183  0.60153807] reward: 1.0 action: 0\n",
      "value_next [326176.47] td_target [309868.66] td_error [60774.734]\n",
      "state: [ 0.01308566 -0.54863487  0.05328259  0.90692394] reward: 1.0 action: 0\n",
      "value_next [406589.94] td_target [386261.44] td_error [58542.844]\n",
      "state: [ 0.00211296 -0.74443616  0.07142107  1.21586672] reward: 1.0 action: 0\n",
      "value_next [489421.53] td_target [464951.44] td_error [56481.875]\n",
      "state: [-0.01277577 -0.94040297  0.0957384   1.53004749] reward: 1.0 action: 0\n",
      "value_next [574987.94] td_target [546239.56] td_error [54574.03]\n",
      "state: [-0.03158382 -1.13654011  0.12633935  1.85101001] reward: 1.0 action: 0\n",
      "value_next [663611.06] td_target [630431.5] td_error [52791.125]\n",
      "state: [-0.05431463 -1.33280554  0.16335955  2.18010678] reward: 1.0 action: 0\n",
      "value_next [755602.9] td_target [717823.75] td_error [51093.938]\n",
      "state: [-0.08097074 -1.52909483  0.20696169  2.51843515] reward: 1.0 action: 0\n",
      "value_next [851248.4] td_target [808686.94] td_error [49430.625]\n",
      "state: [-0.11155263 -1.7252234   0.25733039  2.86676314] reward: 1.0 action: 0\n",
      "value_next [950785.06] td_target [903246.8] td_error [47735.188]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 74/200\n",
      "======================\n",
      "state: [-0.04800604 -0.1461883  -0.01003447  0.24675269] reward: 1.0 action: 0\n",
      "value_next [251270.81] td_target [238708.27] td_error [63927.22]\n",
      "state: [-0.0509298  -0.34116552 -0.00509942  0.5362537 ] reward: 1.0 action: 0\n",
      "value_next [330435.38] td_target [313914.6] td_error [61443.64]\n",
      "state: [-0.05775311 -0.53621539  0.00562566  0.82732549] reward: 1.0 action: 0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "value_next [411698.03] td_target [391114.12] td_error [59156.188]\n",
      "state: [-0.06847742 -0.73141382  0.02217217  1.12177242] reward: 1.0 action: 0\n",
      "value_next [495370.88] td_target [470603.3] td_error [57058.25]\n",
      "state: [-0.0831057  -0.9268194   0.04460761  1.42132683] reward: 1.0 action: 0\n",
      "value_next [581788.56] td_target [552700.1] td_error [55134.72]\n",
      "state: [-0.10164209 -1.12246388  0.07303415  1.72761193] reward: 1.0 action: 0\n",
      "value_next [671295.5] td_target [637731.7] td_error [53361.]\n",
      "state: [-0.12409136 -1.31834074  0.10758639  2.04209692] reward: 1.0 action: 0\n",
      "value_next [764232.94] td_target [726022.3] td_error [51702.875]\n",
      "state: [-0.15045818 -1.51439149  0.14842833  2.36604216] reward: 1.0 action: 0\n",
      "value_next [860923.44] td_target [817878.25] td_error [50114.312]\n",
      "state: [-0.18074601 -1.71048951  0.19574917  2.70043283] reward: 1.0 action: 0\n",
      "value_next [961653.5] td_target [913571.8] td_error [48536.312]\n",
      "state: [-0.2149558  -1.90642149  0.24975783  3.04590143] reward: 1.0 action: 0\n",
      "value_next [1066651.6] td_target [1013320.] td_error [46894.688]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 75/200\n",
      "======================\n",
      "state: [-0.04401396 -0.2188916  -0.02112917  0.3061178 ] reward: 1.0 action: 0\n",
      "value_next [283174.16] td_target [269016.44] td_error [65616.875]\n",
      "state: [-0.0483918  -0.41370619 -0.01500681  0.59206294] reward: 1.0 action: 0\n",
      "value_next [365897.] td_target [347603.16] td_error [63086.938]\n",
      "state: [-0.05666592 -0.60861487 -0.00316555  0.87998121] reward: 1.0 action: 0\n",
      "value_next [450878.06] td_target [428335.16] td_error [60762.53]\n",
      "state: [-0.06883822 -0.80369367  0.01443407  1.17166727] reward: 1.0 action: 0\n",
      "value_next [538449.06] td_target [511527.6] td_error [58634.656]\n",
      "state: [-0.08491209 -0.99900029  0.03786742  1.46884015] reward: 1.0 action: 0\n",
      "value_next [628961.8] td_target [597514.7] td_error [56685.875]\n",
      "state: [-0.1048921  -1.1945646   0.06724422  1.77310668] reward: 1.0 action: 0\n",
      "value_next [722775.7] td_target [686637.9] td_error [54889.25]\n",
      "state: [-0.12878339 -1.39037724  0.10270635  2.08591673] reward: 1.0 action: 0\n",
      "value_next [820244.5] td_target [779233.25] td_error [53208.]\n",
      "state: [-0.15659093 -1.58637585  0.14442469  2.40850798] reward: 1.0 action: 0\n",
      "value_next [921700.5] td_target [875616.44] td_error [51592.812]\n",
      "state: [-0.18831845 -1.78242871  0.19259485  2.74183898] reward: 1.0 action: 0\n",
      "value_next [1027435.94] td_target [976065.1] td_error [49980.938]\n",
      "state: [-0.22396702 -1.978316    0.24743163  3.08651072] reward: 1.0 action: 0\n",
      "value_next [1137681.] td_target [1080797.9] td_error [48293.562]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 76/200\n",
      "=======================\n",
      "state: [ 0.02999599 -0.23489214 -0.04847125  0.31608148] reward: 1.0 action: 0\n",
      "value_next [288135.3] td_target [273729.53] td_error [67598.72]\n",
      "state: [ 0.02529815 -0.42929136 -0.04214962  0.59309286] reward: 1.0 action: 0\n",
      "value_next [373152.5] td_target [354495.88] td_error [65024.062]\n",
      "state: [ 0.01671232 -0.62379873 -0.03028777  0.87220646] reward: 1.0 action: 0\n",
      "value_next [460518.1] td_target [437493.2] td_error [62664.344]\n",
      "state: [ 0.00423634 -0.81849599 -0.01284364  1.15521516] reward: 1.0 action: 0\n",
      "value_next [550574.06] td_target [523046.34] td_error [60510.22]\n",
      "state: [-0.01213358 -1.01344813  0.01026067  1.44384333] reward: 1.0 action: 0\n",
      "value_next [643682.56] td_target [611499.44] td_error [58544.25]\n",
      "state: [-0.03240254 -1.20869485  0.03913753  1.73971458] reward: 1.0 action: 0\n",
      "value_next [740214.7] td_target [703204.94] td_error [56740.25]\n",
      "state: [-0.05657643 -1.40424008  0.07393182  2.0443113 ] reward: 1.0 action: 0\n",
      "value_next [840538.5] td_target [798512.56] td_error [55062.938]\n",
      "state: [-0.08466124 -1.60003913  0.11481805  2.35892374] reward: 1.0 action: 0\n",
      "value_next [945003.25] td_target [897754.06] td_error [53464.5]\n",
      "state: [-0.11666202 -1.79598317  0.16199653  2.68458693] reward: 1.0 action: 0\n",
      "value_next [1053923.4] td_target [1001228.2] td_error [51884.688]\n",
      "state: [-0.15258168 -1.99188101  0.21568826  3.02200537] reward: 1.0 action: 0\n",
      "value_next [1167555.6] td_target [1109178.9] td_error [50246.25]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 77/200\n",
      "=======================\n",
      "state: [-3.97471802e-02 -2.11185276e-01  1.19833672e-06  2.44492897e-01] reward: 1.0 action: 0\n",
      "value_next [299895.2] td_target [284901.44] td_error [73631.33]\n",
      "state: [-0.04397089 -0.40630724  0.00489106  0.5371762 ] reward: 1.0 action: 0\n",
      "value_next [391563.6] td_target [371986.4] td_error [70734.56]\n",
      "state: [-0.05209703 -0.60149762  0.01563458  0.83139623] reward: 1.0 action: 0\n",
      "value_next [485622.] td_target [461341.9] td_error [68062.59]\n",
      "state: [-0.06412698 -0.79682972  0.0322625   1.12895495] reward: 1.0 action: 0\n",
      "value_next [582428.1] td_target [553307.7] td_error [65607.375]\n",
      "state: [-0.08006358 -0.99235905  0.0548416   1.43157984] reward: 1.0 action: 0\n",
      "value_next [682362.56] td_target [648245.44] td_error [63350.25]\n",
      "state: [-0.09991076 -1.1881133   0.0834732   1.74088512] reward: 1.0 action: 0\n",
      "value_next [785814.75] td_target [746525.] td_error [61262.125]\n",
      "state: [-0.12367302 -1.38408062  0.1182909   2.05832523] reward: 1.0 action: 0\n",
      "value_next [893167.25] td_target [848509.9] td_error [59301.875]\n",
      "state: [-0.15135464 -1.5801956   0.15945741  2.38513843] reward: 1.0 action: 0\n",
      "value_next [1004778.56] td_target [954540.6] td_error [57415.438]\n",
      "state: [-0.18295855 -1.77632276  0.20716018  2.72227901] reward: 1.0 action: 0\n",
      "value_next [1120961.9] td_target [1064914.8] td_error [55532.812]\n",
      "state: [-0.218485   -1.97223787  0.26160576  3.07033878] reward: 1.0 action: 0\n",
      "value_next [1241961.1] td_target [1179864.] td_error [53567.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 78/200\n",
      "=======================\n",
      "state: [ 0.04510904 -0.22245601 -0.01759662  0.27253028] reward: 1.0 action: 0\n",
      "value_next [306742.06] td_target [291405.97] td_error [76317.45]\n",
      "state: [ 0.04065992 -0.41732251 -0.01214602  0.55961164] reward: 1.0 action: 0\n",
      "value_next [401556.06] td_target [381479.25] td_error [73358.91]\n",
      "state: [ 0.03231347 -0.61227189 -0.00095378  0.8484433 ] reward: 1.0 action: 0\n",
      "value_next [498883.1] td_target [473939.94] td_error [70636.84]\n",
      "state: [ 0.02006803 -0.80738082  0.01601508  1.14082615] reward: 1.0 action: 0\n",
      "value_next [599095.06] td_target [569141.3] td_error [68141.19]\n",
      "state: [ 0.00392041 -1.00270841  0.03883161  1.4384882 ] reward: 1.0 action: 0\n",
      "value_next [702585.94] td_target [667457.6] td_error [65852.06]\n",
      "state: [-0.01613375 -1.19828677  0.06760137  1.7430482 ] reward: 1.0 action: 0\n",
      "value_next [809758.25] td_target [769271.3] td_error [63739.188]\n",
      "state: [-0.04009949 -1.39410966  0.10246233  2.05597129] reward: 1.0 action: 0\n",
      "value_next [921007.5] td_target [874958.1] td_error [61761.062]\n",
      "state: [-0.06798168 -1.59011896  0.14358176  2.37851447] reward: 1.0 action: 0\n",
      "value_next [1036705.6] td_target [984871.3] td_error [59862.375]\n",
      "state: [-0.09978406 -1.7861884   0.19115205  2.7116604 ] reward: 1.0 action: 0\n",
      "value_next [1157180.] td_target [1099322.] td_error [57972.875]\n",
      "state: [-0.13550783 -1.98210507  0.24538526  3.05603984] reward: 1.0 action: 0\n",
      "value_next [1282689.] td_target [1218555.5] td_error [56003.375]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 79/200\n",
      "========================\n",
      "state: [-0.05016843 -0.2210397   0.04153878  0.2717816 ] reward: 1.0 action: 0\n",
      "value_next [342634.22] td_target [325503.5] td_error [82460.45]\n",
      "state: [-0.05458923 -0.41672901  0.04697441  0.57727124] reward: 1.0 action: 0\n",
      "value_next [445648.7] td_target [423367.25] td_error [79211.56]\n",
      "state: [-0.06292381 -0.6124768   0.05851983  0.88437444] reward: 1.0 action: 0\n",
      "value_next [551335.3] td_target [523769.53] td_error [76201.16]\n",
      "state: [-0.07517334 -0.80834244  0.07620732  1.194865  ] reward: 1.0 action: 0\n",
      "value_next [660083.06] td_target [627079.9] td_error [73418.75]\n",
      "state: [-0.09134019 -1.00436391  0.10010462  1.51042708] reward: 1.0 action: 0\n",
      "value_next [772302.56] td_target [733688.44] td_error [70840.44]\n",
      "state: [-0.11142747 -1.20054613  0.13031316  1.83260883] reward: 1.0 action: 0\n",
      "value_next [888408.2] td_target [843988.75] td_error [68429.06]\n",
      "state: [-0.13543839 -1.39684757  0.16696534  2.16276813] reward: 1.0 action: 0\n",
      "value_next [1008798.6] td_target [958359.7] td_error [66132.875]\n",
      "state: [-0.16337534 -1.59316465  0.2102207   2.50200871] reward: 1.0 action: 0\n",
      "value_next [1133834.5] td_target [1077143.8] td_error [63883.938]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 80/200\n",
      "========================\n",
      "state: [ 0.01842006 -0.22110932  0.04745909  0.32080661] reward: 1.0 action: 0\n",
      "value_next [355576.47] td_target [337798.66] td_error [85786.53]\n",
      "state: [ 0.01399787 -0.41687388  0.05387523  0.62807055] reward: 1.0 action: 0\n",
      "value_next [462679.28] td_target [439546.3] td_error [82443.22]\n",
      "state: [ 0.00566039 -0.61270471  0.06643664  0.93722225] reward: 1.0 action: 0\n",
      "value_next [572574.9] td_target [543947.1] td_error [79347.97]\n",
      "state: [-0.0065937  -0.80865658  0.08518108  1.25001962] reward: 1.0 action: 0\n",
      "value_next [685671.25] td_target [651388.7] td_error [76486.69]\n",
      "state: [-0.02276683 -1.00476069  0.11018147  1.56812223] reward: 1.0 action: 0\n",
      "value_next [802394.7] td_target [762275.94] td_error [73831.44]\n",
      "state: [-0.04286205 -1.20101263  0.14154392  1.89304292] reward: 1.0 action: 0\n",
      "value_next [923171.1] td_target [877013.56] td_error [71340.69]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.0668823  -1.39735839  0.17940478  2.22609129] reward: 1.0 action: 0\n",
      "value_next [1048404.9] td_target [995985.6] td_error [68957.]\n",
      "state: [-0.09482947 -1.5936783   0.2239266   2.56830729] reward: 1.0 action: 0\n",
      "value_next [1178455.8] td_target [1119534.] td_error [66606.]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 81/200\n",
      "========================\n",
      "state: [ 0.02563978 -0.18997378 -0.03525327  0.330112  ] reward: 1.0 action: 0\n",
      "value_next [348557.88] td_target [331130.97] td_error [83815.58]\n",
      "state: [ 0.02184031 -0.38457662 -0.02865103  0.61147259] reward: 1.0 action: 0\n",
      "value_next [453280.28] td_target [430617.25] td_error [80597.28]\n",
      "state: [ 0.01414878 -0.57928666 -0.01642158  0.89499552] reward: 1.0 action: 0\n",
      "value_next [560795.2] td_target [532756.44] td_error [77647.5]\n",
      "state: [ 0.00256304 -0.77418213  0.00147833  1.18247159] reward: 1.0 action: 0\n",
      "value_next [671525.06] td_target [637949.8] td_error [74952.69]\n",
      "state: [-0.0129206  -0.96932324  0.02512777  1.47561756] reward: 1.0 action: 0\n",
      "value_next [785913.25] td_target [746618.56] td_error [72489.125]\n",
      "state: [-0.03230706 -1.164743    0.05464012  1.77604146] reward: 1.0 action: 0\n",
      "value_next [904409.8] td_target [859190.3] td_error [70222.25]\n",
      "state: [-0.05560192 -1.36043624  0.09016095  2.08519944] reward: 1.0 action: 0\n",
      "value_next [1027456.2] td_target [976084.4] td_error [68105.31]\n",
      "state: [-0.08281065 -1.55634608  0.13186494  2.40434201] reward: 1.0 action: 0\n",
      "value_next [1155465.8] td_target [1097693.5] td_error [66076.81]\n",
      "state: [-0.11393757 -1.75234784  0.17995178  2.73444827] reward: 1.0 action: 0\n",
      "value_next [1288803.2] td_target [1224364.1] td_error [64058.375]\n",
      "state: [-0.14898453 -1.94823041  0.23464074  3.07614831] reward: 1.0 action: 0\n",
      "value_next [1427755.4] td_target [1356368.6] td_error [61951.625]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 82/200\n",
      "=========================\n",
      "state: [ 0.01724914 -0.1696615   0.00706518  0.25075962] reward: 1.0 action: 0\n",
      "value_next [349599.5] td_target [332120.53] td_error [90870.61]\n",
      "state: [ 0.01385591 -0.36488363  0.01208037  0.54566265] reward: 1.0 action: 0\n",
      "value_next [461440.8] td_target [438369.78] td_error [87306.]\n",
      "state: [ 0.00655824 -0.56017322  0.02299362  0.8421272 ] reward: 1.0 action: 0\n",
      "value_next [576129.25] td_target [547323.75] td_error [84017.19]\n",
      "state: [-0.00464523 -0.75560135  0.03983617  1.14195144] reward: 1.0 action: 0\n",
      "value_next [694095.8] td_target [659392.] td_error [80992.81]\n",
      "state: [-0.01975726 -0.95122064  0.06267519  1.44685631] reward: 1.0 action: 0\n",
      "value_next [815796.44] td_target [775007.6] td_error [78208.81]\n",
      "state: [-0.03878167 -1.14705496  0.09161232  1.75844546] reward: 1.0 action: 0\n",
      "value_next [941694.6] td_target [894610.9] td_error [75628.]\n",
      "state: [-0.06172277 -1.34308739  0.12678123  2.0781574 ] reward: 1.0 action: 0\n",
      "value_next [1072242.] td_target [1018630.9] td_error [73198.75]\n",
      "state: [-0.08858452 -1.53924593  0.16834438  2.40720757] reward: 1.0 action: 0\n",
      "value_next [1207858.1] td_target [1147466.2] td_error [70853.375]\n",
      "state: [-0.11936943 -1.73538672  0.21648853  2.74651944] reward: 1.0 action: 0\n",
      "value_next [1348904.2] td_target [1281460.] td_error [68505.125]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 83/200\n",
      "=========================\n",
      "state: [-0.02441266 -0.20399507 -0.01643247  0.24933081] reward: 1.0 action: 0\n",
      "value_next [372811.94] td_target [354172.34] td_error [92073.516]\n",
      "state: [-0.02849256 -0.39887854 -0.01144586  0.53678562] reward: 1.0 action: 0\n",
      "value_next [487102.25] td_target [462748.12] td_error [88415.125]\n",
      "state: [-3.64701355e-02 -5.93837709e-01 -7.10142675e-04  8.25840204e-01] reward: 1.0 action: 0\n",
      "value_next [604281.56] td_target [574068.5] td_error [85050.125]\n",
      "state: [-0.04834689 -0.78894994  0.01580666  1.1182997 ] reward: 1.0 action: 0\n",
      "value_next [724803.06] td_target [688563.9] td_error [81966.25]\n",
      "state: [-0.06412589 -0.98427569  0.03817266  1.41589868] reward: 1.0 action: 0\n",
      "value_next [849145.25] td_target [806689.] td_error [79139.69]\n",
      "state: [-0.0838114  -1.17984908  0.06649063  1.72026509] reward: 1.0 action: 0\n",
      "value_next [977795.] td_target [928906.25] td_error [76533.94]\n",
      "state: [-0.10740838 -1.37566671  0.10089593  2.03287635] reward: 1.0 action: 0\n",
      "value_next [1111231.] td_target [1055670.4] td_error [74099.06]\n",
      "state: [-0.13492172 -1.57167425  0.14155346  2.35500551] reward: 1.0 action: 0\n",
      "value_next [1249902.1] td_target [1187408.] td_error [71769.25]\n",
      "state: [-0.1663552  -1.76775036  0.18865357  2.68765582] reward: 1.0 action: 0\n",
      "value_next [1394203.8] td_target [1324494.5] td_error [69460.125]\n",
      "state: [-0.20171021 -1.96368828  0.24240668  3.03148383] reward: 1.0 action: 0\n",
      "value_next [1544448.9] td_target [1467227.4] td_error [67066.125]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 84/200\n",
      "=========================\n",
      "state: [-0.03764946 -0.18382338  0.02984155  0.31902375] reward: 1.0 action: 0\n",
      "value_next [410699.16] td_target [390165.2] td_error [98365.94]\n",
      "state: [-0.04132593 -0.37935735  0.03622202  0.62096624] reward: 1.0 action: 0\n",
      "value_next [533590.75] td_target [506912.22] td_error [94524.28]\n",
      "state: [-0.04891308 -0.57496593  0.04864135  0.92483359] reward: 1.0 action: 0\n",
      "value_next [659673.3] td_target [626690.6] td_error [90975.19]\n",
      "state: [-0.0604124  -0.77070993  0.06713802  1.23239724] reward: 1.0 action: 0\n",
      "value_next [789416.56] td_target [749946.75] td_error [87702.44]\n",
      "state: [-0.07582659 -0.96662792  0.09178596  1.5453359 ] reward: 1.0 action: 0\n",
      "value_next [923312.1] td_target [877147.5] td_error [84675.75]\n",
      "state: [-0.09515915 -1.16272475  0.12269268  1.86519017] reward: 1.0 action: 0\n",
      "value_next [1061851.6] td_target [1008760.] td_error [81848.25]\n",
      "state: [-0.11841365 -1.35895824  0.15999649  2.1933089 ] reward: 1.0 action: 0\n",
      "value_next [1205505.6] td_target [1145231.4] td_error [79157.375]\n",
      "state: [-0.14559281 -1.55522361  0.20386266  2.53078539] reward: 1.0 action: 0\n",
      "value_next [1354696.9] td_target [1286963.] td_error [76520.25]\n",
      "state: [-0.17669728 -1.7513357   0.25447837  2.87838318] reward: 1.0 action: 0\n",
      "value_next [1509770.8] td_target [1434283.2] td_error [73832.75]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 85/200\n",
      "==========================\n",
      "state: [ 0.03711294 -0.1784698  -0.01094427  0.30047772] reward: 1.0 action: 0\n",
      "value_next [398862.38] td_target [378920.25] td_error [100014.375]\n",
      "state: [ 0.03354354 -0.37343406 -0.00493472  0.58968905] reward: 1.0 action: 0\n",
      "value_next [522746.] td_target [496609.7] td_error [96136.125]\n",
      "state: [ 0.02607486 -0.56848657  0.00685907  0.88081344] reward: 1.0 action: 0\n",
      "value_next [649847.3] td_target [617355.94] td_error [92570.5]\n",
      "state: [ 0.01470513 -0.76370102  0.02447533  1.17564479] reward: 1.0 action: 0\n",
      "value_next [780652.3] td_target [741620.7] td_error [89301.75]\n",
      "state: [-5.68889991e-04 -9.59132283e-01  4.79882302e-02  1.47589891e+00] reward: 1.0 action: 0\n",
      "value_next [915670.44] td_target [869887.9] td_error [86301.31]\n",
      "state: [-0.01975154 -1.15480651  0.07750621  1.78317548] reward: 1.0 action: 0\n",
      "value_next [1055416.2] td_target [1002646.44] td_error [83527.06]\n",
      "state: [-0.04284767 -1.35070943  0.11316972  2.0989117 ] reward: 1.0 action: 0\n",
      "value_next [1200390.2] td_target [1140371.8] td_error [80920.875]\n",
      "state: [-0.06986185 -1.54677227  0.15514795  2.42432559] reward: 1.0 action: 0\n",
      "value_next [1351056.] td_target [1283504.1] td_error [78407.75]\n",
      "state: [-0.1007973  -1.74285508  0.20363446  2.76034772] reward: 1.0 action: 0\n",
      "value_next [1507812.8] td_target [1432423.1] td_error [75891.875]\n",
      "state: [-0.1356544  -1.93872793  0.25884142  3.10754194] reward: 1.0 action: 0\n",
      "value_next [1670963.5] td_target [1587416.2] td_error [73254.125]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 86/200\n",
      "==========================\n",
      "state: [ 0.03361132 -0.23479156 -0.04128334  0.3037959 ] reward: 1.0 action: 0\n",
      "value_next [424817.75] td_target [403577.84] td_error [101138.31]\n",
      "state: [ 0.02891549 -0.4293016  -0.03520742  0.5831785 ] reward: 1.0 action: 0\n",
      "value_next [551288.7] td_target [523725.25] td_error [97199.78]\n",
      "state: [ 0.02032945 -0.62391307 -0.02354385  0.86456597] reward: 1.0 action: 0\n",
      "value_next [681069.8] td_target [647017.3] td_error [93587.75]\n",
      "state: [ 0.00785119 -0.81870677 -0.00625253  1.14975428] reward: 1.0 action: 0\n",
      "value_next [814663.8] td_target [773931.6] td_error [90286.625]\n",
      "state: [-0.00852294 -1.01374655  0.01674256  1.44047001] reward: 1.0 action: 0\n",
      "value_next [952597.7] td_target [904968.8] td_error [87268.75]\n",
      "state: [-0.02879787 -1.20907068  0.04555196  1.73833721] reward: 1.0 action: 0\n",
      "value_next [1095406.6] td_target [1040637.25] td_error [84493.94]\n",
      "state: [-0.05297929 -1.40468108  0.0803187   2.0448361 ] reward: 1.0 action: 0\n",
      "value_next [1243616.] td_target [1181436.1] td_error [81908.]\n",
      "state: [-0.08107291 -1.60053043  0.12121542  2.36125136] reward: 1.0 action: 0\n",
      "value_next [1397719.4] td_target [1327834.4] td_error [79439.375]\n",
      "state: [-0.11308352 -1.7965064   0.16844045  2.68860846] reward: 1.0 action: 0\n",
      "value_next [1558154.1] td_target [1480247.4] td_error [76996.75]\n",
      "state: [-0.14901365 -1.99241344  0.22221262  3.02759779] reward: 1.0 action: 0\n",
      "value_next [1725269.4] td_target [1639006.9] td_error [74465.5]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 87/200\n",
      "==========================\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [ 0.01501728 -0.1514205   0.01094453  0.29696696] reward: 1.0 action: 0\n",
      "value_next [431915.47] td_target [410320.7] td_error [110018.125]\n",
      "state: [ 0.01198887 -0.34669675  0.01688387  0.59308142] reward: 1.0 action: 0\n",
      "value_next [567717.8] td_target [539332.94] td_error [105727.94]\n",
      "state: [ 0.00505494 -0.54205093  0.0287455   0.89103454] reward: 1.0 action: 0\n",
      "value_next [706985.6] td_target [671637.3] td_error [101770.44]\n",
      "state: [-0.00578608 -0.73755082  0.04656619  1.19261325] reward: 1.0 action: 0\n",
      "value_next [850237.7] td_target [807726.8] td_error [98129.375]\n",
      "state: [-0.0205371  -0.93324402  0.07041846  1.49952036] reward: 1.0 action: 0\n",
      "value_next [998017.2] td_target [948117.3] td_error [94772.19]\n",
      "state: [-0.03920198 -1.12914722  0.10040887  1.81333281] reward: 1.0 action: 0\n",
      "value_next [1150870.6] td_target [1093328.1] td_error [91650.94]\n",
      "state: [-0.06178492 -1.32523379  0.13667552  2.13545183] reward: 1.0 action: 0\n",
      "value_next [1309325.] td_target [1243859.8] td_error [88699.25]\n",
      "state: [-0.0882896  -1.52141889  0.17938456  2.46704279] reward: 1.0 action: 0\n",
      "value_next [1473861.9] td_target [1400169.8] td_error [85831.625]\n",
      "state: [-0.11871798 -1.71754234  0.22872541  2.80896405] reward: 1.0 action: 0\n",
      "value_next [1644885.5] td_target [1562642.2] td_error [82938.125]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 88/200\n",
      "==========================\n",
      "state: [ 0.01494662 -0.15779324 -0.01472602  0.27965502] reward: 1.0 action: 0\n",
      "value_next [438449.53] td_target [416528.06] td_error [111941.375]\n",
      "state: [ 0.01179075 -0.35270206 -0.00913292  0.56765726] reward: 1.0 action: 0\n",
      "value_next [576488.44] td_target [547665.] td_error [107538.75]\n",
      "state: [ 0.00473671 -0.54769472  0.00222023  0.85744901] reward: 1.0 action: 0\n",
      "value_next [718002.] td_target [682102.9] td_error [103490.69]\n",
      "state: [-0.00621718 -0.74284685  0.01936921  1.15082923] reward: 1.0 action: 0\n",
      "value_next [863531.3] td_target [820355.75] td_error [99780.25]\n",
      "state: [-0.02107412 -0.93821613  0.04238579  1.44952236] reward: 1.0 action: 0\n",
      "value_next [1013643.1] td_target [962961.94] td_error [96376.125]\n",
      "state: [-0.03983844 -1.13383267  0.07137624  1.75514133] reward: 1.0 action: 0\n",
      "value_next [1168911.2] td_target [1110466.6] td_error [93232.75]\n",
      "state: [-0.0625151  -1.32968757  0.10647907  2.06914245] reward: 1.0 action: 0\n",
      "value_next [1329894.] td_target [1263400.2] td_error [90287.]\n",
      "state: [-0.08910885 -1.52571914  0.14786192  2.39277018] reward: 1.0 action: 0\n",
      "value_next [1497111.5] td_target [1422256.9] td_error [87457.75]\n",
      "state: [-0.11962323 -1.72179657  0.19571732  2.72699021] reward: 1.0 action: 0\n",
      "value_next [1671013.6] td_target [1587463.9] td_error [84639.25]\n",
      "state: [-0.15405916 -1.91770125  0.25025712  3.07241128] reward: 1.0 action: 0\n",
      "value_next [1851947.6] td_target [1759351.2] td_error [81702.5]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 89/200\n",
      "===========================\n",
      "state: [ 0.02730349 -0.224557   -0.02285081  0.3129352 ] reward: 1.0 action: 0\n",
      "value_next [479570.] td_target [455592.5] td_error [114535.97]\n",
      "state: [ 0.02281235 -0.41934609 -0.01659211  0.59832501] reward: 1.0 action: 0\n",
      "value_next [622565.4] td_target [591438.1] td_error [110036.31]\n",
      "state: [ 0.01442542 -0.614232   -0.00462561  0.88573578] reward: 1.0 action: 0\n",
      "value_next [769226.75] td_target [730766.4] td_error [105903.5]\n",
      "state: [ 0.00214078 -0.80929085  0.01308911  1.176961  ] reward: 1.0 action: 0\n",
      "value_next [920117.2] td_target [874112.3] td_error [102118.19]\n",
      "state: [-0.01404503 -1.00458036  0.03662833  1.47371826] reward: 1.0 action: 0\n",
      "value_next [1075824.6] td_target [1022034.4] td_error [98647.25]\n",
      "state: [-0.03413664 -1.20013036  0.06610269  1.77761284] reward: 1.0 action: 0\n",
      "value_next [1236940.5] td_target [1175094.5] td_error [95441.875]\n",
      "state: [-0.05813925 -1.39593142  0.10165495  2.09009302] reward: 1.0 action: 0\n",
      "value_next [1404040.4] td_target [1333839.4] td_error [92437.125]\n",
      "state: [-0.08605787 -1.59192104  0.14345681  2.4123949 ] reward: 1.0 action: 0\n",
      "value_next [1577657.2] td_target [1498775.4] td_error [89547.125]\n",
      "state: [-0.1178963  -1.78796735  0.19170471  2.74547538] reward: 1.0 action: 0\n",
      "value_next [1758252.] td_target [1670340.4] td_error [86663.875]\n",
      "state: [-0.15365564 -1.98385031  0.24661422  3.0899338 ] reward: 1.0 action: 0\n",
      "value_next [1946176.6] td_target [1848868.8] td_error [83651.625]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 90/200\n",
      "===========================\n",
      "state: [-0.00539039 -0.22618424 -0.02301653  0.27773731] reward: 1.0 action: 0\n",
      "value_next [494476.72] td_target [469753.88] td_error [118983.84]\n",
      "state: [-0.00991407 -0.4209704  -0.01746179  0.5630729 ] reward: 1.0 action: 0\n",
      "value_next [642723.3] td_target [610588.1] td_error [114241.5]\n",
      "state: [-0.01833348 -0.61584303 -0.00620033  0.85020377] reward: 1.0 action: 0\n",
      "value_next [794689.25] td_target [754955.75] td_error [109883.25]\n",
      "state: [-0.03065034 -0.81087988  0.01080374  1.14093054] reward: 1.0 action: 0\n",
      "value_next [950957.7] td_target [903410.8] td_error [105890.75]\n",
      "state: [-0.04686794 -1.00614138  0.03362236  1.43698193] reward: 1.0 action: 0\n",
      "value_next [1112138.2] td_target [1056532.4] td_error [102230.875]\n",
      "state: [-0.06699076 -1.20166127  0.06236199  1.73997912] reward: 1.0 action: 0\n",
      "value_next [1278847.] td_target [1214905.6] td_error [98854.875]\n",
      "state: [-0.09102399 -1.39743555  0.09716158  2.05139225] reward: 1.0 action: 0\n",
      "value_next [1451685.2] td_target [1379102.] td_error [95697.625]\n",
      "state: [-0.1189727  -1.59340897  0.13818942  2.3724865 ] reward: 1.0 action: 0\n",
      "value_next [1631212.9] td_target [1549653.2] td_error [92672.25]\n",
      "state: [-0.15084088 -1.78945898  0.18563915  2.70425662] reward: 1.0 action: 0\n",
      "value_next [1817918.4] td_target [1727023.4] td_error [89669.5]\n",
      "state: [-0.18663006 -1.98537727  0.23972428  3.04734976] reward: 1.0 action: 0\n",
      "value_next [2012181.5] td_target [1911573.4] td_error [86552.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 91/200\n",
      "===========================\n",
      "state: [ 0.01158676 -0.18385527 -0.0368654   0.29688648] reward: 1.0 action: 0\n",
      "value_next [498916.2] td_target [473971.38] td_error [122605.16]\n",
      "state: [ 0.00790965 -0.37843283 -0.03092767  0.57771853] reward: 1.0 action: 0\n",
      "value_next [651097.94] td_target [618544.06] td_error [117774.875]\n",
      "state: [ 3.40998263e-04 -5.73107964e-01 -1.93732982e-02  8.60500302e-01] reward: 1.0 action: 0\n",
      "value_next [807133.5] td_target [766777.8] td_error [113343.81]\n",
      "state: [-0.01112116 -0.76796079 -0.00216329  1.1470293 ] reward: 1.0 action: 0\n",
      "value_next [967624.9] td_target [919244.6] td_error [109292.5]\n",
      "state: [-0.02648038 -0.96305442  0.02077729  1.43903306] reward: 1.0 action: 0\n",
      "value_next [1133200.9] td_target [1076541.9] td_error [105586.69]\n",
      "state: [-0.04574147 -1.15842612  0.04955795  1.73813544] reward: 1.0 action: 0\n",
      "value_next [1304497.4] td_target [1239273.5] td_error [102177.125]\n",
      "state: [-0.06890999 -1.3540765   0.08432066  2.04581477] reward: 1.0 action: 0\n",
      "value_next [1482135.5] td_target [1408029.8] td_error [98997.]\n",
      "state: [-0.09599152 -1.54995645  0.12523696  2.3633517 ] reward: 1.0 action: 0\n",
      "value_next [1666696.4] td_target [1583362.5] td_error [95959.25]\n",
      "state: [-0.12699065 -1.74595135  0.17250399  2.69176501] reward: 1.0 action: 0\n",
      "value_next [1858691.2] td_target [1765757.6] td_error [92953.875]\n",
      "state: [-0.16190967 -1.94186274  0.22633929  3.03173556] reward: 1.0 action: 0\n",
      "value_next [2058523.2] td_target [1955598.1] td_error [89841.75]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 92/200\n",
      "============================\n",
      "state: [-0.01005107 -0.19563694 -0.01367048  0.24937699] reward: 1.0 action: 0\n",
      "value_next [516128.1] td_target [490322.7] td_error [129628.16]\n",
      "state: [-0.01396381 -0.39056102 -0.00868294  0.5377168 ] reward: 1.0 action: 0\n",
      "value_next [676224.] td_target [642413.8] td_error [124409.625]\n",
      "state: [-0.02177503 -0.58555983  0.0020714   0.82765123] reward: 1.0 action: 0\n",
      "value_next [840216.4] td_target [798206.56] td_error [119608.375]\n",
      "state: [-0.03348622 -0.78071004  0.01862442  1.12098491] reward: 1.0 action: 0\n",
      "value_next [1008730.8] td_target [958295.25] td_error [115205.]\n",
      "state: [-0.04910042 -0.97607124  0.04104412  1.41945116] reward: 1.0 action: 0\n",
      "value_next [1182421.5] td_target [1123301.4] td_error [111164.25]\n",
      "state: [-0.06862185 -1.17167641  0.06943315  1.72467539] reward: 1.0 action: 0\n",
      "value_next [1361949.1] td_target [1293852.6] td_error [107433.5]\n",
      "state: [-0.09205538 -1.36752063  0.10392665  2.03813084] reward: 1.0 action: 0\n",
      "value_next [1547957.6] td_target [1470560.8] td_error [103941.375]\n",
      "state: [-0.11940579 -1.56354755  0.14468927  2.36108418] reward: 1.0 action: 0\n",
      "value_next [1741045.] td_target [1653993.8] td_error [100594.875]\n",
      "state: [-0.15067674 -1.75963325  0.19191095  2.69452969] reward: 1.0 action: 0\n",
      "value_next [1941731.2] td_target [1844645.6] td_error [97274.5]\n",
      "state: [-0.18586941 -1.95556779  0.24580155  3.03911203] reward: 1.0 action: 0\n",
      "value_next [2150418.] td_target [2042898.1] td_error [93832.625]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 93/200\n",
      "============================\n",
      "state: [ 0.04678579 -0.15688679 -0.0063079   0.24674768] reward: 1.0 action: 0\n",
      "value_next [509588.8] td_target [484110.38] td_error [136213.72]\n",
      "state: [ 0.04364805 -0.35191809 -0.00137295  0.5374343 ] reward: 1.0 action: 0\n",
      "value_next [675992.8] td_target [642194.2] td_error [130775.94]\n",
      "state: [ 0.03660969 -0.54702071  0.00937574  0.82968431] reward: 1.0 action: 0\n",
      "value_next [846427.56] td_target [804107.2] td_error [125769.]\n",
      "state: [ 0.02566928 -0.74226956  0.02596942  1.12530114] reward: 1.0 action: 0\n",
      "value_next [1021535.1] td_target [970459.4] td_error [121172.625]\n",
      "state: [ 0.01082389 -0.93772206  0.04847545  1.42601519] reward: 1.0 action: 0\n",
      "value_next [1201986.2] td_target [1141887.9] td_error [116949.125]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.00793056 -1.13340838  0.07699575  1.73344605] reward: 1.0 action: 0\n",
      "value_next [1388457.2] td_target [1319035.4] td_error [113043.25]\n",
      "state: [-0.03059872 -1.3293199   0.11166467  2.04905707] reward: 1.0 action: 0\n",
      "value_next [1581605.1] td_target [1502525.9] td_error [109379.125]\n",
      "state: [-0.05718512 -1.5253954   0.15264581  2.37409981] reward: 1.0 action: 0\n",
      "value_next [1782037.2] td_target [1692936.4] td_error [105858.75]\n",
      "state: [-0.08769303 -1.72150478  0.20012781  2.7095472 ] reward: 1.0 action: 0\n",
      "value_next [1990277.8] td_target [1890764.9] td_error [102356.5]\n",
      "state: [-0.12212312 -1.9174304   0.25431875  3.05601558] reward: 1.0 action: 0\n",
      "value_next [2206724.8] td_target [2096389.5] td_error [98716.375]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 94/200\n",
      "============================\n",
      "state: [-0.01799269 -0.23194493  0.01157479  0.27977096] reward: 1.0 action: 0\n",
      "value_next [585289.4] td_target [556025.9] td_error [140678.84]\n",
      "state: [-0.02263158 -0.42723006  0.01717021  0.57608196] reward: 1.0 action: 0\n",
      "value_next [760401.9] td_target [722382.75] td_error [135000.31]\n",
      "state: [-0.03117619 -0.62258844  0.02869185  0.87412412] reward: 1.0 action: 0\n",
      "value_next [939782.6] td_target [892794.5] td_error [129763.44]\n",
      "state: [-0.04362795 -0.81808849  0.04617433  1.17568774] reward: 1.0 action: 0\n",
      "value_next [1124104.8] td_target [1067900.5] td_error [124944.56]\n",
      "state: [-0.05998972 -1.01377895  0.06968808  1.48248101] reward: 1.0 action: 0\n",
      "value_next [1314067.4] td_target [1248365.] td_error [120501.625]\n",
      "state: [-0.0802653 -1.2096783  0.0993377  1.7960885] reward: 1.0 action: 0\n",
      "value_next [1510368.5] td_target [1434851.] td_error [116371.625]\n",
      "state: [-0.10445887 -1.4057623   0.13525947  2.11792176] reward: 1.0 action: 0\n",
      "value_next [1713678.1] td_target [1627995.2] td_error [112469.625]\n",
      "state: [-0.13257412 -1.60194935  0.17761791  2.44915962] reward: 1.0 action: 0\n",
      "value_next [1924603.6] td_target [1828374.4] td_error [108684.75]\n",
      "state: [-0.1646131  -1.79808337  0.2266011   2.79067763] reward: 1.0 action: 0\n",
      "value_next [2143653.] td_target [2036471.4] td_error [104878.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 95/200\n",
      "============================\n",
      "state: [-0.01139975 -0.21927697  0.02859012  0.34554213] reward: 1.0 action: 0\n",
      "value_next [625908.8] td_target [594614.4] td_error [146148.72]\n",
      "state: [-0.01578529 -0.4147937   0.03550097  0.64710173] reward: 1.0 action: 0\n",
      "value_next [808885.8] td_target [768442.5] td_error [140332.38]\n",
      "state: [-0.02408116 -0.61039183  0.048443    0.95074915] reward: 1.0 action: 0\n",
      "value_next [996421.3] td_target [946601.25] td_error [134964.12]\n",
      "state: [-0.036289   -0.80613115  0.06745798  1.25825036] reward: 1.0 action: 0\n",
      "value_next [1189212.4] td_target [1129752.8] td_error [130014.44]\n",
      "state: [-0.05241162 -1.00204836  0.09262299  1.57127539] reward: 1.0 action: 0\n",
      "value_next [1387976.2] td_target [1318578.4] td_error [125433.125]\n",
      "state: [-0.07245259 -1.19814555  0.1240485   1.89135257] reward: 1.0 action: 0\n",
      "value_next [1593420.2] td_target [1513750.2] td_error [121148.]\n",
      "state: [-0.0964155  -1.39437672  0.16187555  2.21981431] reward: 1.0 action: 0\n",
      "value_next [1806210.4] td_target [1715900.9] td_error [117060.75]\n",
      "state: [-0.12430304 -1.59063204  0.20627184  2.55773258] reward: 1.0 action: 0\n",
      "value_next [2026934.1] td_target [1925588.4] td_error [113046.25]\n",
      "state: [-0.15611568 -1.78671996  0.25742649  2.90584404] reward: 1.0 action: 0\n",
      "value_next [2256058.8] td_target [2143256.8] td_error [108947.625]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 96/200\n",
      "=============================\n",
      "state: [ 0.04066997 -0.18525793 -0.04462078  0.3226898 ] reward: 1.0 action: 0\n",
      "value_next [590992.9] td_target [561444.25] td_error [144321.25]\n",
      "state: [ 0.03696481 -0.37971703 -0.03816699  0.60097395] reward: 1.0 action: 0\n",
      "value_next [770206.8] td_target [731697.44] td_error [138652.06]\n",
      "state: [ 0.02937047 -0.57428485 -0.02614751  0.88139478] reward: 1.0 action: 0\n",
      "value_next [953943.2] td_target [906247.] td_error [133460.38]\n",
      "state: [ 0.01788477 -0.76904206 -0.00851961  1.16574423] reward: 1.0 action: 0\n",
      "value_next [1142916.2] td_target [1085771.4] td_error [128720.19]\n",
      "state: [ 0.00250393 -0.96405209  0.01479527  1.45574393] reward: 1.0 action: 0\n",
      "value_next [1337865.9] td_target [1270973.6] td_error [124389.125]\n",
      "state: [-0.01677711 -1.15935248  0.04391015  1.7530121 ] reward: 1.0 action: 0\n",
      "value_next [1539536.5] td_target [1462560.6] td_error [120407.5]\n",
      "state: [-0.03996416 -1.35494418  0.07897039  2.05902229] reward: 1.0 action: 0\n",
      "value_next [1748652.] td_target [1661220.4] td_error [116696.125]\n",
      "state: [-0.06706304 -1.5507785   0.12015084  2.37505146] reward: 1.0 action: 0\n",
      "value_next [1965886.9] td_target [1867593.5] td_error [113151.875]\n",
      "state: [-0.09807861 -1.74674142  0.16765187  2.70211608] reward: 1.0 action: 0\n",
      "value_next [2191830.] td_target [2082239.5] td_error [109643.5]\n",
      "state: [-0.13301344 -1.94263517  0.22169419  3.04089593] reward: 1.0 action: 0\n",
      "value_next [2426943.5] td_target [2305597.2] td_error [106008.25]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 97/200\n",
      "=============================\n",
      "state: [ 0.02567178 -0.17328745 -0.00657111  0.31818041] reward: 1.0 action: 0\n",
      "value_next [619950.1] td_target [588953.6] td_error [153619.97]\n",
      "state: [ 2.22060300e-02 -3.68315200e-01 -2.07497509e-04  6.08783821e-01] reward: 1.0 action: 0\n",
      "value_next [810107.] td_target [769602.6] td_error [147539.69]\n",
      "state: [ 0.01483973 -0.56343425  0.01196818  0.90140138] reward: 1.0 action: 0\n",
      "value_next [1004968.4] td_target [954720.94] td_error [141947.12]\n",
      "state: [ 0.00357104 -0.75871629  0.02999621  1.19782203] reward: 1.0 action: 0\n",
      "value_next [1205270.5] td_target [1145008.] td_error [136814.25]\n",
      "state: [-0.01160328 -0.95421333  0.05395265  1.49975323] reward: 1.0 action: 0\n",
      "value_next [1411775.] td_target [1341187.2] td_error [132094.]\n",
      "state: [-0.03068755 -1.14994755  0.08394771  1.80878173] reward: 1.0 action: 0\n",
      "value_next [1625241.2] td_target [1543980.1] td_error [127717.125]\n",
      "state: [-0.0536865  -1.34589934  0.12012335  2.12632599] reward: 1.0 action: 0\n",
      "value_next [1846398.4] td_target [1754079.4] td_error [123592.5]\n",
      "state: [-0.08060449 -1.54199288  0.16264987  2.45357807] reward: 1.0 action: 0\n",
      "value_next [2075909.8] td_target [1972115.2] td_error [119600.75]\n",
      "state: [-0.11144435 -1.73807924  0.21172143  2.79143408] reward: 1.0 action: 0\n",
      "value_next [2314332.5] td_target [2198616.8] td_error [115592.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 98/200\n",
      "=============================\n",
      "state: [-0.04440234 -0.17248882  0.0325567   0.28311509] reward: 1.0 action: 0\n",
      "value_next [655232.56] td_target [622471.94] td_error [162263.56]\n",
      "state: [-0.04785211 -0.36805964  0.038219    0.58588585] reward: 1.0 action: 0\n",
      "value_next [855904.94] td_target [813110.7] td_error [155697.88]\n",
      "state: [-0.05521331 -0.56369548  0.04993672  0.89035896] reward: 1.0 action: 0\n",
      "value_next [1061357.5] td_target [1008290.6] td_error [149631.25]\n",
      "state: [-0.06648722 -0.75945811  0.0677439   1.19831215] reward: 1.0 action: 0\n",
      "value_next [1272350.] td_target [1208733.5] td_error [144035.62]\n",
      "state: [-0.08167638 -0.95538812  0.09171014  1.51143451] reward: 1.0 action: 0\n",
      "value_next [1489668.6] td_target [1415186.1] td_error [138859.75]\n",
      "state: [-0.10078414 -1.1514935   0.12193883  1.83128142] reward: 1.0 action: 0\n",
      "value_next [1714092.4] td_target [1628388.8] td_error [134027.5]\n",
      "state: [-0.12381401 -1.34773653  0.15856446  2.15922144] reward: 1.0 action: 0\n",
      "value_next [1946361.1] td_target [1849044.] td_error [129436.25]\n",
      "state: [-0.15076874 -1.5440183   0.20174889  2.4963734 ] reward: 1.0 action: 0\n",
      "value_next [2187133.2] td_target [2077777.5] td_error [124953.5]\n",
      "state: [-0.18164911 -1.74016117  0.25167635  2.84353295] reward: 1.0 action: 0\n",
      "value_next [2436941.] td_target [2315095.] td_error [120411.5]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 99/200\n",
      "==============================\n",
      "state: [-0.03168004 -0.22242107  0.00308229  0.27492297] reward: 1.0 action: 0\n",
      "value_next [679026.9] td_target [645076.5] td_error [163293.69]\n",
      "state: [-0.03612846 -0.41758686  0.00858075  0.56857647] reward: 1.0 action: 0\n",
      "value_next [882054.2] td_target [837952.44] td_error [156653.69]\n",
      "state: [-0.0444802  -0.61282811  0.01995228  0.86395027] reward: 1.0 action: 0\n",
      "value_next [1089936.6] td_target [1035440.75] td_error [150542.44]\n",
      "state: [-0.05673676 -0.80821591  0.03723129  1.16283921] reward: 1.0 action: 0\n",
      "value_next [1303467.] td_target [1238294.6] td_error [144930.88]\n",
      "state: [-0.07290108 -1.00380238  0.06048807  1.46695908] reward: 1.0 action: 0\n",
      "value_next [1523464.6] td_target [1447292.4] td_error [139768.62]\n",
      "state: [-0.09297713 -1.19961043  0.08982725  1.77790666] reward: 1.0 action: 0\n",
      "value_next [1750746.8] td_target [1663210.4] td_error [134983.12]\n",
      "state: [-0.11696934 -1.39562166  0.12538539  2.09711181] reward: 1.0 action: 0\n",
      "value_next [1986097.1] td_target [1886793.2] td_error [130477.125]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.14488177 -1.59176201  0.16732762  2.42577943] reward: 1.0 action: 0\n",
      "value_next [2230228.] td_target [2118717.5] td_error [126124.125]\n",
      "state: [-0.17671701 -1.78788496  0.21584321  2.76482016] reward: 1.0 action: 0\n",
      "value_next [2483738.2] td_target [2359552.2] td_error [121765.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 100/200\n",
      "==============================\n",
      "state: [-0.03209103 -0.23968806 -0.03476896  0.24393539] reward: 1.0 action: 0\n",
      "value_next [681598.9] td_target [647519.94] td_error [163714.12]\n",
      "state: [-0.03688479 -0.43429658 -0.02989025  0.52545173] reward: 1.0 action: 0\n",
      "value_next [885086.5] td_target [840833.2] td_error [157001.94]\n",
      "state: [-0.04557072 -0.62898545 -0.01938121  0.80856816] reward: 1.0 action: 0\n",
      "value_next [1093391.6] td_target [1038723.] td_error [150846.]\n",
      "state: [-0.05815043 -0.82383652 -0.00320985  1.09509216] reward: 1.0 action: 0\n",
      "value_next [1307330.] td_target [1241964.5] td_error [145218.5]\n",
      "state: [-0.07462716 -1.01891605  0.01869199  1.38676624] reward: 1.0 action: 0\n",
      "value_next [1527749.] td_target [1451362.5] td_error [140074.12]\n",
      "state: [-0.09500548 -1.21426594  0.04642732  1.68523503] reward: 1.0 action: 0\n",
      "value_next [1755501.6] td_target [1667727.5] td_error [135347.88]\n",
      "state: [-0.1192908  -1.40989335  0.08013202  1.99200466] reward: 1.0 action: 0\n",
      "value_next [1991418.6] td_target [1891848.6] td_error [130951.75]\n",
      "state: [-0.14748867 -1.60575789  0.11997211  2.30839233] reward: 1.0 action: 0\n",
      "value_next [2236277.] td_target [2124464.] td_error [126772.75]\n",
      "state: [-0.17960382 -1.80175632  0.16613996  2.63546396] reward: 1.0 action: 0\n",
      "value_next [2490761.] td_target [2366224.] td_error [122667.]\n",
      "state: [-0.21563895 -1.99770459  0.21884924  2.97395994] reward: 1.0 action: 0\n",
      "value_next [2755413.] td_target [2617643.2] td_error [118454.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 101/200\n",
      "==============================\n",
      "state: [-0.0171184  -0.21766159 -0.00353862  0.28846045] reward: 1.0 action: 0\n",
      "value_next [719108.06] td_target [683153.6] td_error [172706.81]\n",
      "state: [-0.02147164 -0.4127329   0.00223059  0.58002523] reward: 1.0 action: 0\n",
      "value_next [933870.3] td_target [887177.8] td_error [165735.25]\n",
      "state: [-0.02972629 -0.60788604  0.01383109  0.87341001] reward: 1.0 action: 0\n",
      "value_next [1153813.9] td_target [1096124.1] td_error [159323.12]\n",
      "state: [-0.04188401 -0.80319329  0.03129929  1.17040908] reward: 1.0 action: 0\n",
      "value_next [1379777.] td_target [1310789.1] td_error [153439.62]\n",
      "state: [-0.05794788 -0.998708    0.05470748  1.47273799] reward: 1.0 action: 0\n",
      "value_next [1612624.6] td_target [1531994.4] td_error [148030.62]\n",
      "state: [-0.07792204 -1.19445444  0.08416224  1.78199435] reward: 1.0 action: 0\n",
      "value_next [1853218.5] td_target [1760558.5] td_error [143019.62]\n",
      "state: [-0.10181113 -1.39041595  0.11980212  2.09961069] reward: 1.0 action: 0\n",
      "value_next [2102385.5] td_target [1997267.2] td_error [138304.38]\n",
      "state: [-0.12961945 -1.58652066  0.16179434  2.42679696] reward: 1.0 action: 0\n",
      "value_next [2360876.5] td_target [2242833.8] td_error [133750.75]\n",
      "state: [-0.16134986 -1.78262477  0.21033028  2.76447167] reward: 1.0 action: 0\n",
      "value_next [2629324.2] td_target [2497859.] td_error [129192.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 102/200\n",
      "===============================\n",
      "state: [-0.00824681 -0.18786026 -0.03071212  0.28587502] reward: 1.0 action: 0\n",
      "value_next [713779.4] td_target [678091.4] td_error [174797.47]\n",
      "state: [-0.01200401 -0.38253105 -0.02499462  0.56871556] reward: 1.0 action: 0\n",
      "value_next [930340.2] td_target [883824.2] td_error [167775.56]\n",
      "state: [-0.01965463 -0.57729367 -0.01362031  0.85342059] reward: 1.0 action: 0\n",
      "value_next [1152133.4] td_target [1094527.8] td_error [161336.56]\n",
      "state: [-0.03120051 -0.77222735  0.0034481   1.14178973] reward: 1.0 action: 0\n",
      "value_next [1380020.1] td_target [1311020.1] td_error [155449.]\n",
      "state: [-0.04664505 -0.96739419  0.0262839   1.435552  ] reward: 1.0 action: 0\n",
      "value_next [1614890.2] td_target [1534146.8] td_error [150061.25]\n",
      "state: [-0.06599294 -1.1628302   0.05499494  1.73633134] reward: 1.0 action: 0\n",
      "value_next [1857636.5] td_target [1764755.6] td_error [145100.62]\n",
      "state: [-0.08924954 -1.35853433  0.08972156  2.04560408] reward: 1.0 action: 0\n",
      "value_next [2109122.2] td_target [2003667.1] td_error [140469.38]\n",
      "state: [-0.11642023 -1.55445524  0.13063364  2.36464614] reward: 1.0 action: 0\n",
      "value_next [2370148.5] td_target [2251642.] td_error [136042.]\n",
      "state: [-0.14750933 -1.75047545  0.17792657  2.69446833] reward: 1.0 action: 0\n",
      "value_next [2641408.2] td_target [2509338.8] td_error [131659.5]\n",
      "state: [-0.18251884 -1.94639293  0.23181593  3.0357399 ] reward: 1.0 action: 0\n",
      "value_next [2923435.5] td_target [2777264.8] td_error [127122.5]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 103/200\n",
      "===============================\n",
      "state: [-0.01590774 -0.17256586  0.0114386   0.34045292] reward: 1.0 action: 0\n",
      "value_next [771247.94] td_target [732686.56] td_error [185013.38]\n",
      "state: [-0.01935906 -0.36784868  0.01824766  0.63672086] reward: 1.0 action: 0\n",
      "value_next [1001444.4] td_target [951373.1] td_error [177671.19]\n",
      "state: [-0.02671603 -0.5632203   0.03098208  0.93509401] reward: 1.0 action: 0\n",
      "value_next [1237303.4] td_target [1175439.2] td_error [170910.94]\n",
      "state: [-0.03798043 -0.75874614  0.04968396  1.23734929] reward: 1.0 action: 0\n",
      "value_next [1479708.1] td_target [1405723.8] td_error [164694.62]\n",
      "state: [-0.05315536 -0.95446995  0.07443094  1.54517381] reward: 1.0 action: 0\n",
      "value_next [1729564.4] td_target [1643087.1] td_error [158960.25]\n",
      "state: [-0.07224476 -1.15040293  0.10533442  1.86012204] reward: 1.0 action: 0\n",
      "value_next [1987766.5] td_target [1888379.1] td_error [153618.5]\n",
      "state: [-0.09525282 -1.34651084  0.14253686  2.18356459] reward: 1.0 action: 0\n",
      "value_next [2255159.8] td_target [2142402.8] td_error [148551.62]\n",
      "state: [-0.12218303 -1.54269893  0.18620815  2.51662644] reward: 1.0 action: 0\n",
      "value_next [2532496.2] td_target [2405872.5] td_error [143605.75]\n",
      "state: [-0.15303701 -1.73879436  0.23654068  2.86011419] reward: 1.0 action: 0\n",
      "value_next [2820385.] td_target [2679366.8] td_error [138590.75]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 104/200\n",
      "===============================\n",
      "state: [-0.03252911 -0.18970225  0.00506349  0.3255713 ] reward: 1.0 action: 0\n",
      "value_next [797465.8] td_target [757593.5] td_error [189157.19]\n",
      "state: [-0.03632316 -0.38489593  0.01157492  0.61984673] reward: 1.0 action: 0\n",
      "value_next [1033237.56] td_target [981576.7] td_error [181592.19]\n",
      "state: [-0.04402108 -0.58017762  0.02397185  0.91615258] reward: 1.0 action: 0\n",
      "value_next [1274758.9] td_target [1211021.9] td_error [174632.62]\n",
      "state: [-0.05562463 -0.77561538  0.0422949   1.21627205] reward: 1.0 action: 0\n",
      "value_next [1522943.2] td_target [1446797.1] td_error [168240.75]\n",
      "state: [-0.07113694 -0.97125657  0.06662035  1.52190238] reward: 1.0 action: 0\n",
      "value_next [1778728.1] td_target [1689792.8] td_error [162354.12]\n",
      "state: [-0.09056207 -1.16711718  0.09705839  1.83461348] reward: 1.0 action: 0\n",
      "value_next [2043041.] td_target [1940889.9] td_error [156883.]\n",
      "state: [-0.11390441 -1.36316944  0.13375066  2.15579819] reward: 1.0 action: 0\n",
      "value_next [2316762.] td_target [2200924.8] td_error [151709.25]\n",
      "state: [-0.1411678  -1.55932691  0.17686663  2.48661204] reward: 1.0 action: 0\n",
      "value_next [2600680.2] td_target [2470647.2] td_error [146680.25]\n",
      "state: [-0.17235434 -1.75542724  0.22659887  2.82790185] reward: 1.0 action: 0\n",
      "value_next [2895441.5] td_target [2750670.5] td_error [141603.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 105/200\n",
      "================================\n",
      "state: [ 0.02257016 -0.15440006 -0.0006638   0.28482841] reward: 1.0 action: 0\n",
      "value_next [762243.4] td_target [724132.2] td_error [196021.81]\n",
      "state: [ 0.01948216 -0.34951254  0.00503277  0.5773019 ] reward: 1.0 action: 0\n",
      "value_next [1002930.25] td_target [952784.75] td_error [188183.]\n",
      "state: [ 0.01249191 -0.54470467  0.01657881  0.87156602] reward: 1.0 action: 0\n",
      "value_next [1249363.8] td_target [1186896.5] td_error [180973.69]\n",
      "state: [ 0.00159781 -0.74004813  0.03401013  1.16941488] reward: 1.0 action: 0\n",
      "value_next [1502476.4] td_target [1427353.5] td_error [174357.88]\n",
      "state: [-0.01320315 -0.93559554  0.05739843  1.47256344] reward: 1.0 action: 0\n",
      "value_next [1763229.4] td_target [1675068.9] td_error [168274.38]\n",
      "state: [-0.03191506 -1.13137036  0.0868497   1.78260792] reward: 1.0 action: 0\n",
      "value_next [2032578.6] td_target [1930950.6] td_error [162637.12]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.05454247 -1.3273548   0.12250185  2.10097829] reward: 1.0 action: 0\n",
      "value_next [2311438.] td_target [2195867.] td_error [157329.5]\n",
      "state: [-0.08108956 -1.52347558  0.16452142  2.4288804 ] reward: 1.0 action: 0\n",
      "value_next [2600637.8] td_target [2470606.8] td_error [152202.25]\n",
      "state: [-0.11155907 -1.7195871   0.21309903  2.76722689] reward: 1.0 action: 0\n",
      "value_next [2900870.2] td_target [2755827.8] td_error [147066.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 106/200\n",
      "================================\n",
      "state: [ 0.02392244 -0.16950939 -0.00372384  0.31467579] reward: 1.0 action: 0\n",
      "value_next [803778.6] td_target [763590.7] td_error [200061.5]\n",
      "state: [ 0.02053225 -0.3645781   0.00256967  0.60618201] reward: 1.0 action: 0\n",
      "value_next [1050854.8] td_target [998313.] td_error [192092.94]\n",
      "state: [ 0.01324069 -0.55973589  0.01469332  0.89967321] reward: 1.0 action: 0\n",
      "value_next [1303894.2] td_target [1238700.5] td_error [184769.]\n",
      "state: [ 0.00204597 -0.75505384  0.03268678  1.1969382 ] reward: 1.0 action: 0\n",
      "value_next [1563858.2] td_target [1485666.4] td_error [178050.5]\n",
      "state: [-0.01305511 -0.95058328  0.05662554  1.49968389] reward: 1.0 action: 0\n",
      "value_next [1831733.4] td_target [1740147.6] td_error [171872.62]\n",
      "state: [-0.03206677 -1.14634553  0.08661922  1.80949562] reward: 1.0 action: 0\n",
      "value_next [2108498.2] td_target [2003074.2] td_error [166144.5]\n",
      "state: [-0.05499368 -1.34231985  0.12280913  2.12778923] reward: 1.0 action: 0\n",
      "value_next [2395085.] td_target [2275331.8] td_error [160744.75]\n",
      "state: [-0.08184008 -1.53842898  0.16536492  2.45575261] reward: 1.0 action: 0\n",
      "value_next [2692335.8] td_target [2557720.] td_error [155517.75]\n",
      "state: [-0.11260866 -1.73452219  0.21447997  2.79427589] reward: 1.0 action: 0\n",
      "value_next [3000949.5] td_target [2850903.] td_error [150266.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 107/200\n",
      "================================\n",
      "state: [ 0.01252428 -0.22181865 -0.00876315  0.32469264] reward: 1.0 action: 0\n",
      "value_next [859242.25] td_target [816281.1] td_error [203305.56]\n",
      "state: [ 0.00808791 -0.41681474 -0.00226929  0.61459921] reward: 1.0 action: 0\n",
      "value_next [1112610.6] td_target [1056981.1] td_error [195155.25]\n",
      "state: [-2.48385617e-04 -6.11904907e-01  1.00226895e-02  9.06566536e-01] reward: 1.0 action: 0\n",
      "value_next [1372112.4] td_target [1303507.8] td_error [187668.38]\n",
      "state: [-0.01248648 -0.80716112  0.02815402  1.20238276] reward: 1.0 action: 0\n",
      "value_next [1638739.6] td_target [1556803.6] td_error [180803.62]\n",
      "state: [-0.02862971 -1.00263559  0.05220168  1.50375445] reward: 1.0 action: 0\n",
      "value_next [1913508.2] td_target [1817833.9] td_error [174494.62]\n",
      "state: [-0.04868242 -1.19835073  0.08227676  1.81226758] reward: 1.0 action: 0\n",
      "value_next [2197425.2] td_target [2087555.] td_error [168647.75]\n",
      "state: [-0.07264943 -1.39428716  0.11852212  2.12934009] reward: 1.0 action: 0\n",
      "value_next [2491449.8] td_target [2366878.2] td_error [163139.75]\n",
      "state: [-0.10053518 -1.59036936  0.16110892  2.45616394] reward: 1.0 action: 0\n",
      "value_next [2796448.] td_target [2656626.5] td_error [157810.25]\n",
      "state: [-0.13234256 -1.78644876  0.2102322   2.79363558] reward: 1.0 action: 0\n",
      "value_next [3113140.] td_target [2957484.] td_error [152459.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 108/200\n",
      "================================\n",
      "state: [-0.03954483 -0.21429373 -0.0183227   0.29730925] reward: 1.0 action: 0\n",
      "value_next [879645.7] td_target [835664.4] td_error [207024.69]\n",
      "state: [-0.0438307  -0.40914976 -0.01237651  0.58415766] reward: 1.0 action: 0\n",
      "value_next [1137783.5] td_target [1080895.2] td_error [198632.19]\n",
      "state: [-5.20136984e-02 -6.04096168e-01 -6.93358121e-04  8.72916272e-01] reward: 1.0 action: 0\n",
      "value_next [1402086.1] td_target [1331982.8] td_error [190929.12]\n",
      "state: [-0.06409562 -0.79920868  0.01676497  1.16538113] reward: 1.0 action: 0\n",
      "value_next [1673572.9] td_target [1589895.2] td_error [183875.62]\n",
      "state: [-0.0800798  -0.99454479  0.04007259  1.46327279] reward: 1.0 action: 0\n",
      "value_next [1953292.5] td_target [1855628.9] td_error [177407.]\n",
      "state: [-0.09997069 -1.19013417  0.06933805  1.76819952] reward: 1.0 action: 0\n",
      "value_next [2242288.5] td_target [2130175.] td_error [171432.62]\n",
      "state: [-0.12377337 -1.38596724  0.10470204  2.08161231] reward: 1.0 action: 0\n",
      "value_next [2541562.] td_target [2414484.8] td_error [165831.]\n",
      "state: [-0.15149272 -1.58198135  0.14633428  2.40474954] reward: 1.0 action: 0\n",
      "value_next [2852027.8] td_target [2709427.2] td_error [160446.5]\n",
      "state: [-0.18313235 -1.77804446  0.19442927  2.73856988] reward: 1.0 action: 0\n",
      "value_next [3174461.8] td_target [3015739.5] td_error [155081.25]\n",
      "state: [-0.21869324 -1.97393635  0.24920067  3.08367399] reward: 1.0 action: 0\n",
      "value_next [3509437.8] td_target [3333966.8] td_error [149492.5]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 109/200\n",
      "=================================\n",
      "state: [-0.00886659 -0.22662548  0.01042068  0.34450015] reward: 1.0 action: 0\n",
      "value_next [937711.5] td_target [890826.94] td_error [216986.5]\n",
      "state: [-0.0133991  -0.42189411  0.01731068  0.64045082] reward: 1.0 action: 0\n",
      "value_next [1209310.8] td_target [1148846.2] td_error [208302.12]\n",
      "state: [-0.02183698 -0.61725306  0.0301197   0.93853443] reward: 1.0 action: 0\n",
      "value_next [1487535.8] td_target [1413160.] td_error [200309.12]\n",
      "state: [-0.03418204 -0.81276786  0.04889039  1.24052746] reward: 1.0 action: 0\n",
      "value_next [1773429.9] td_target [1684759.4] td_error [192961.]\n",
      "state: [-0.0504374  -1.00848228  0.07370094  1.54811634] reward: 1.0 action: 0\n",
      "value_next [2068057.1] td_target [1964655.2] td_error [186182.12]\n",
      "state: [-0.07060704 -1.20440748  0.10466326  1.8628548 ] reward: 1.0 action: 0\n",
      "value_next [2372463.5] td_target [2253841.2] td_error [179866.88]\n",
      "state: [-0.09469519 -1.40050921  0.14192036  2.18611258] reward: 1.0 action: 0\n",
      "value_next [2687633.] td_target [2553252.2] td_error [173873.75]\n",
      "state: [-0.12270538 -1.59669261  0.18564261  2.51901376] reward: 1.0 action: 0\n",
      "value_next [3014437.2] td_target [2863716.2] td_error [168023.25]\n",
      "state: [-0.15463923 -1.7927847   0.23602289  2.86236395] reward: 1.0 action: 0\n",
      "value_next [3353575.] td_target [3185897.2] td_error [162089.25]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 110/200\n",
      "=================================\n",
      "state: [ 0.0225719  -0.21156818 -0.04412934  0.32063417] reward: 1.0 action: 0\n",
      "value_next [906316.25] td_target [861001.44] td_error [215454.06]\n",
      "state: [ 0.01834054 -0.4060348  -0.03771666  0.59908031] reward: 1.0 action: 0\n",
      "value_next [1174598.2] td_target [1115869.4] td_error [206867.5]\n",
      "state: [ 0.01021984 -0.60060931 -0.02573505  0.87964844] reward: 1.0 action: 0\n",
      "value_next [1449440.5] td_target [1376969.5] td_error [199008.88]\n",
      "state: [-0.00179234 -0.79537234 -0.00814208  1.16413097] reward: 1.0 action: 0\n",
      "value_next [1731912.5] td_target [1645317.9] td_error [191835.38]\n",
      "state: [-0.01769979 -0.99038734  0.01514053  1.45425004] reward: 1.0 action: 0\n",
      "value_next [2023112.2] td_target [1921957.6] td_error [185280.25]\n",
      "state: [-0.03750754 -1.18569189  0.04422554  1.75162442] reward: 1.0 action: 0\n",
      "value_next [2324136.2] td_target [2207930.5] td_error [179253.25]\n",
      "state: [-0.06122138 -1.38128696  0.07925802  2.05772822] reward: 1.0 action: 0\n",
      "value_next [2636041.8] td_target [2504240.8] td_error [173633.]\n",
      "state: [-0.08884711 -1.57712393  0.12041259  2.37383897] reward: 1.0 action: 0\n",
      "value_next [2959806.2] td_target [2811817.] td_error [168264.5]\n",
      "state: [-0.12038959 -1.77308884  0.16788937  2.70097373] reward: 1.0 action: 0\n",
      "value_next [3296272.2] td_target [3131459.5] td_error [162951.]\n",
      "state: [-0.15585137 -1.96898402  0.22190884  3.03981288] reward: 1.0 action: 0\n",
      "value_next [3646087.2] td_target [3463783.8] td_error [157447.5]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 111/200\n",
      "=================================\n",
      "state: [ 0.03771883 -0.21723703  0.01773578  0.27514556] reward: 1.0 action: 0\n",
      "value_next [930496.3] td_target [883972.5] td_error [233411.12]\n",
      "state: [ 0.03337409 -0.41260748  0.02323869  0.57336919] reward: 1.0 action: 0\n",
      "value_next [1218061.4] td_target [1157159.2] td_error [223941.25]\n",
      "state: [ 0.02512194 -0.60804741  0.03470608  0.87328149] reward: 1.0 action: 0\n",
      "value_next [1512338.] td_target [1436722.1] td_error [215208.5]\n",
      "state: [ 0.012961   -0.80362365  0.05217171  1.17667073] reward: 1.0 action: 0\n",
      "value_next [1814416.6] td_target [1723696.8] td_error [207168.75]\n",
      "state: [-0.00311148 -0.99938307  0.07570512  1.48524185] reward: 1.0 action: 0\n",
      "value_next [2125415.5] td_target [2019145.8] td_error [199747.88]\n",
      "state: [-0.02309914 -1.19534181  0.10540996  1.80057409] reward: 1.0 action: 0\n",
      "value_next [2446438.5] td_target [2324117.5] td_error [192838.5]\n",
      "state: [-0.04700598 -1.39147266  0.14142144  2.1240706 ] reward: 1.0 action: 0\n",
      "value_next [2778531.8] td_target [2639606.2] td_error [186298.75]\n",
      "state: [-0.07483543 -1.58769021  0.18390285  2.45689801] reward: 1.0 action: 0\n",
      "value_next [3122628.] td_target [2966497.5] td_error [179942.]\n",
      "state: [-0.10658923 -1.78383366  0.23304081  2.79991517] reward: 1.0 action: 0\n",
      "value_next [3479487.] td_target [3305513.5] td_error [173537.75]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 112/200\n",
      "==================================\n",
      "state: [-0.0188219  -0.22090677  0.00622617  0.28637164] reward: 1.0 action: 0\n",
      "value_next [981882.56] td_target [932789.44] td_error [236103.06]\n",
      "state: [-0.02324004 -0.41611696  0.0119536   0.58101173] reward: 1.0 action: 0\n",
      "value_next [1274943.6] td_target [1211197.4] td_error [226477.5]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.03156238 -0.61140435  0.02357384  0.87743616] reward: 1.0 action: 0\n",
      "value_next [1574863.2] td_target [1496121.1] td_error [217617.25]\n",
      "state: [-0.04379046 -0.80683861  0.04112256  1.17743617] reward: 1.0 action: 0\n",
      "value_next [1882771.1] td_target [1788633.5] td_error [209475.38]\n",
      "state: [-0.05992724 -1.00246986  0.06467128  1.48272158] reward: 1.0 action: 0\n",
      "value_next [2199824.8] td_target [2089834.5] td_error [201977.12]\n",
      "state: [-0.07997663 -1.19831824  0.09432572  1.79488014] reward: 1.0 action: 0\n",
      "value_next [2527168.8] td_target [2400811.2] td_error [195013.25]\n",
      "state: [-0.103943   -1.39436167  0.13022332  2.11532871] reward: 1.0 action: 0\n",
      "value_next [2865889.8] td_target [2722596.2] td_error [188441.25]\n",
      "state: [-0.13183023 -1.59052126  0.17252989  2.44525429] reward: 1.0 action: 0\n",
      "value_next [3216963.] td_target [3056115.8] td_error [182074.]\n",
      "state: [-0.16364066 -1.78664434  0.22143498  2.78554384] reward: 1.0 action: 0\n",
      "value_next [3581191.8] td_target [3402133.] td_error [175679.25]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 113/200\n",
      "==================================\n",
      "state: [-0.03408213 -0.19588943  0.00379391  0.32759324] reward: 1.0 action: 0\n",
      "value_next [1022333.5] td_target [971217.8] td_error [240937.56]\n",
      "state: [-0.03799992 -0.39106519  0.01034577  0.62147018] reward: 1.0 action: 0\n",
      "value_next [1322607.] td_target [1256477.6] td_error [231223.56]\n",
      "state: [-0.04582122 -0.58633008  0.02277518  0.91739344] reward: 1.0 action: 0\n",
      "value_next [1630049.6] td_target [1548548.1] td_error [222291.25]\n",
      "state: [-0.05754782 -0.78175243  0.04112305  1.21714627] reward: 1.0 action: 0\n",
      "value_next [1945825.] td_target [1848534.8] td_error [214090.12]\n",
      "state: [-0.07318287 -0.97737987  0.06546597  1.52242611] reward: 1.0 action: 0\n",
      "value_next [2271119.8] td_target [2157564.8] td_error [206538.25]\n",
      "state: [-0.09273047 -1.17322876  0.09591449  1.8348034 ] reward: 1.0 action: 0\n",
      "value_next [2607100.5] td_target [2476746.5] td_error [199519.75]\n",
      "state: [-0.11619504 -1.36927174  0.13261056  2.15567192] reward: 1.0 action: 0\n",
      "value_next [2954871.5] td_target [2807129.] td_error [192884.]\n",
      "state: [-0.14358048 -1.56542294  0.175724    2.48618878] reward: 1.0 action: 0\n",
      "value_next [3315415.2] td_target [3149645.5] td_error [186435.5]\n",
      "state: [-0.17488894 -1.76152071  0.22544778  2.82720302] reward: 1.0 action: 0\n",
      "value_next [3689529.] td_target [3505053.5] td_error [179928.5]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 114/200\n",
      "==================================\n",
      "state: [ 0.02120983 -0.1568832   0.0157709   0.33647411] reward: 1.0 action: 0\n",
      "value_next [1011452.9] td_target [960881.25] td_error [250973.5]\n",
      "state: [ 0.01807216 -0.352226    0.02250038  0.63408832] reward: 1.0 action: 0\n",
      "value_next [1321342.9] td_target [1255276.8] td_error [240971.25]\n",
      "state: [ 0.01102764 -0.54765446  0.03518215  0.93377137] reward: 1.0 action: 0\n",
      "value_next [1638637.8] td_target [1556706.9] td_error [231762.88]\n",
      "state: [ 7.45544128e-05 -7.43232935e-01  5.38575743e-02  1.23729895e+00] reward: 1.0 action: 0\n",
      "value_next [1964520.] td_target [1866295.] td_error [223294.38]\n",
      "state: [-0.0147901  -0.93900388  0.07860355  1.54635605] reward: 1.0 action: 0\n",
      "value_next [2300193.] td_target [2185184.2] td_error [215477.88]\n",
      "state: [-0.03357018 -1.13497688  0.10953067  1.86249356] reward: 1.0 action: 0\n",
      "value_next [2646836.8] td_target [2514496.] td_error [208191.75]\n",
      "state: [-0.05626972 -1.3311157   0.14678055  2.18707641] reward: 1.0 action: 0\n",
      "value_next [3005556.8] td_target [2855280.] td_error [201272.75]\n",
      "state: [-0.08289203 -1.52732299  0.19052207  2.52122133] reward: 1.0 action: 0\n",
      "value_next [3377327.] td_target [3208461.5] td_error [194514.25]\n",
      "state: [-0.11343849 -1.7234227   0.2409465   2.86572365] reward: 1.0 action: 0\n",
      "value_next [3762921.] td_target [3574776.] td_error [187655.5]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 115/200\n",
      "==================================\n",
      "state: [ 0.00645815 -0.2138645   0.02618881  0.28953458] reward: 1.0 action: 0\n",
      "value_next [1054582.8] td_target [1001854.6] td_error [258726.88]\n",
      "state: [ 0.00218086 -0.40934991  0.0319795   0.59036078] reward: 1.0 action: 0\n",
      "value_next [1374470.1] td_target [1305747.6] td_error [248199.]\n",
      "state: [-0.00600614 -0.60490468  0.04378671  0.89294332] reward: 1.0 action: 0\n",
      "value_next [1701779.] td_target [1616691.] td_error [238488.25]\n",
      "state: [-0.01810423 -0.80059231  0.06164558  1.1990626 ] reward: 1.0 action: 0\n",
      "value_next [2037722.1] td_target [1935837.] td_error [229542.62]\n",
      "state: [-0.03411608 -0.99645531  0.08562683  1.51041136] reward: 1.0 action: 0\n",
      "value_next [2383536.5] td_target [2264360.8] td_error [221276.12]\n",
      "state: [-0.05404519 -1.19250401  0.11583506  1.82855054] reward: 1.0 action: 0\n",
      "value_next [2740436.5] td_target [2603415.8] td_error [213564.75]\n",
      "state: [-0.07789527 -1.3887036   0.15240607  2.15485712] reward: 1.0 action: 0\n",
      "value_next [3109562.2] td_target [2954085.] td_error [206243.5]\n",
      "state: [-0.10566934 -1.58495886  0.19550321  2.49046184] reward: 1.0 action: 0\n",
      "value_next [3491919.5] td_target [3317324.5] td_error [199100.75]\n",
      "state: [-0.13736852 -1.78109664  0.24531245  2.83617637] reward: 1.0 action: 0\n",
      "value_next [3888310.] td_target [3693895.5] td_error [191872.75]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 116/200\n",
      "===================================\n",
      "state: [ 0.00426808 -0.20656755 -0.04523222  0.24620156] reward: 1.0 action: 0\n",
      "value_next [1014452.] td_target [963730.4] td_error [253011.38]\n",
      "state: [ 1.36724874e-04 -4.01015270e-01 -4.03081839e-02  5.24281087e-01] reward: 1.0 action: 0\n",
      "value_next [1326237.8] td_target [1259926.9] td_error [242661.06]\n",
      "state: [-0.00788358 -0.59554746 -0.02982256  0.80399496] reward: 1.0 action: 0\n",
      "value_next [1645221.9] td_target [1562961.8] td_error [233181.5]\n",
      "state: [-0.01979453 -0.7902481  -0.01374266  1.08714953] reward: 1.0 action: 0\n",
      "value_next [1972650.] td_target [1874018.5] td_error [224527.12]\n",
      "state: [-0.03559949 -0.98518614  0.00800033  1.37548876] reward: 1.0 action: 0\n",
      "value_next [2309805.2] td_target [2194316.] td_error [216626.]\n",
      "state: [-0.05530321 -1.18040714  0.0355101   1.67066296] reward: 1.0 action: 0\n",
      "value_next [2657973.] td_target [2525075.2] td_error [209377.75]\n",
      "state: [-0.07891136 -1.37592323  0.06892336  1.97418997] reward: 1.0 action: 0\n",
      "value_next [3018401.8] td_target [2867482.8] td_error [202652.]\n",
      "state: [-0.10642982 -1.57170071  0.10840716  2.28740638] reward: 1.0 action: 0\n",
      "value_next [3392255.] td_target [3222643.2] td_error [196276.]\n",
      "state: [-0.13786384 -1.76764509  0.15415529  2.61140689] reward: 1.0 action: 0\n",
      "value_next [3780556.2] td_target [3591529.5] td_error [190035.25]\n",
      "state: [-0.17321674 -1.96358343  0.20638343  2.94697149] reward: 1.0 action: 0\n",
      "value_next [4184116.8] td_target [3974911.8] td_error [183658.25]\n",
      "state: [-0.21248841 -2.15924443  0.26532286  3.29448153] reward: 1.0 action: 0\n",
      "value_next [4603457.5] td_target [4373285.5] td_error [176816.]\n",
      "reward:11.0, max reward:12.0, episode len:11\n",
      "\n",
      "Episode 117/200\n",
      "===================================\n",
      "state: [ 0.03969304 -0.16286573  0.01297965  0.26386898] reward: 1.0 action: 0\n",
      "value_next [1040671.4] td_target [988638.8] td_error [273469.3]\n",
      "state: [ 0.03643573 -0.35817052  0.01825703  0.56061739] reward: 1.0 action: 0\n",
      "value_next [1374720.8] td_target [1305985.8] td_error [262417.56]\n",
      "state: [ 0.02927232 -0.55354389  0.02946937  0.85899587] reward: 1.0 action: 0\n",
      "value_next [1716488.2] td_target [1630664.9] td_error [252233.12]\n",
      "state: [ 0.01820144 -0.74905459  0.04664929  1.16079734] reward: 1.0 action: 0\n",
      "value_next [2067236.8] td_target [1963875.9] td_error [242865.38]\n",
      "state: [ 0.00322035 -0.94475215  0.06986524  1.46773425] reward: 1.0 action: 0\n",
      "value_next [2428260.5] td_target [2306848.5] td_error [234228.88]\n",
      "state: [-0.0156747  -1.14065625  0.09921992  1.78139718] reward: 1.0 action: 0\n",
      "value_next [2800838.] td_target [2660797.] td_error [226200.5]\n",
      "state: [-0.03848782 -1.33674437  0.13484787  2.10320557] reward: 1.0 action: 0\n",
      "value_next [3186181.] td_target [3026873.] td_error [218617.5]\n",
      "state: [-0.06522271 -1.53293723  0.17691198  2.43434847] reward: 1.0 action: 0\n",
      "value_next [3585374.5] td_target [3406106.8] td_error [211268.]\n",
      "state: [-0.09588145 -1.72908174  0.22559895  2.77571428] reward: 1.0 action: 0\n",
      "value_next [3999305.5] td_target [3799341.2] td_error [203887.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 118/200\n",
      "===================================\n",
      "state: [ 0.0362409  -0.15645368 -0.02896656  0.27970144] reward: 1.0 action: 0\n",
      "value_next [1052704.8] td_target [1000070.5] td_error [271579.5]\n",
      "state: [ 0.03311183 -0.3511507  -0.02337253  0.56310948] reward: 1.0 action: 0\n",
      "value_next [1385464.5] td_target [1316192.2] td_error [260638.25]\n",
      "state: [ 0.02608881 -0.54593702 -0.01211034  0.84833827] reward: 1.0 action: 0\n",
      "value_next [1725979.5] td_target [1639681.5] td_error [250605.5]\n",
      "state: [ 0.01517007 -0.74089172  0.00485643  1.13718852] reward: 1.0 action: 0\n",
      "value_next [2075557.] td_target [1971780.1] td_error [241429.25]\n",
      "state: [ 3.52238424e-04 -9.36076856e-01  2.76001954e-02  1.43139055e+00] reward: 1.0 action: 0\n",
      "value_next [2435537.2] td_target [2313761.2] td_error [233027.12]\n",
      "state: [-0.0183693  -1.13152838  0.05622801  1.7325696 ] reward: 1.0 action: 0\n",
      "value_next [2807254.8] td_target [2666893.] td_error [225285.75]\n",
      "state: [-0.04099987 -1.32724521  0.0908794   2.04220322] reward: 1.0 action: 0\n",
      "value_next [3191991.5] td_target [3032393.] td_error [218054.25]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.06754477 -1.523176    0.13172346  2.36156835] reward: 1.0 action: 0\n",
      "value_next [3590923.8] td_target [3411378.5] td_error [211137.75]\n",
      "state: [-0.09800829 -1.71920321  0.17895483  2.6916767 ] reward: 1.0 action: 0\n",
      "value_next [4005058.2] td_target [3804806.2] td_error [204291.25]\n",
      "state: [-0.13239235 -1.91512478  0.23278836  3.03319819] reward: 1.0 action: 0\n",
      "value_next [4435154.] td_target [4213397.] td_error [197208.75]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 119/200\n",
      "====================================\n",
      "state: [-0.0385229  -0.19748645  0.00245259  0.29416177] reward: 1.0 action: 0\n",
      "value_next [1172852.6] td_target [1114211.] td_error [281967.2]\n",
      "state: [-0.04247263 -0.39264328  0.00833583  0.5876172 ] reward: 1.0 action: 0\n",
      "value_next [1522581.8] td_target [1446453.6] td_error [270460.88]\n",
      "state: [-0.0503255  -0.58788097  0.02008817  0.88291427] reward: 1.0 action: 0\n",
      "value_next [1880423.8] td_target [1786403.5] td_error [259875.62]\n",
      "state: [-0.06208312 -0.7832699   0.03774646  1.18184404] reward: 1.0 action: 0\n",
      "value_next [2247726.8] td_target [2135341.5] td_error [250155.5]\n",
      "state: [-0.07774851 -0.97886087  0.06138334  1.48611634] reward: 1.0 action: 0\n",
      "value_next [2625867.] td_target [2494574.5] td_error [241208.5]\n",
      "state: [-0.09732573 -1.17467484  0.09110566  1.79731948] reward: 1.0 action: 0\n",
      "value_next [3016200.] td_target [2865391.] td_error [232904.5]\n",
      "state: [-0.12081923 -1.37069071  0.12705205  2.11687193] reward: 1.0 action: 0\n",
      "value_next [3420009.8] td_target [3249010.2] td_error [225070.75]\n",
      "state: [-0.14823304 -1.56683085  0.16938949  2.44596361] reward: 1.0 action: 0\n",
      "value_next [3838446.2] td_target [3646525.] td_error [217487.]\n",
      "state: [-0.17956966 -1.7629441   0.21830876  2.785486  ] reward: 1.0 action: 0\n",
      "value_next [4272451.] td_target [4058829.5] td_error [209874.5]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 120/200\n",
      "====================================\n",
      "state: [-0.04459814 -0.15821291 -0.01958206  0.25443437] reward: 1.0 action: 0\n",
      "value_next [1135552.1] td_target [1078775.5] td_error [286129.8]\n",
      "state: [-0.0477624  -0.35304987 -0.01449337  0.54087702] reward: 1.0 action: 0\n",
      "value_next [1487306.2] td_target [1412941.9] td_error [274388.62]\n",
      "state: [-0.05482339 -0.54796513 -0.00367583  0.82895831] reward: 1.0 action: 0\n",
      "value_next [1847057.4] td_target [1754705.5] td_error [263613.75]\n",
      "state: [-0.0657827  -0.74303664  0.01290334  1.1204829 ] reward: 1.0 action: 0\n",
      "value_next [2216189.8] td_target [2105381.2] td_error [253751.12]\n",
      "state: [-0.08064343 -0.93832544  0.035313    1.41718519] reward: 1.0 action: 0\n",
      "value_next [2596123.8] td_target [2466318.5] td_error [244714.75]\n",
      "state: [-0.09940994 -1.1338664   0.0636567   1.72069363] reward: 1.0 action: 0\n",
      "value_next [2988271.5] td_target [2838859.] td_error [236383.5]\n",
      "state: [-0.12208727 -1.32965709  0.09807057  2.03248726] reward: 1.0 action: 0\n",
      "value_next [3393988.8] td_target [3224290.2] td_error [228596.75]\n",
      "state: [-0.14868041 -1.52564442  0.13872032  2.35384221] reward: 1.0 action: 0\n",
      "value_next [3814515.] td_target [3623790.2] td_error [221148.]\n",
      "state: [-0.1791933  -1.72170865  0.18579716  2.6857665 ] reward: 1.0 action: 0\n",
      "value_next [4250907.5] td_target [4038363.] td_error [213775.25]\n",
      "state: [-0.21362747 -1.91764504  0.23951249  3.02892341] reward: 1.0 action: 0\n",
      "value_next [4703957.5] td_target [4468760.5] td_error [206156.5]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 121/200\n",
      "====================================\n",
      "state: [-0.01332963 -0.18938332  0.04375616  0.33411886] reward: 1.0 action: 0\n",
      "value_next [1265495.4] td_target [1202221.6] td_error [304356.7]\n",
      "state: [-0.0171173  -0.38509983  0.05043853  0.64027284] reward: 1.0 action: 0\n",
      "value_next [1643045.1] td_target [1560893.9] td_error [292045.38]\n",
      "state: [-0.02481929 -0.58088734  0.06324399  0.94840325] reward: 1.0 action: 0\n",
      "value_next [2029405.1] td_target [1927935.9] td_error [280671.75]\n",
      "state: [-0.03643704 -0.77680116  0.08221206  1.26026781] reward: 1.0 action: 0\n",
      "value_next [2425977.5] td_target [2304679.5] td_error [270167.62]\n",
      "state: [-0.05197306 -0.97287285  0.10741741  1.57752512] reward: 1.0 action: 0\n",
      "value_next [2834178.] td_target [2692470.] td_error [260421.]\n",
      "state: [-0.07143052 -1.16909817  0.13896791  1.90168665] reward: 1.0 action: 0\n",
      "value_next [3255378.] td_target [3092610.] td_error [251273.75]\n",
      "state: [-0.09481248 -1.36542319  0.17700165  2.23406036] reward: 1.0 action: 0\n",
      "value_next [3690840.2] td_target [3506299.2] td_error [242517.5]\n",
      "state: [-0.12212095 -1.56172821  0.22168286  2.57568449] reward: 1.0 action: 0\n",
      "value_next [4141645.5] td_target [3934564.2] td_error [233885.75]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 122/200\n",
      "=====================================\n",
      "state: [ 0.0241634  -0.22807514 -0.00395523  0.34082958] reward: 1.0 action: 0\n",
      "value_next [1278686.5] td_target [1214753.1] td_error [300389.12]\n",
      "state: [ 0.0196019  -0.42314059  0.00286136  0.63226264] reward: 1.0 action: 0\n",
      "value_next [1652908.4] td_target [1570263.9] td_error [288276.88]\n",
      "state: [ 0.01113908 -0.61830235  0.01550661  0.92584531] reward: 1.0 action: 0\n",
      "value_next [2035980.9] td_target [1934182.8] td_error [277156.12]\n",
      "state: [-0.00122696 -0.81363025  0.03402352  1.2233607 ] reward: 1.0 action: 0\n",
      "value_next [2429362.5] td_target [2307895.2] td_error [266957.88]\n",
      "state: [-0.01749957 -1.00917354  0.05849073  1.52650687] reward: 1.0 action: 0\n",
      "value_next [2834530.] td_target [2692804.5] td_error [257577.75]\n",
      "state: [-0.03768304 -1.20495058  0.08902087  1.8368567 ] reward: 1.0 action: 0\n",
      "value_next [3252929.5] td_target [3090284.] td_error [248869.]\n",
      "state: [-0.06178205 -1.40093649  0.125758    2.15580907] reward: 1.0 action: 0\n",
      "value_next [3685921.5] td_target [3501626.5] td_error [240644.5]\n",
      "state: [-0.08980078 -1.59704859  0.16887419  2.48452961] reward: 1.0 action: 0\n",
      "value_next [4134713.5] td_target [3927978.8] td_error [232659.25]\n",
      "state: [-0.12174175 -1.79312918  0.21856478  2.82387987] reward: 1.0 action: 0\n",
      "value_next [4600282.] td_target [4370269.] td_error [224611.75]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 123/200\n",
      "=====================================\n",
      "state: [-0.00585479 -0.16383103 -0.01226619  0.25580052] reward: 1.0 action: 0\n",
      "value_next [1205590.6] td_target [1145312.1] td_error [309011.38]\n",
      "state: [-0.00913141 -0.35877572 -0.00715018  0.54458939] reward: 1.0 action: 0\n",
      "value_next [1584259.2] td_target [1505047.2] td_error [296371.5]\n",
      "state: [-0.01630692 -0.55379647  0.00374161  0.8350109 ] reward: 1.0 action: 0\n",
      "value_next [1971500.2] td_target [1872926.2] td_error [284763.12]\n",
      "state: [-0.02738285 -0.74896933  0.02044182  1.12886818] reward: 1.0 action: 0\n",
      "value_next [2368793.2] td_target [2250354.5] td_error [274127.88]\n",
      "state: [-0.04236224 -0.94435298  0.04301919  1.4278919 ] reward: 1.0 action: 0\n",
      "value_next [2777652.2] td_target [2638770.5] td_error [264371.]\n",
      "state: [-0.0612493  -1.13997911  0.07157703  1.73370335] reward: 1.0 action: 0\n",
      "value_next [3199577.2] td_target [3039599.2] td_error [255359.]\n",
      "state: [-0.08404888 -1.33584107  0.10625109  2.04776969] reward: 1.0 action: 0\n",
      "value_next [3636002.2] td_target [3454203.] td_error [246916.75]\n",
      "state: [-0.1107657  -1.53188018  0.14720649  2.37134914] reward: 1.0 action: 0\n",
      "value_next [4088230.5] td_target [3883820.] td_error [238816.25]\n",
      "state: [-0.14140331 -1.72796956  0.19463347  2.70542463] reward: 1.0 action: 0\n",
      "value_next [4557363.5] td_target [4329496.5] td_error [230773.75]\n",
      "state: [-0.1759627  -1.92389554  0.24874196  3.05062621] reward: 1.0 action: 0\n",
      "value_next [5044207.5] td_target [4791998.] td_error [222435.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 124/200\n",
      "=====================================\n",
      "state: [ 3.20016183e-05 -1.74975656e-01 -3.15142935e-02  2.39572855e-01] reward: 1.0 action: 0\n",
      "value_next [1217299.1] td_target [1156435.1] td_error [312715.25]\n",
      "state: [-0.00346751 -0.36963357 -0.02672284  0.52215107] reward: 1.0 action: 0\n",
      "value_next [1600315.6] td_target [1520300.9] td_error [299884.5]\n",
      "state: [-0.01086018 -0.56436939 -0.01627981  0.80629494] reward: 1.0 action: 0\n",
      "value_next [1991985.] td_target [1892386.8] td_error [288119.25]\n",
      "state: [-2.21475707e-02 -7.59264454e-01 -1.53916142e-04  1.09381274e+00] reward: 1.0 action: 0\n",
      "value_next [2393819.2] td_target [2274129.2] td_error [277362.38]\n",
      "state: [-0.03733286 -0.95438438  0.02172234  1.38644737] reward: 1.0 action: 0\n",
      "value_next [2807369.] td_target [2667001.5] td_error [267522.75]\n",
      "state: [-0.05642055 -1.14977028  0.04945129  1.68584295] reward: 1.0 action: 0\n",
      "value_next [3234181.2] td_target [3072473.2] td_error [258473.25]\n",
      "state: [-0.07941595 -1.34542824  0.08316814  1.99350386] reward: 1.0 action: 0\n",
      "value_next [3675745.5] td_target [3491959.2] td_error [250046.]\n",
      "state: [-0.10632452 -1.54131646  0.12303822  2.31074382] reward: 1.0 action: 0\n",
      "value_next [4133437.5] td_target [3926766.5] td_error [242025.]\n",
      "state: [-0.13715085 -1.73732984  0.1692531   2.63862325] reward: 1.0 action: 0\n",
      "value_next [4608446.] td_target [4378024.5] td_error [234135.75]\n",
      "state: [-0.17189744 -1.93328204  0.22202556  2.97787473] reward: 1.0 action: 0\n",
      "value_next [5101686.] td_target [4846602.5] td_error [226039.5]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 125/200\n",
      "======================================\n",
      "state: [ 0.00298493 -0.23484169  0.03073579  0.26674779] reward: 1.0 action: 0\n",
      "value_next [1354100.2] td_target [1286396.2] td_error [332554.5]\n",
      "state: [-0.00171191 -0.43038852  0.03607074  0.56896439] reward: 1.0 action: 0\n",
      "value_next [1764590.] td_target [1676361.5] td_error [318834.62]\n",
      "state: [-0.01031968 -0.62599732  0.04745003  0.87278924] reward: 1.0 action: 0\n",
      "value_next [2184302.] td_target [2075087.9] td_error [306170.62]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.02283962 -0.82173129  0.06490582  1.1800048 ] reward: 1.0 action: 0\n",
      "value_next [2614780.5] td_target [2484042.5] td_error [294497.25]\n",
      "state: [-0.03927425 -1.01763306  0.08850591  1.49230762] reward: 1.0 action: 0\n",
      "value_next [3057592.8] td_target [2904714.] td_error [283701.75]\n",
      "state: [-0.05962691 -1.21371356  0.11835206  1.81126394] reward: 1.0 action: 0\n",
      "value_next [3514272.5] td_target [3338559.8] td_error [273626.25]\n",
      "state: [-0.08390118 -1.40993895  0.15457734  2.13825733] reward: 1.0 action: 0\n",
      "value_next [3986254.8] td_target [3786943.] td_error [264061.5]\n",
      "state: [-0.11209996 -1.60621541  0.19734249  2.4744266 ] reward: 1.0 action: 0\n",
      "value_next [4474797.] td_target [4251058.] td_error [254734.75]\n",
      "state: [-0.14422427 -1.80237171  0.24683102  2.82059313] reward: 1.0 action: 0\n",
      "value_next [4980896.5] td_target [4731852.5] td_error [245312.5]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 126/200\n",
      "======================================\n",
      "state: [-0.04034151 -0.24328893 -0.04442026  0.29761293] reward: 1.0 action: 0\n",
      "value_next [1395477.1] td_target [1325704.2] td_error [319519.7]\n",
      "state: [-0.04520729 -0.43775044 -0.038468    0.57596222] reward: 1.0 action: 0\n",
      "value_next [1795193.] td_target [1705434.4] td_error [306432.88]\n",
      "state: [-0.0539623  -0.63231264 -0.02694876  0.8562828 ] reward: 1.0 action: 0\n",
      "value_next [2204238.5] td_target [2094027.5] td_error [294458.12]\n",
      "state: [-0.06660855 -0.82705723 -0.0098231   1.14037164] reward: 1.0 action: 0\n",
      "value_next [2624213.5] td_target [2493003.8] td_error [283529.25]\n",
      "state: [-0.0831497  -1.02204939  0.01298433  1.42995783] reward: 1.0 action: 0\n",
      "value_next [3056750.2] td_target [2903913.8] td_error [273547.]\n",
      "state: [-0.10359068 -1.21732923  0.04158349  1.72667018] reward: 1.0 action: 0\n",
      "value_next [3503467.8] td_target [3328295.2] td_error [264373.5]\n",
      "state: [-0.12793727 -1.41290121  0.07611689  2.03199657] reward: 1.0 action: 0\n",
      "value_next [3965921.2] td_target [3767626.2] td_error [255831.]\n",
      "state: [-0.15619529 -1.6087214   0.11675682  2.34723312] reward: 1.0 action: 0\n",
      "value_next [4445542.5] td_target [4223266.5] td_error [247691.75]\n",
      "state: [-0.18836972 -1.8046819   0.16370148  2.6734212 ] reward: 1.0 action: 0\n",
      "value_next [4943558.5] td_target [4696381.5] td_error [239664.5]\n",
      "state: [-0.22446336 -2.00059274  0.21716991  3.01127227] reward: 1.0 action: 0\n",
      "value_next [5460908.5] td_target [5187864.] td_error [231392.5]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 127/200\n",
      "======================================\n",
      "state: [-0.03478691 -0.21604972 -0.04521545  0.31359971] reward: 1.0 action: 0\n",
      "value_next [1416982.1] td_target [1346134.] td_error [327707.94]\n",
      "state: [-0.03910791 -0.41049937 -0.03894346  0.59168726] reward: 1.0 action: 0\n",
      "value_next [1826241.9] td_target [1734930.8] td_error [314404.88]\n",
      "state: [-0.04731789 -0.60505509 -0.02710971  0.8718531 ] reward: 1.0 action: 0\n",
      "value_next [2245147.] td_target [2132890.5] td_error [302237.]\n",
      "state: [-0.059419   -0.79979807 -0.00967265  1.15589096] reward: 1.0 action: 0\n",
      "value_next [2675330.] td_target [2541564.5] td_error [291133.5]\n",
      "state: [-0.07541496 -0.99479259  0.01344517  1.44552533] reward: 1.0 action: 0\n",
      "value_next [3118456.2] td_target [2962534.5] td_error [280991.25]\n",
      "state: [-0.09531081 -1.19007735  0.04235568  1.74237876] reward: 1.0 action: 0\n",
      "value_next [3576175.2] td_target [3397367.5] td_error [271668.]\n",
      "state: [-0.11911236 -1.38565494  0.07720325  2.04793089] reward: 1.0 action: 0\n",
      "value_next [4050069.5] td_target [3847567.] td_error [262979.75]\n",
      "state: [-0.14682545 -1.58147889  0.11816187  2.36346708] reward: 1.0 action: 0\n",
      "value_next [4541589.] td_target [4314510.5] td_error [254688.75]\n",
      "state: [-0.17845503 -1.77743801  0.16543121  2.69001492] reward: 1.0 action: 0\n",
      "value_next [5051975.5] td_target [4799377.5] td_error [246497.]\n",
      "state: [-0.21400379 -1.97333814  0.21923151  3.02826854] reward: 1.0 action: 0\n",
      "value_next [5582168.5] td_target [5303061.] td_error [238034.5]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 128/200\n",
      "======================================\n",
      "state: [-0.01788315 -0.18767097  0.03247144  0.35095342] reward: 1.0 action: 0\n",
      "value_next [1500067.4] td_target [1425065.] td_error [355475.]\n",
      "state: [-0.02163657 -0.3832393   0.03949051  0.6536962 ] reward: 1.0 action: 0\n",
      "value_next [1942008.1] td_target [1844908.8] td_error [341135.88]\n",
      "state: [-0.02930135 -0.57888824  0.05256443  0.958548  ] reward: 1.0 action: 0\n",
      "value_next [2394289.8] td_target [2274576.2] td_error [327910.62]\n",
      "state: [-0.04087912 -0.77467601  0.07173539  1.26727085] reward: 1.0 action: 0\n",
      "value_next [2858562.8] td_target [2715635.5] td_error [315714.75]\n",
      "state: [-0.05637264 -0.97063727  0.09708081  1.58152887] reward: 1.0 action: 0\n",
      "value_next [3336489.2] td_target [3169665.8] td_error [304417.25]\n",
      "state: [-0.07578539 -1.16677141  0.12871139  1.90284174] reward: 1.0 action: 0\n",
      "value_next [3829675.2] td_target [3638192.5] td_error [293832.75]\n",
      "state: [-0.09912081 -1.36302894  0.16676822  2.23252967] reward: 1.0 action: 0\n",
      "value_next [4339597.5] td_target [4122618.5] td_error [283716.]\n",
      "state: [-0.12638139 -1.5592956   0.21141882  2.57164817] reward: 1.0 action: 0\n",
      "value_next [4867523.5] td_target [4624148.5] td_error [273762.]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 129/200\n",
      "=======================================\n",
      "state: [ 0.02970832 -0.22842133  0.03663244  0.31860721] reward: 1.0 action: 0\n",
      "value_next [1508972.9] td_target [1433525.2] td_error [364881.]\n",
      "state: [ 0.0251399  -0.42404535  0.04300458  0.6226139 ] reward: 1.0 action: 0\n",
      "value_next [1960646.1] td_target [1862614.8] td_error [349999.12]\n",
      "state: [ 0.01665899 -0.61974056  0.05545686  0.92852478] reward: 1.0 action: 0\n",
      "value_next [2422621.8] td_target [2301491.8] td_error [336265.88]\n",
      "state: [ 0.00426418 -0.81556557  0.07402736  1.23810668] reward: 1.0 action: 0\n",
      "value_next [2896591.5] td_target [2751763.] td_error [323597.5]\n",
      "state: [-0.01204713 -1.01155626  0.09878949  1.55303171] reward: 1.0 action: 0\n",
      "value_next [3384258.2] td_target [3215046.2] td_error [311860.25]\n",
      "state: [-0.03227826 -1.20771409  0.12985013  1.87483074] reward: 1.0 action: 0\n",
      "value_next [3887271.2] td_target [3692908.8] td_error [300869.]\n",
      "state: [-0.05643254 -1.40399258  0.16734674  2.20483862] reward: 1.0 action: 0\n",
      "value_next [4407150.5] td_target [4186794.] td_error [290379.25]\n",
      "state: [-0.08451239 -1.60028151  0.21144351  2.54412944] reward: 1.0 action: 0\n",
      "value_next [4945201.5] td_target [4697942.5] td_error [280077.5]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 130/200\n",
      "=======================================\n",
      "state: [ 0.01776772 -0.24307699 -0.03698152  0.2564743 ] reward: 1.0 action: 0\n",
      "value_next [1452794.1] td_target [1380155.4] td_error [354099.8]\n",
      "state: [ 0.01290618 -0.43765197 -0.03185203  0.53726712] reward: 1.0 action: 0\n",
      "value_next [1890205.1] td_target [1795695.9] td_error [339455.75]\n",
      "state: [ 0.00415314 -0.63231195 -0.02110669  0.81974607] reward: 1.0 action: 0\n",
      "value_next [2337427.5] td_target [2220557.] td_error [326041.62]\n",
      "state: [-0.0084931  -0.82713877 -0.00471177  1.10571631] reward: 1.0 action: 0\n",
      "value_next [2796208.] td_target [2656398.5] td_error [313787.]\n",
      "state: [-0.02503587 -1.02219846  0.01740256  1.39691733] reward: 1.0 action: 0\n",
      "value_next [3268327.5] td_target [3104912.] td_error [302583.5]\n",
      "state: [-0.04547984 -1.21753248  0.04534091  1.69499009] reward: 1.0 action: 0\n",
      "value_next [3755553.] td_target [3567776.2] td_error [292283.25]\n",
      "state: [-0.06983049 -1.41314734  0.07924071  2.00143649] reward: 1.0 action: 0\n",
      "value_next [4259582.5] td_target [4046604.2] td_error [282694.]\n",
      "state: [-0.09809344 -1.60900182  0.11926944  2.31756875] reward: 1.0 action: 0\n",
      "value_next [4781976.5] td_target [4542878.5] td_error [273566.5]\n",
      "state: [-0.13027347 -1.80499156  0.16562081  2.64444697] reward: 1.0 action: 0\n",
      "value_next [5324079.5] td_target [5057876.5] td_error [264589.5]\n",
      "state: [-0.1663733  -2.00093107  0.21850975  2.98280467] reward: 1.0 action: 0\n",
      "value_next [5886921.5] td_target [5592576.5] td_error [255373.5]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 131/200\n",
      "=======================================\n",
      "state: [-0.02774615 -0.17571852 -0.00891118  0.33768746] reward: 1.0 action: 0\n",
      "value_next [1549766.8] td_target [1472279.4] td_error [367562.75]\n",
      "state: [-0.03126052 -0.37071254 -0.00215743  0.62754702] reward: 1.0 action: 0\n",
      "value_next [2006521.2] td_target [1906196.1] td_error [352733.12]\n",
      "state: [-0.03867477 -0.56580431  0.01039351  0.91954972] reward: 1.0 action: 0\n",
      "value_next [2473970.] td_target [2350272.5] td_error [339123.12]\n",
      "state: [-0.04999085 -0.76106519  0.0287845   1.21548085] reward: 1.0 action: 0\n",
      "value_next [2953885.8] td_target [2806192.5] td_error [326648.75]\n",
      "state: [-0.06521216 -0.9565464   0.05309412  1.51704267] reward: 1.0 action: 0\n",
      "value_next [3448062.5] td_target [3275660.2] td_error [315183.25]\n",
      "state: [-0.08434309 -1.15226888  0.08343497  1.82581502] reward: 1.0 action: 0\n",
      "value_next [3958256.5] td_target [3760344.8] td_error [304551.75]\n",
      "state: [-0.10738846 -1.34821124  0.11995127  2.14320757] reward: 1.0 action: 0\n",
      "value_next [4486119.] td_target [4261814.] td_error [294524.25]\n",
      "state: [-0.13435269 -1.54429532  0.16281542  2.4704014 ] reward: 1.0 action: 0\n",
      "value_next [5033118.5] td_target [4781463.5] td_error [284810.]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.16523859 -1.74036913  0.21222345  2.80827898] reward: 1.0 action: 0\n",
      "value_next [5600442.5] td_target [5320421.5] td_error [275041.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 132/200\n",
      "========================================\n",
      "state: [ 0.01896829 -0.17466192 -0.02000319  0.30576197] reward: 1.0 action: 0\n",
      "value_next [1512146.] td_target [1436539.6] td_error [375161.88]\n",
      "state: [ 0.01547505 -0.3694932  -0.01388795  0.59206987] reward: 1.0 action: 0\n",
      "value_next [1974385.] td_target [1875666.8] td_error [359967.5]\n",
      "state: [ 0.00808519 -0.564418   -0.00204656  0.88034595] reward: 1.0 action: 0\n",
      "value_next [2447259.8] td_target [2324897.8] td_error [346035.38]\n",
      "state: [-0.00320317 -0.75951209  0.01556036  1.17238478] reward: 1.0 action: 0\n",
      "value_next [2932582.5] td_target [2785954.2] td_error [333285.5]\n",
      "state: [-0.01839341 -0.95483284  0.03900806  1.469905  ] reward: 1.0 action: 0\n",
      "value_next [3432190.] td_target [3260581.5] td_error [321593.75]\n",
      "state: [-0.03749007 -1.15040968  0.06840616  1.77451256] reward: 1.0 action: 0\n",
      "value_next [3947892.] td_target [3750498.2] td_error [310790.75]\n",
      "state: [-0.06049826 -1.34623269  0.10389641  2.08765582] reward: 1.0 action: 0\n",
      "value_next [4481404.5] td_target [4257335.] td_error [300656.]\n",
      "state: [-0.08742292 -1.54223874  0.14564953  2.4105701 ] reward: 1.0 action: 0\n",
      "value_next [5034272.5] td_target [4782560.] td_error [290907.5]\n",
      "state: [-0.11826769 -1.73829513  0.19386093  2.7442106 ] reward: 1.0 action: 0\n",
      "value_next [5607783.5] td_target [5327395.5] td_error [281188.5]\n",
      "state: [-0.1530336  -1.93418085  0.24874514  3.08917389] reward: 1.0 action: 0\n",
      "value_next [6202852.] td_target [5892710.5] td_error [271063.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 133/200\n",
      "========================================\n",
      "state: [ 0.01930587 -0.18986322 -0.00322068  0.3341208 ] reward: 1.0 action: 0\n",
      "value_next [1601196.4] td_target [1521137.5] td_error [387033.12]\n",
      "state: [ 0.01550861 -0.38493919  0.00346173  0.62578635] reward: 1.0 action: 0\n",
      "value_next [2080450.8] td_target [1976429.2] td_error [371426.12]\n",
      "state: [ 0.00780983 -0.58010929  0.01597746  0.9195575 ] reward: 1.0 action: 0\n",
      "value_next [2570869.5] td_target [2442327.] td_error [357090.38]\n",
      "state: [-0.00379236 -0.77544351  0.03436861  1.21721861] reward: 1.0 action: 0\n",
      "value_next [3074295.8] td_target [2920582.] td_error [343938.5]\n",
      "state: [-0.01930123 -0.97099141  0.05871298  1.52046962] reward: 1.0 action: 0\n",
      "value_next [3592592.] td_target [3412963.2] td_error [331834.25]\n",
      "state: [-0.03872106 -1.16677178  0.08912237  1.83088585] reward: 1.0 action: 0\n",
      "value_next [4127578.8] td_target [3921200.8] td_error [320594.]\n",
      "state: [-0.06205649 -1.36276044  0.12574009  2.14986939] reward: 1.0 action: 0\n",
      "value_next [4680961.] td_target [4446914.] td_error [309974.75]\n",
      "state: [-0.0893117  -1.55887556  0.16873748  2.47858986] reward: 1.0 action: 0\n",
      "value_next [5254246.] td_target [4991534.5] td_error [299665.5]\n",
      "state: [-0.12048921 -1.75496058  0.21830928  2.81791381] reward: 1.0 action: 0\n",
      "value_next [5848646.] td_target [5556214.5] td_error [289281.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 134/200\n",
      "========================================\n",
      "state: [ 0.04791096 -0.22336869  0.04561056  0.35618289] reward: 1.0 action: 0\n",
      "value_next [1705282.5] td_target [1620019.4] td_error [407691.]\n",
      "state: [ 0.04344359 -0.41910844  0.05273422  0.66289202] reward: 1.0 action: 0\n",
      "value_next [2211025.2] td_target [2100475.] td_error [391182.75]\n",
      "state: [ 0.03506142 -0.61492287  0.06599206  0.97170203] reward: 1.0 action: 0\n",
      "value_next [2728414.8] td_target [2591995.] td_error [375929.5]\n",
      "state: [ 0.02276296 -0.8108654   0.0854261   1.28436366] reward: 1.0 action: 0\n",
      "value_next [3259309.8] td_target [3096345.2] td_error [361831.75]\n",
      "state: [ 0.00654566 -1.00696477  0.11111337  1.60252481] reward: 1.0 action: 0\n",
      "value_next [3805568.5] td_target [3615291.] td_error [348731.75]\n",
      "state: [-0.01359364 -1.20321279  0.14316387  1.92768168] reward: 1.0 action: 0\n",
      "value_next [4368974.] td_target [4150526.2] td_error [336411.75]\n",
      "state: [-0.0376579  -1.39955029  0.1817175   2.26112155] reward: 1.0 action: 0\n",
      "value_next [4951146.] td_target [4703589.5] td_error [324584.]\n",
      "state: [-0.0656489  -1.59585082  0.22693993  2.60385554] reward: 1.0 action: 0\n",
      "value_next [5553447.] td_target [5275775.5] td_error [312887.5]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 135/200\n",
      "========================================\n",
      "state: [ 0.02756203 -0.22986581  0.01460024  0.26444525] reward: 1.0 action: 0\n",
      "value_next [1643009.6] td_target [1560860.1] td_error [409435.38]\n",
      "state: [ 0.02296471 -0.42519308  0.01988915  0.56169724] reward: 1.0 action: 0\n",
      "value_next [2146620.5] td_target [2039290.5] td_error [392474.62]\n",
      "state: [ 0.01446085 -0.62058842  0.03112309  0.86057931] reward: 1.0 action: 0\n",
      "value_next [2661326.] td_target [2528260.8] td_error [376854.]\n",
      "state: [ 0.00204908 -0.81612009  0.04833468  1.16288349] reward: 1.0 action: 0\n",
      "value_next [3189049.2] td_target [3029597.8] td_error [362489.]\n",
      "state: [-0.01427332 -1.01183698  0.07159235  1.47032082] reward: 1.0 action: 0\n",
      "value_next [3731737.5] td_target [3545151.5] td_error [349243.25]\n",
      "state: [-0.03451006 -1.20775795  0.10099876  1.78447963] reward: 1.0 action: 0\n",
      "value_next [4291293.5] td_target [4076729.8] td_error [336925.75]\n",
      "state: [-0.05866522 -1.40385943  0.13668836  2.10677602] reward: 1.0 action: 0\n",
      "value_next [4869504.] td_target [4626029.5] td_error [325285.]\n",
      "state: [-0.08674241 -1.60006075  0.17882388  2.43839431] reward: 1.0 action: 0\n",
      "value_next [5467949.] td_target [5194552.5] td_error [314001.]\n",
      "state: [-0.11874362 -1.7962071   0.22759176  2.78021654] reward: 1.0 action: 0\n",
      "value_next [6087899.5] td_target [5783505.5] td_error [302670.5]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 136/200\n",
      "=========================================\n",
      "state: [-0.00267987 -0.24002399  0.04499874  0.33689274] reward: 1.0 action: 0\n",
      "value_next [1806559.4] td_target [1716232.4] td_error [421839.38]\n",
      "state: [-0.00748035 -0.43575646  0.05173659  0.6434194 ] reward: 1.0 action: 0\n",
      "value_next [2331857.5] td_target [2215265.5] td_error [404533.88]\n",
      "state: [-0.01619548 -0.63155987  0.06460498  0.95193529] reward: 1.0 action: 0\n",
      "value_next [2869067.5] td_target [2725615.] td_error [388546.5]\n",
      "state: [-0.02882668 -0.82748894  0.08364368  1.26419644] reward: 1.0 action: 0\n",
      "value_next [3420133.2] td_target [3249127.5] td_error [373776.75]\n",
      "state: [-0.04537646 -1.02357446  0.10892761  1.58185901] reward: 1.0 action: 0\n",
      "value_next [3987000.2] td_target [3787651.2] td_error [360063.]\n",
      "state: [-0.06584795 -1.21981118  0.14056479  1.90643094] reward: 1.0 action: 0\n",
      "value_next [4571540.] td_target [4342964.] td_error [347182.75]\n",
      "state: [-0.09024417 -1.41614382  0.17869341  2.23921536] reward: 1.0 action: 0\n",
      "value_next [5175458.] td_target [4916686.] td_error [334842.]\n",
      "state: [-0.11856705 -1.61245101  0.22347772  2.58124409] reward: 1.0 action: 0\n",
      "value_next [5800198.] td_target [5510189.] td_error [322670.]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 137/200\n",
      "=========================================\n",
      "state: [ 0.00130753 -0.21086394  0.02270305  0.3367012 ] reward: 1.0 action: 0\n",
      "value_next [1788792.4] td_target [1699353.8] td_error [424303.]\n",
      "state: [-0.00290975 -0.40630149  0.02943707  0.6364561 ] reward: 1.0 action: 0\n",
      "value_next [2315636.] td_target [2199855.2] td_error [406991.]\n",
      "state: [-0.01103578 -0.60182135  0.04216619  0.93826207] reward: 1.0 action: 0\n",
      "value_next [2854492.8] td_target [2711769.] td_error [391048.]\n",
      "state: [-0.02307221 -0.79748566  0.06093143  1.24389085] reward: 1.0 action: 0\n",
      "value_next [3407360.5] td_target [3236993.5] td_error [376373.25]\n",
      "state: [-0.03902192 -0.99333424  0.08580925  1.55502131] reward: 1.0 action: 0\n",
      "value_next [3976245.2] td_target [3777434.] td_error [362809.25]\n",
      "state: [-0.0588886  -1.18937324  0.11690968  1.87319484] reward: 1.0 action: 0\n",
      "value_next [4563087.5] td_target [4334934.] td_error [350140.75]\n",
      "state: [-0.08267607 -1.385562    0.15437357  2.19976235] reward: 1.0 action: 0\n",
      "value_next [5169678.5] td_target [4911195.5] td_error [338085.]\n",
      "state: [-0.11038731 -1.58179751  0.19836882  2.53582097] reward: 1.0 action: 0\n",
      "value_next [5797563.] td_target [5507686.] td_error [326281.5]\n",
      "state: [-0.14202326 -1.77789673  0.24908524  2.88214004] reward: 1.0 action: 0\n",
      "value_next [6447928.5] td_target [6125533.] td_error [314283.5]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 138/200\n",
      "=========================================\n",
      "state: [ 0.0065005  -0.16219951  0.02632832  0.32219746] reward: 1.0 action: 0\n",
      "value_next [1757682.9] td_target [1669799.8] td_error [436590.38]\n",
      "state: [ 0.00325651 -0.35768629  0.03277227  0.62306565] reward: 1.0 action: 0\n",
      "value_next [2295333.5] td_target [2180567.8] td_error [418863.5]\n",
      "state: [-0.00389722 -0.55325014  0.04523359  0.92588722] reward: 1.0 action: 0\n",
      "value_next [2845189.] td_target [2702930.5] td_error [402529.5]\n",
      "state: [-0.01496222 -0.74895282  0.06375133  1.23243512] reward: 1.0 action: 0\n",
      "value_next [3409271.2] td_target [3238808.8] td_error [387487.5]\n",
      "state: [-0.02994128 -0.94483401  0.08840003  1.54439022] reward: 1.0 action: 0\n",
      "value_next [3989614.5] td_target [3790134.8] td_error [373580.75]\n",
      "state: [-0.04883796 -1.1408999   0.11928784  1.86329645] reward: 1.0 action: 0\n",
      "value_next [4588188.] td_target [4358779.5] td_error [360589.]\n",
      "state: [-0.07165596 -1.33711001  0.15655377  2.19050761] reward: 1.0 action: 0\n",
      "value_next [5206812.] td_target [4946472.5] td_error [348226.]\n",
      "state: [-0.09839816 -1.5333617   0.20036392  2.52712407] reward: 1.0 action: 0\n",
      "value_next [5847058.] td_target [5554706.] td_error [336125.]\n",
      "state: [-0.12906539 -1.72947241  0.2509064   2.87391877] reward: 1.0 action: 0\n",
      "value_next [6510133.5] td_target [6184627.5] td_error [323832.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 139/200\n",
      "==========================================\n",
      "state: [-0.02025495 -0.17983813 -0.04169884  0.23301224] reward: 1.0 action: 0\n",
      "value_next [1680082.] td_target [1596078.9] td_error [426296.12]\n",
      "state: [-0.02385171 -0.37434023 -0.0370386   0.51225587] reward: 1.0 action: 0\n",
      "value_next [2202593.8] td_target [2092465.] td_error [408604.88]\n",
      "state: [-0.03133851 -0.56892143 -0.02679348  0.79304064] reward: 1.0 action: 0\n",
      "value_next [2736595.2] td_target [2599766.5] td_error [392404.5]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.04271694 -0.76366552 -0.01093267  1.07717559] reward: 1.0 action: 0\n",
      "value_next [3284170.2] td_target [3119962.8] td_error [377614.5]\n",
      "state: [-0.05799025 -0.95864137  0.01061085  1.36640773] reward: 1.0 action: 0\n",
      "value_next [3847445.5] td_target [3655074.2] td_error [364108.75]\n",
      "state: [-0.07716308 -1.15389454  0.037939    1.66239054] reward: 1.0 action: 0\n",
      "value_next [4428534.5] td_target [4207108.5] td_error [351715.25]\n",
      "state: [-0.10024097 -1.34943723  0.07118681  1.96664489] reward: 1.0 action: 0\n",
      "value_next [5029474.5] td_target [4778001.5] td_error [340210.5]\n",
      "state: [-0.12722972 -1.54523579  0.11051971  2.28051013] reward: 1.0 action: 0\n",
      "value_next [5652146.] td_target [5369539.5] td_error [329306.]\n",
      "state: [-0.15813443 -1.74119583  0.15612991  2.60508352] reward: 1.0 action: 0\n",
      "value_next [6298182.] td_target [5983274.] td_error [318637.5]\n",
      "state: [-0.19295835 -1.93714449  0.20823158  2.94114731] reward: 1.0 action: 0\n",
      "value_next [6968853.] td_target [6620411.5] td_error [307753.]\n",
      "state: [-0.23170124 -2.13281067  0.26705453  3.28908504] reward: 1.0 action: 0\n",
      "value_next [7664931.5] td_target [7281686.] td_error [296099.5]\n",
      "reward:11.0, max reward:12.0, episode len:11\n",
      "\n",
      "Episode 140/200\n",
      "==========================================\n",
      "state: [-0.00653696 -0.15103406 -0.04278713  0.26887657] reward: 1.0 action: 0\n",
      "value_next [1717454.9] td_target [1631583.1] td_error [436423.25]\n",
      "state: [-0.00955764 -0.34552011 -0.03740959  0.54776307] reward: 1.0 action: 0\n",
      "value_next [2252578.] td_target [2139950.] td_error [418616.5]\n",
      "state: [-0.01646805 -0.54009709 -0.02645433  0.82842848] reward: 1.0 action: 0\n",
      "value_next [2799804.] td_target [2659814.8] td_error [402318.5]\n",
      "state: [-0.02726999 -0.73484757 -0.00988576  1.11267531] reward: 1.0 action: 0\n",
      "value_next [3361252.8] td_target [3193191.] td_error [387442.25]\n",
      "state: [-0.04196694 -0.9298383   0.01236774  1.40224074] reward: 1.0 action: 0\n",
      "value_next [3939082.8] td_target [3742129.5] td_error [373854.5]\n",
      "state: [-0.06056371 -1.12511169  0.04041256  1.69876446] reward: 1.0 action: 0\n",
      "value_next [4535435.5] td_target [4308664.5] td_error [361375.]\n",
      "state: [-0.08306594 -1.32067548  0.07438785  2.00374869] reward: 1.0 action: 0\n",
      "value_next [5152364.5] td_target [4894747.] td_error [349769.]\n",
      "state: [-0.10947945 -1.51649008  0.11446282  2.31850817] reward: 1.0 action: 0\n",
      "value_next [5791757.] td_target [5502170.] td_error [338733.]\n",
      "state: [-0.13980925 -1.71245327  0.16083298  2.6441081 ] reward: 1.0 action: 0\n",
      "value_next [6455236.] td_target [6132475.] td_error [327887.]\n",
      "state: [-0.17405832 -1.9083823   0.21371515  2.98129002] reward: 1.0 action: 0\n",
      "value_next [7144040.] td_target [6786839.] td_error [316757.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 141/200\n",
      "==========================================\n",
      "state: [ 0.04543058 -0.18487326 -0.03422571  0.24059932] reward: 1.0 action: 0\n",
      "value_next [1726924.1] td_target [1640578.9] td_error [451159.25]\n",
      "state: [ 0.04173311 -0.37949    -0.02941372  0.52229309] reward: 1.0 action: 0\n",
      "value_next [2277105.5] td_target [2163251.2] td_error [432558.5]\n",
      "state: [ 0.03414331 -0.57418587 -0.01896786  0.80556417] reward: 1.0 action: 0\n",
      "value_next [2839393.] td_target [2697424.2] td_error [415511.25]\n",
      "state: [ 0.0226596  -0.76904274 -0.00285657  1.09222068] reward: 1.0 action: 0\n",
      "value_next [3415954.8] td_target [3245158.] td_error [399929.]\n",
      "state: [ 0.00727874 -0.96412693  0.01898784  1.38400594] reward: 1.0 action: 0\n",
      "value_next [4008999.2] td_target [3808550.2] td_error [385676.75]\n",
      "state: [-0.0120038  -1.15948049  0.04666796  1.68256556] reward: 1.0 action: 0\n",
      "value_next [4620713.5] td_target [4389679.] td_error [372568.75]\n",
      "state: [-0.03519341 -1.35511076  0.08031927  1.98940686] reward: 1.0 action: 0\n",
      "value_next [5253194.5] td_target [4990535.5] td_error [360362.5]\n",
      "state: [-0.06229562 -1.5509776   0.12010741  2.30584845] reward: 1.0 action: 0\n",
      "value_next [5908367.5] td_target [5612950.] td_error [348749.]\n",
      "state: [-0.09331517 -1.74697813  0.16622437  2.63295792] reward: 1.0 action: 0\n",
      "value_next [6587882.] td_target [6258489.] td_error [337336.5]\n",
      "state: [-0.12825474 -1.94292871  0.21888353  2.9714777 ] reward: 1.0 action: 0\n",
      "value_next [7292993.5] td_target [6928344.5] td_error [325637.5]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 142/200\n",
      "===========================================\n",
      "state: [-0.02221802 -0.19918722 -0.03477621  0.32075777] reward: 1.0 action: 0\n",
      "value_next [1928180.] td_target [1831772.] td_error [453528.5]\n",
      "state: [-0.02620177 -0.39379711 -0.02836106  0.60227395] reward: 1.0 action: 0\n",
      "value_next [2491994.2] td_target [2367395.5] td_error [435033.38]\n",
      "state: [-0.03407771 -0.58851112 -0.01631558  0.88589048] reward: 1.0 action: 0\n",
      "value_next [3068759.8] td_target [2915322.8] td_error [418101.75]\n",
      "state: [-0.04584793 -0.78340782  0.00140223  1.17340005] reward: 1.0 action: 0\n",
      "value_next [3660702.2] td_target [3477668.] td_error [402631.25]\n",
      "state: [-0.06151609 -0.97854797  0.02487023  1.46652224] reward: 1.0 action: 0\n",
      "value_next [4270074.5] td_target [4056571.8] td_error [388472.5]\n",
      "state: [-0.08108705 -1.17396543  0.05420068  1.76686888] reward: 1.0 action: 0\n",
      "value_next [4899092.5] td_target [4654139.] td_error [375421.5]\n",
      "state: [-0.10456636 -1.36965615  0.08953806  2.07590111] reward: 1.0 action: 0\n",
      "value_next [5549857.5] td_target [5272365.5] td_error [363213.]\n",
      "state: [-0.13195948 -1.5655648   0.13105608  2.39487594] reward: 1.0 action: 0\n",
      "value_next [6224272.5] td_target [5913060.] td_error [351512.5]\n",
      "state: [-0.16327077 -1.76156869  0.1789536   2.72478079] reward: 1.0 action: 0\n",
      "value_next [6923928.5] td_target [6577733.] td_error [339894.5]\n",
      "state: [-0.19850215 -1.9574592   0.23344921  3.06625613] reward: 1.0 action: 0\n",
      "value_next [7649975.5] td_target [7267477.5] td_error [327835.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 143/200\n",
      "===========================================\n",
      "state: [-0.03687958 -0.17780836  0.02180757  0.26031428] reward: 1.0 action: 0\n",
      "value_next [1935941.8] td_target [1839145.6] td_error [486084.75]\n",
      "state: [-0.04043575 -0.37323473  0.02701386  0.55979505] reward: 1.0 action: 0\n",
      "value_next [2532680.8] td_target [2406047.8] td_error [465898.62]\n",
      "state: [-0.04790044 -0.56872522  0.03820976  0.8608649 ] reward: 1.0 action: 0\n",
      "value_next [3142427.5] td_target [2985307.] td_error [447294.75]\n",
      "state: [-0.05927495 -0.76434611  0.05542706  1.165313  ] reward: 1.0 action: 0\n",
      "value_next [3767435.] td_target [3579064.2] td_error [430172.25]\n",
      "state: [-0.07456187 -0.96014398  0.07873332  1.47484599] reward: 1.0 action: 0\n",
      "value_next [4409980.] td_target [4189482.] td_error [414367.5]\n",
      "state: [-0.09376475 -1.1561348   0.10823024  1.79104517] reward: 1.0 action: 0\n",
      "value_next [5072284.] td_target [4818670.5] td_error [399653.]\n",
      "state: [-0.11688744 -1.35229131  0.14405114  2.11531592] reward: 1.0 action: 0\n",
      "value_next [5756417.5] td_target [5468597.5] td_error [385725.]\n",
      "state: [-0.14393327 -1.54852813  0.18635746  2.44882711] reward: 1.0 action: 0\n",
      "value_next [6464201.] td_target [6140992.] td_error [372198.5]\n",
      "state: [-0.17490383 -1.74468456  0.235334    2.79243979] reward: 1.0 action: 0\n",
      "value_next [7197074.5] td_target [6837221.5] td_error [358591.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 144/200\n",
      "===========================================\n",
      "state: [-0.01085971 -0.1686488   0.04935918  0.30976952] reward: 1.0 action: 0\n",
      "value_next [2024822.2] td_target [1923582.1] td_error [503708.75]\n",
      "state: [-0.01423268 -0.36443799  0.05555457  0.61760133] reward: 1.0 action: 0\n",
      "value_next [2644449.] td_target [2512227.5] td_error [483034.62]\n",
      "state: [-0.02152144 -0.56029019  0.0679066   0.92725105] reward: 1.0 action: 0\n",
      "value_next [3277787.] td_target [3113898.5] td_error [463929.5]\n",
      "state: [-0.03272724 -0.75626009  0.08645162  1.2404776 ] reward: 1.0 action: 0\n",
      "value_next [3927113.5] td_target [3730758.8] td_error [446277.25]\n",
      "state: [-0.04785245 -0.95237911  0.11126117  1.55894261] reward: 1.0 action: 0\n",
      "value_next [4594708.5] td_target [4364974.] td_error [429889.75]\n",
      "state: [-0.06690003 -1.14864327  0.14244002  1.8841619 ] reward: 1.0 action: 0\n",
      "value_next [5282763.] td_target [5018626.] td_error [414507.5]\n",
      "state: [-0.08987289 -1.34499929  0.18012326  2.21744895] reward: 1.0 action: 0\n",
      "value_next [5993273.5] td_target [5693611.] td_error [399785.]\n",
      "state: [-0.11677288 -1.54132848  0.22447224  2.55984866] reward: 1.0 action: 0\n",
      "value_next [6727925.] td_target [6391529.5] td_error [385285.5]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 145/200\n",
      "============================================\n",
      "state: [-0.01432821 -0.20141636 -0.00214303  0.2533313 ] reward: 1.0 action: 0\n",
      "value_next [1985252.] td_target [1885990.4] td_error [495774.62]\n",
      "state: [-0.01835654 -0.39650764  0.0029236   0.5453375 ] reward: 1.0 action: 0\n",
      "value_next [2594258.5] td_target [2464546.5] td_error [475115.]\n",
      "state: [-0.02628669 -0.59167055  0.01383035  0.83894015] reward: 1.0 action: 0\n",
      "value_next [3216467.2] td_target [3055644.8] td_error [456135.75]\n",
      "state: [-0.0381201  -0.7869786   0.03060915  1.1359402 ] reward: 1.0 action: 0\n",
      "value_next [3854248.2] td_target [3661536.8] td_error [438732.]\n",
      "state: [-0.05385968 -0.98248733  0.05332796  1.43806361] reward: 1.0 action: 0\n",
      "value_next [4509999.] td_target [4284500.] td_error [422741.]\n",
      "state: [-0.07350942 -1.1782244   0.08208923  1.74692281] reward: 1.0 action: 0\n",
      "value_next [5186069.] td_target [4926766.5] td_error [407939.5]\n",
      "state: [-0.09707391 -1.37417781  0.11702769  2.06397028] reward: 1.0 action: 0\n",
      "value_next [5884671.5] td_target [5590439.] td_error [394032.5]\n",
      "state: [-0.12455747 -1.57028194  0.15830709  2.39044213] reward: 1.0 action: 0\n",
      "value_next [6607789.] td_target [6277400.5] td_error [380646.5]\n",
      "state: [-0.15596311 -1.76640101  0.20611593  2.72729033] reward: 1.0 action: 0\n",
      "value_next [7357045.] td_target [6989193.5] td_error [367310.5]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.19129113 -1.96231038  0.26066174  3.07510414] reward: 1.0 action: 0\n",
      "value_next [8133572.5] td_target [7726895.] td_error [353452.5]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 146/200\n",
      "============================================\n",
      "state: [ 0.02717532 -0.14558153 -0.00332767  0.242837  ] reward: 1.0 action: 0\n",
      "value_next [1906080.2] td_target [1810777.2] td_error [510452.5]\n",
      "state: [ 0.02426369 -0.34065579  0.00152907  0.53446844] reward: 1.0 action: 0\n",
      "value_next [2525857.2] td_target [2399565.2] td_error [489432.5]\n",
      "state: [ 0.01745058 -0.53579921  0.01221844  0.82763277] reward: 1.0 action: 0\n",
      "value_next [3159109.5] td_target [3001155.] td_error [470117.5]\n",
      "state: [ 0.00673459 -0.73108608  0.02877109  1.12413333] reward: 1.0 action: 0\n",
      "value_next [3808228.] td_target [3617817.5] td_error [452404.75]\n",
      "state: [-0.00788713 -0.92657312  0.05125376  1.42570007] reward: 1.0 action: 0\n",
      "value_next [4475634.5] td_target [4251853.5] td_error [436132.]\n",
      "state: [-0.02641859 -1.12228966  0.07976776  1.73395137] reward: 1.0 action: 0\n",
      "value_next [5163708.] td_target [4905523.5] td_error [421075.5]\n",
      "state: [-0.04886439 -1.31822599  0.11444679  2.05034818] reward: 1.0 action: 0\n",
      "value_next [5874697.] td_target [5580963.] td_error [406940.]\n",
      "state: [-0.07522891 -1.51431949  0.15545375  2.37613816] reward: 1.0 action: 0\n",
      "value_next [6610617.5] td_target [6280087.5] td_error [393348.5]\n",
      "state: [-0.1055153  -1.71043829  0.20297651  2.71228852] reward: 1.0 action: 0\n",
      "value_next [7373133.5] td_target [7004477.5] td_error [379829.5]\n",
      "state: [-0.13972406 -1.90636256  0.25722228  3.05940787] reward: 1.0 action: 0\n",
      "value_next [8163412.] td_target [7755242.5] td_error [365800.5]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 147/200\n",
      "============================================\n",
      "state: [ 0.03092949 -0.22585223 -0.03699112  0.31673845] reward: 1.0 action: 0\n",
      "value_next [2106079.2] td_target [2000776.2] td_error [501085.62]\n",
      "state: [ 0.02641244 -0.42042831 -0.03065635  0.59752999] reward: 1.0 action: 0\n",
      "value_next [2727605.5] td_target [2591226.2] td_error [480678.25]\n",
      "state: [ 0.01800388 -0.61510817 -0.01870575  0.88040102] reward: 1.0 action: 0\n",
      "value_next [3363365.5] td_target [3195198.2] td_error [462002.]\n",
      "state: [ 5.70171216e-03 -8.09971075e-01 -1.09773006e-03  1.16714507e+00] reward: 1.0 action: 0\n",
      "value_next [4015810.2] td_target [3815020.8] td_error [444941.75]\n",
      "state: [-0.01049771 -1.00507872  0.02224517  1.45948363] reward: 1.0 action: 0\n",
      "value_next [4687418.5] td_target [4453048.5] td_error [429332.5]\n",
      "state: [-0.03059928 -1.2004663   0.05143484  1.75903202] reward: 1.0 action: 0\n",
      "value_next [5380622.5] td_target [5111592.5] td_error [414950.]\n",
      "state: [-0.05460861 -1.39613156  0.08661548  2.06725695] reward: 1.0 action: 0\n",
      "value_next [6097730.5] td_target [5792845.] td_error [401505.5]\n",
      "state: [-0.08253124 -1.59202156  0.12796062  2.38542355] reward: 1.0 action: 0\n",
      "value_next [6840828.5] td_target [6498788.] td_error [388629.5]\n",
      "state: [-0.11437167 -1.78801672  0.17566909  2.7145305 ] reward: 1.0 action: 0\n",
      "value_next [7611664.] td_target [7231081.5] td_error [375858.5]\n",
      "state: [-0.15013201 -1.98391231  0.2299597   3.05523308] reward: 1.0 action: 0\n",
      "value_next [8411501.] td_target [7990927.] td_error [362620.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 148/200\n",
      "============================================\n",
      "state: [-0.00763859 -0.18964849  0.03301268  0.34668267] reward: 1.0 action: 0\n",
      "value_next [2247290.8] td_target [2134927.2] td_error [536333.]\n",
      "state: [-0.01143156 -0.38522409  0.03994634  0.64959018] reward: 1.0 action: 0\n",
      "value_next [2912107.5] td_target [2766503.] td_error [514496.]\n",
      "state: [-0.01913605 -0.58087905  0.05293814  0.95457959] reward: 1.0 action: 0\n",
      "value_next [3591994.] td_target [3412395.2] td_error [494357.25]\n",
      "state: [-0.03075363 -0.77667164  0.07202973  1.2634136 ] reward: 1.0 action: 0\n",
      "value_next [4289416.] td_target [4074946.2] td_error [475785.75]\n",
      "state: [-0.04628706 -0.97263669  0.09729801  1.57775741] reward: 1.0 action: 0\n",
      "value_next [5006835.] td_target [4756494.] td_error [458577.5]\n",
      "state: [-0.06573979 -1.16877389  0.12885315  1.89913219] reward: 1.0 action: 0\n",
      "value_next [5746612.5] td_target [5459283.] td_error [442451.5]\n",
      "state: [-0.08911527 -1.36503414  0.1668358   2.22886012] reward: 1.0 action: 0\n",
      "value_next [6510905.5] td_target [6185361.] td_error [427039.5]\n",
      "state: [-0.11641595 -1.56130372  0.211413    2.56799924] reward: 1.0 action: 0\n",
      "value_next [7301541.] td_target [6936465.] td_error [411879.5]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 149/200\n",
      "=============================================\n",
      "state: [-0.0180388  -0.24361446 -0.00694787  0.25192683] reward: 1.0 action: 0\n",
      "value_next [2199474.5] td_target [2089501.8] td_error [532364.1]\n",
      "state: [-0.02291109 -0.43863652 -0.00190933  0.54241019] reward: 1.0 action: 0\n",
      "value_next [2856820.5] td_target [2713980.5] td_error [510027.]\n",
      "state: [-0.03168382 -0.63373158  0.00893887  0.83449091] reward: 1.0 action: 0\n",
      "value_next [3528356.] td_target [3351939.2] td_error [489510.5]\n",
      "state: [-0.04435845 -0.82897451  0.02562869  1.12997159] reward: 1.0 action: 0\n",
      "value_next [4216643.5] td_target [4005812.2] td_error [470701.75]\n",
      "state: [-0.06093794 -1.02442254  0.04822812  1.43058137] reward: 1.0 action: 0\n",
      "value_next [4924272.5] td_target [4678060.] td_error [453428.5]\n",
      "state: [-0.08142639 -1.22010554  0.07683975  1.73793816] reward: 1.0 action: 0\n",
      "value_next [5653781.] td_target [5371093.] td_error [437450.5]\n",
      "state: [-0.1058285  -1.41601448  0.11159851  2.05350312] reward: 1.0 action: 0\n",
      "value_next [6407561.] td_target [6087184.] td_error [422453.]\n",
      "state: [-0.13414879 -1.61208756  0.15266858  2.37852508] reward: 1.0 action: 0\n",
      "value_next [7187759.5] td_target [6828372.5] td_error [408039.5]\n",
      "state: [-0.16639054 -1.80819391  0.20023908  2.71397358] reward: 1.0 action: 0\n",
      "value_next [7996141.5] td_target [7596335.5] td_error [393706.]\n",
      "state: [-0.20255442 -2.00411495  0.25451855  3.06046078] reward: 1.0 action: 0\n",
      "value_next [8833950.] td_target [8392253.] td_error [378839.5]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 150/200\n",
      "=============================================\n",
      "state: [ 0.00119833 -0.22549053  0.04459478  0.35436701] reward: 1.0 action: 0\n",
      "value_next [2392701.2] td_target [2273067.2] td_error [558837.]\n",
      "state: [-0.00331148 -0.42121727  0.05168212  0.66077175] reward: 1.0 action: 0\n",
      "value_next [3088079.8] td_target [2933676.8] td_error [535992.]\n",
      "state: [-0.01173583 -0.61701886  0.06489756  0.96926975] reward: 1.0 action: 0\n",
      "value_next [3799174.2] td_target [3609216.5] td_error [514898.]\n",
      "state: [-0.02407621 -0.81294919  0.08428295  1.28161306] reward: 1.0 action: 0\n",
      "value_next [4528539.] td_target [4302113.] td_error [495412.]\n",
      "state: [-0.04033519 -1.00903758  0.10991521  1.59945142] reward: 1.0 action: 0\n",
      "value_next [5278712.] td_target [5014777.5] td_error [477313.]\n",
      "state: [-0.06051594 -1.20527665  0.14190424  1.92428374] reward: 1.0 action: 0\n",
      "value_next [6052116.5] td_target [5749511.5] td_error [460300.]\n",
      "state: [-0.08462147 -1.40160825  0.18038992  2.25740096] reward: 1.0 action: 0\n",
      "value_next [6850940.] td_target [6508394.] td_error [443979.]\n",
      "state: [-0.11265364 -1.59790719  0.22553794  2.59981906] reward: 1.0 action: 0\n",
      "value_next [7677008.] td_target [7293158.5] td_error [427847.5]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 151/200\n",
      "=============================================\n",
      "state: [ 0.02503105 -0.17540797  0.0090281   0.31557985] reward: 1.0 action: 0\n",
      "value_next [2249643.8] td_target [2137162.5] td_error [560204.5]\n",
      "state: [ 0.02152289 -0.37065735  0.0153397   0.6110962 ] reward: 1.0 action: 0\n",
      "value_next [2938474.5] td_target [2791551.8] td_error [537346.75]\n",
      "state: [ 0.01410974 -0.56599031  0.02756162  0.90857087] reward: 1.0 action: 0\n",
      "value_next [3642685.] td_target [3460551.8] td_error [516334.75]\n",
      "state: [ 0.00278994 -0.76147428  0.04573304  1.20978754] reward: 1.0 action: 0\n",
      "value_next [4364906.5] td_target [4146662.] td_error [497035.5]\n",
      "state: [-0.01243955 -0.95715599  0.06992879  1.51644404] reward: 1.0 action: 0\n",
      "value_next [5107778.] td_target [4852390.] td_error [479250.5]\n",
      "state: [-0.03158267 -1.15305082  0.10025767  1.83011043] reward: 1.0 action: 0\n",
      "value_next [5873856.] td_target [5580164.] td_error [462704.5]\n",
      "state: [-0.05464369 -1.34913021  0.13685988  2.15217898] reward: 1.0 action: 0\n",
      "value_next [6665511.] td_target [6332236.5] td_error [447041.5]\n",
      "state: [-0.08162629 -1.5453068   0.17990346  2.48380358] reward: 1.0 action: 0\n",
      "value_next [7484811.5] td_target [7110572.] td_error [431808.]\n",
      "state: [-0.11253243 -1.74141711  0.22957953  2.82582827] reward: 1.0 action: 0\n",
      "value_next [8333380.5] td_target [7916712.5] td_error [416442.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 152/200\n",
      "==============================================\n",
      "state: [-0.04317957 -0.22836228 -0.0353549   0.31363381] reward: 1.0 action: 0\n",
      "value_next [2381580.8] td_target [2262502.8] td_error [546043.25]\n",
      "state: [-0.04774681 -0.42296322 -0.02908223  0.59496062] reward: 1.0 action: 0\n",
      "value_next [3063041.8] td_target [2909890.8] td_error [523541.25]\n",
      "state: [-0.05620608 -0.61766632 -0.01718301  0.8783428 ] reward: 1.0 action: 0\n",
      "value_next [3759905.8] td_target [3571911.5] td_error [502951.25]\n",
      "state: [-6.85594055e-02 -8.12550609e-01  3.83842634e-04  1.16557453e+00] reward: 1.0 action: 0\n",
      "value_next [4474879.] td_target [4251136.] td_error [484146.75]\n",
      "state: [-0.08481042 -1.00767755  0.02369533  1.45837778] reward: 1.0 action: 0\n",
      "value_next [5210690.5] td_target [4950157.] td_error [466944.]\n",
      "state: [-0.10496397 -1.20308201  0.05286289  1.75836796] reward: 1.0 action: 0\n",
      "value_next [5970016.5] td_target [5671516.5] td_error [451097.]\n",
      "state: [-0.12902561 -1.39876133  0.08803025  2.06701136] reward: 1.0 action: 0\n",
      "value_next [6755392.5] td_target [6417624.] td_error [436287.5]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.15700084 -1.59466199  0.12937048  2.385572  ] reward: 1.0 action: 0\n",
      "value_next [7569100.5] td_target [7190646.5] td_error [422104.5]\n",
      "state: [-0.18889408 -1.79066369  0.17708192  2.71504657] reward: 1.0 action: 0\n",
      "value_next [8413052.] td_target [7992400.5] td_error [408043.5]\n",
      "state: [-0.22470735 -1.98656082  0.23138285  3.0560875 ] reward: 1.0 action: 0\n",
      "value_next [9288619.] td_target [8824189.] td_error [393470.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 153/200\n",
      "==============================================\n",
      "state: [ 0.04427405 -0.23403172 -0.01065588  0.26348323] reward: 1.0 action: 0\n",
      "value_next [2303298.2] td_target [2188134.2] td_error [574438.6]\n",
      "state: [ 0.03959341 -0.42899996 -0.00538622  0.55278622] reward: 1.0 action: 0\n",
      "value_next [3008962.] td_target [2858514.8] td_error [550619.5]\n",
      "state: [ 0.03101341 -0.62404586  0.00566951  0.84376728] reward: 1.0 action: 0\n",
      "value_next [3730031.5] td_target [3543531.] td_error [528750.75]\n",
      "state: [ 0.0185325  -0.81924472  0.02254485  1.13822769] reward: 1.0 action: 0\n",
      "value_next [4469243.5] td_target [4245782.5] td_error [508708.25]\n",
      "state: [ 0.0021476  -1.01465413  0.04530941  1.43789495] reward: 1.0 action: 0\n",
      "value_next [5229359.] td_target [4967892.] td_error [490304.5]\n",
      "state: [-0.01814548 -1.21030426  0.07406731  1.74438536] reward: 1.0 action: 0\n",
      "value_next [6013081.] td_target [5712428.] td_error [473282.]\n",
      "state: [-0.04235156 -1.4061864   0.10895501  2.05915887] reward: 1.0 action: 0\n",
      "value_next [6822953.5] td_target [6481806.5] td_error [457302.5]\n",
      "state: [-0.07047529 -1.60223914  0.15013819  2.3834636 ] reward: 1.0 action: 0\n",
      "value_next [7661258.5] td_target [7278196.5] td_error [441940.5]\n",
      "state: [-0.10252008 -1.79833208  0.19780746  2.71826905] reward: 1.0 action: 0\n",
      "value_next [8529868.] td_target [8103375.5] td_error [426656.5]\n",
      "state: [-0.13848672 -1.99424716  0.25217284  3.06418798] reward: 1.0 action: 0\n",
      "value_next [9430091.] td_target [8958587.] td_error [410792.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 154/200\n",
      "==============================================\n",
      "state: [-0.00529152 -0.1765937  -0.02827731  0.25053356] reward: 1.0 action: 0\n",
      "value_next [2278788.2] td_target [2164849.8] td_error [579560.]\n",
      "state: [-0.0088234  -0.37130067 -0.02326664  0.53416484] reward: 1.0 action: 0\n",
      "value_next [2988311.5] td_target [2838897.] td_error [555547.]\n",
      "state: [-0.01624941 -0.56608782 -0.01258334  0.81942672] reward: 1.0 action: 0\n",
      "value_next [3713287.] td_target [3527623.5] td_error [533535.5]\n",
      "state: [-0.02757117 -0.76103531  0.00380519  1.1081254 ] reward: 1.0 action: 0\n",
      "value_next [4456499.] td_target [4233675.] td_error [513406.5]\n",
      "state: [-0.04279187 -0.95620707  0.0259677   1.40199964] reward: 1.0 action: 0\n",
      "value_next [5220766.] td_target [4959728.5] td_error [494978.5]\n",
      "state: [-0.06191601 -1.15164184  0.05400769  1.7026866 ] reward: 1.0 action: 0\n",
      "value_next [6008868.5] td_target [5708426.] td_error [478007.5]\n",
      "state: [-0.08494885 -1.34734242  0.08806143  2.01168001] reward: 1.0 action: 0\n",
      "value_next [6823449.5] td_target [6482278.] td_error [462171.]\n",
      "state: [-0.1118957  -1.54326267  0.12829503  2.33027823] reward: 1.0 action: 0\n",
      "value_next [7666909.5] td_target [7283565.] td_error [447056.5]\n",
      "state: [-0.14276095 -1.73929183  0.17490059  2.65952071] reward: 1.0 action: 0\n",
      "value_next [8541279.] td_target [8114216.] td_error [432152.5]\n",
      "state: [-0.17754679 -1.93523637  0.22809101  3.0001125 ] reward: 1.0 action: 0\n",
      "value_next [9448053.] td_target [8975651.] td_error [416816.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 155/200\n",
      "==============================================\n",
      "state: [ 0.04916496 -0.23574842  0.02627521  0.31162378] reward: 1.0 action: 0\n",
      "value_next [2505375.2] td_target [2380107.5] td_error [610426.9]\n",
      "state: [ 0.04444999 -0.43123466  0.03250768  0.61247594] reward: 1.0 action: 0\n",
      "value_next [3258547.] td_target [3095620.5] td_error [585297.]\n",
      "state: [ 0.0358253  -0.62679548  0.0447572   0.91521777] reward: 1.0 action: 0\n",
      "value_next [4028344.5] td_target [3826928.2] td_error [562136.]\n",
      "state: [ 0.02328939 -0.82249323  0.06306156  1.22162465] reward: 1.0 action: 0\n",
      "value_next [4817582.5] td_target [4576704.5] td_error [540798.75]\n",
      "state: [ 0.00683953 -1.0183685   0.08749405  1.53338136] reward: 1.0 action: 0\n",
      "value_next [5629071.] td_target [5347618.5] td_error [521061.5]\n",
      "state: [-0.01352784 -1.21442887  0.11816168  1.8520374 ] reward: 1.0 action: 0\n",
      "value_next [6465518.] td_target [6142243.] td_error [502617.]\n",
      "state: [-0.03781642 -1.41063569  0.15520243  2.17895416] reward: 1.0 action: 0\n",
      "value_next [7329410.] td_target [6962940.5] td_error [485064.]\n",
      "state: [-0.06602914 -1.60688874  0.19878151  2.51524193] reward: 1.0 action: 0\n",
      "value_next [8222878.5] td_target [7811735.5] td_error [467895.5]\n",
      "state: [-0.09816691 -1.80300855  0.24908635  2.86168628] reward: 1.0 action: 0\n",
      "value_next [9147539.] td_target [8690163.] td_error [450477.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 156/200\n",
      "===============================================\n",
      "state: [ 0.03322936 -0.19220896 -0.01912064  0.26634015] reward: 1.0 action: 0\n",
      "value_next [2380794.2] td_target [2261755.5] td_error [605891.1]\n",
      "state: [ 0.02938518 -0.38705287 -0.01379384  0.55293147] reward: 1.0 action: 0\n",
      "value_next [3122450.] td_target [2966328.5] td_error [580844.75]\n",
      "state: [ 0.02164412 -0.58197843 -0.00273521  0.84123672] reward: 1.0 action: 0\n",
      "value_next [3880256.5] td_target [3686244.8] td_error [557869.5]\n",
      "state: [ 0.01000455 -0.77706293  0.01408953  1.13305824] reward: 1.0 action: 0\n",
      "value_next [4657103.] td_target [4424249.] td_error [536835.75]\n",
      "state: [-0.0055367  -0.97236644  0.03675069  1.4301266 ] reward: 1.0 action: 0\n",
      "value_next [5455906.5] td_target [5183112.] td_error [517546.]\n",
      "state: [-0.02498403 -1.16792234  0.06535322  1.73406466] reward: 1.0 action: 0\n",
      "value_next [6279523.] td_target [5965548.] td_error [499734.5]\n",
      "state: [-0.04834248 -1.3637262   0.10003452  2.04634362] reward: 1.0 action: 0\n",
      "value_next [7130657.] td_target [6774125.] td_error [483052.]\n",
      "state: [-0.075617   -1.55972221  0.14096139  2.368229  ] reward: 1.0 action: 0\n",
      "value_next [8011735.5] td_target [7611149.5] td_error [467058.]\n",
      "state: [-0.10681145 -1.75578715  0.18832597  2.70071492] reward: 1.0 action: 0\n",
      "value_next [8924773.] td_target [8478535.] td_error [451193.5]\n",
      "state: [-0.14192719 -1.95171188  0.24234027  3.04444693] reward: 1.0 action: 0\n",
      "value_next [9871203.] td_target [9377644.] td_error [434776.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 157/200\n",
      "===============================================\n",
      "state: [ 0.03966461 -0.19847916 -0.05007414  0.25023672] reward: 1.0 action: 0\n",
      "value_next [2368425.5] td_target [2250005.2] td_error [604323.6]\n",
      "state: [ 0.03569503 -0.3928516  -0.04506941  0.52671428] reward: 1.0 action: 0\n",
      "value_next [3107681.8] td_target [2952298.8] td_error [579255.25]\n",
      "state: [ 0.027838   -0.58731138 -0.03453512  0.80486183] reward: 1.0 action: 0\n",
      "value_next [3862987.8] td_target [3669839.2] td_error [556318.5]\n",
      "state: [ 0.01609177 -0.78194328 -0.01843789  1.08648429] reward: 1.0 action: 0\n",
      "value_next [4637282.5] td_target [4405419.5] td_error [535388.25]\n",
      "state: [ 4.52902980e-04 -9.76817252e-01  3.29179949e-03  1.37332513e+00] reward: 1.0 action: 0\n",
      "value_next [5433546.] td_target [5161869.5] td_error [516281.5]\n",
      "state: [-0.01908344 -1.1719802   0.0307583   1.66703576] reward: 1.0 action: 0\n",
      "value_next [6254724.] td_target [5941988.5] td_error [498752.5]\n",
      "state: [-0.04252305 -1.36744609  0.06409902  1.96913742] reward: 1.0 action: 0\n",
      "value_next [7103642.] td_target [6748461.] td_error [482485.]\n",
      "state: [-0.06987197 -1.56318371  0.10348177  2.28097311] reward: 1.0 action: 0\n",
      "value_next [7982898.] td_target [7583754.] td_error [467072.]\n",
      "state: [-0.10113564 -1.75910188  0.14910123  2.60364764] reward: 1.0 action: 0\n",
      "value_next [8894733.] td_target [8449997.] td_error [452002.5]\n",
      "state: [-0.13631768 -1.9550318   0.20117418  2.93795527] reward: 1.0 action: 0\n",
      "value_next [9840875.] td_target [9348832.] td_error [436643.]\n",
      "state: [-0.17541832 -2.15070729  0.25993329  3.2842962 ] reward: 1.0 action: 0\n",
      "value_next [10822355.] td_target [10281238.] td_error [420217.]\n",
      "reward:11.0, max reward:12.0, episode len:11\n",
      "\n",
      "Episode 158/200\n",
      "===============================================\n",
      "state: [ 0.02119676 -0.16481363  0.04972159  0.33441115] reward: 1.0 action: 0\n",
      "value_next [2637472.] td_target [2505599.2] td_error [657812.]\n",
      "state: [ 0.01790049 -0.36060667  0.05640981  0.64234988] reward: 1.0 action: 0\n",
      "value_next [3445845.] td_target [3273553.8] td_error [630923.5]\n",
      "state: [ 0.01068835 -0.55646766  0.06925681  0.95224974] reward: 1.0 action: 0\n",
      "value_next [4272065.5] td_target [4058463.2] td_error [606068.25]\n",
      "state: [-4.41000164e-04 -7.52449788e-01  8.83018074e-02  1.26586342e+00] reward: 1.0 action: 0\n",
      "value_next [5119057.] td_target [4863105.] td_error [583083.]\n",
      "state: [-0.01549    -0.94858207  0.11361908  1.58484264] reward: 1.0 action: 0\n",
      "value_next [5989727.] td_target [5690241.5] td_error [561719.5]\n",
      "state: [-0.03446164 -1.14485708  0.14531593  1.91068904] reward: 1.0 action: 0\n",
      "value_next [6886845.5] td_target [6542504.] td_error [541624.5]\n",
      "state: [-0.05735878 -1.34121683  0.18352971  2.24469697] reward: 1.0 action: 0\n",
      "value_next [7812910.5] td_target [7422266.] td_error [522343.5]\n",
      "state: [-0.08418312 -1.53753658  0.22842365  2.58788634] reward: 1.0 action: 0\n",
      "value_next [8769998.] td_target [8331499.] td_error [503297.]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 159/200\n",
      "================================================\n",
      "state: [-0.02943949 -0.21899422  0.01415336  0.2847018 ] reward: 1.0 action: 0\n",
      "value_next [2698288.5] td_target [2563375.] td_error [649656.]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.03381937 -0.41431514  0.0198474   0.58181479] reward: 1.0 action: 0\n",
      "value_next [3500926.8] td_target [3325881.2] td_error [622478.25]\n",
      "state: [-0.04210567 -0.60970947  0.03148369  0.88068332] reward: 1.0 action: 0\n",
      "value_next [4320839.5] td_target [4104798.5] td_error [597467.75]\n",
      "state: [-0.05429986 -0.80524465  0.04909736  1.18309541] reward: 1.0 action: 0\n",
      "value_next [5161085.] td_target [4903031.5] td_error [574474.]\n",
      "state: [-0.07040476 -1.00096809  0.07275927  1.49075561] reward: 1.0 action: 0\n",
      "value_next [6024729.] td_target [5723493.5] td_error [553271.]\n",
      "state: [-0.09042412 -1.19689646  0.10257438  1.80524294] reward: 1.0 action: 0\n",
      "value_next [6914736.5] td_target [6569000.5] td_error [533541.]\n",
      "state: [-0.11436205 -1.39300318  0.13867924  2.12796089] reward: 1.0 action: 0\n",
      "value_next [7833861.] td_target [7442169.] td_error [514874.5]\n",
      "state: [-0.14222211 -1.58920357  0.18123846  2.46007716] reward: 1.0 action: 0\n",
      "value_next [8784498.] td_target [8345274.] td_error [496746.5]\n",
      "state: [-0.17400618 -1.78533769  0.23044     2.80245255] reward: 1.0 action: 0\n",
      "value_next [9768531.] td_target [9280105.] td_error [478513.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 160/200\n",
      "================================================\n",
      "state: [ 0.0477104  -0.1631483  -0.01798152  0.31674619] reward: 1.0 action: 0\n",
      "value_next [2578286.2] td_target [2449373.] td_error [650891.]\n",
      "state: [ 0.04444744 -0.35800958 -0.01164659  0.60370457] reward: 1.0 action: 0\n",
      "value_next [3376273.8] td_target [3207461.] td_error [624332.75]\n",
      "state: [ 3.72872442e-02 -5.52966723e-01  4.27498030e-04  8.92696431e-01] reward: 1.0 action: 0\n",
      "value_next [4191976.8] td_target [3982378.8] td_error [599991.75]\n",
      "state: [ 0.02622791 -0.74809447  0.01828143  1.18551371] reward: 1.0 action: 0\n",
      "value_next [5028504.5] td_target [4777080.] td_error [577714.]\n",
      "state: [ 0.01126602 -0.9434487   0.0419917   1.48387053] reward: 1.0 action: 0\n",
      "value_next [5888976.5] td_target [5594528.5] td_error [557272.5]\n",
      "state: [-0.00760295 -1.13905677  0.07166911  1.78936591] reward: 1.0 action: 0\n",
      "value_next [6776427.] td_target [6437606.5] td_error [538364.]\n",
      "state: [-0.03038409 -1.33490588  0.10745643  2.10343811] reward: 1.0 action: 0\n",
      "value_next [7693703.] td_target [7309019.] td_error [520600.]\n",
      "state: [-0.05708221 -1.53092914  0.14952519  2.42730847] reward: 1.0 action: 0\n",
      "value_next [8643323.] td_target [8211157.5] td_error [503477.5]\n",
      "state: [-0.08770079 -1.72698904  0.19807136  2.76191362] reward: 1.0 action: 0\n",
      "value_next [9627336.] td_target [9145970.] td_error [486380.]\n",
      "state: [-0.12224057 -1.9228585   0.25330963  3.10782628] reward: 1.0 action: 0\n",
      "value_next [10647126.] td_target [10114771.] td_error [468543.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 161/200\n",
      "================================================\n",
      "state: [ 0.0171607  -0.18805862  0.01665044  0.31228908] reward: 1.0 action: 0\n",
      "value_next [2738780.2] td_target [2601842.2] td_error [676525.6]\n",
      "state: [ 0.01339953 -0.38341378  0.02289622  0.6101762 ] reward: 1.0 action: 0\n",
      "value_next [3571182.8] td_target [3392624.5] td_error [648649.25]\n",
      "state: [ 0.00573125 -0.57884817  0.03509975  0.90998185] reward: 1.0 action: 0\n",
      "value_next [4421809.] td_target [4200719.5] td_error [622990.]\n",
      "state: [-0.00584571 -0.77442711  0.05329938  1.21348678] reward: 1.0 action: 0\n",
      "value_next [5293797.5] td_target [5029108.5] td_error [599388.]\n",
      "state: [-0.02133425 -0.97019482  0.07756912  1.52238393] reward: 1.0 action: 0\n",
      "value_next [6190282.] td_target [5880769.] td_error [577597.5]\n",
      "state: [-0.04073815 -1.16616344  0.1080168   1.83823542] reward: 1.0 action: 0\n",
      "value_next [7114287.] td_target [6758573.5] td_error [557282.5]\n",
      "state: [-0.06406142 -1.36230029  0.14478151  2.16242123] reward: 1.0 action: 0\n",
      "value_next [8068598.] td_target [7665169.] td_error [538008.]\n",
      "state: [-0.09130742 -1.55851268  0.18802993  2.49607771] reward: 1.0 action: 0\n",
      "value_next [9055620.] td_target [8602840.] td_error [519219.]\n",
      "state: [-0.12247768 -1.75463054  0.23795148  2.84002509] reward: 1.0 action: 0\n",
      "value_next [10077203.] td_target [9573344.] td_error [500229.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 162/200\n",
      "=================================================\n",
      "state: [-0.02714569 -0.22317681 -0.0429626   0.25067168] reward: 1.0 action: 0\n",
      "value_next [2715457.2] td_target [2579685.2] td_error [658113.9]\n",
      "state: [-0.03160923 -0.41765976 -0.03794917  0.52949978] reward: 1.0 action: 0\n",
      "value_next [3527285.2] td_target [3350922.] td_error [630416.75]\n",
      "state: [-0.03996242 -0.61222787 -0.02735917  0.80998744] reward: 1.0 action: 0\n",
      "value_next [4356494.5] td_target [4138670.8] td_error [605065.25]\n",
      "state: [-0.05220698 -0.80696449 -0.01115943  1.09394047] reward: 1.0 action: 0\n",
      "value_next [5206319.] td_target [4946004.] td_error [581920.]\n",
      "state: [-0.06834627 -1.00193767  0.01071938  1.38310122] reward: 1.0 action: 0\n",
      "value_next [6080030.] td_target [5776029.5] td_error [560774.]\n",
      "state: [-0.08838502 -1.19719169  0.03838141  1.67911686] reward: 1.0 action: 0\n",
      "value_next [6980850.] td_target [6631808.5] td_error [541352.5]\n",
      "state: [-0.11232886 -1.39273693  0.07196375  1.98350007] reward: 1.0 action: 0\n",
      "value_next [7911858.] td_target [7516266.] td_error [523299.5]\n",
      "state: [-0.14018359 -1.58853734  0.11163375  2.29757959] reward: 1.0 action: 0\n",
      "value_next [8875866.] td_target [8432074.] td_error [506159.5]\n",
      "state: [-0.17195434 -1.78449534  0.15758534  2.62243914] reward: 1.0 action: 0\n",
      "value_next [9875278.] td_target [9381515.] td_error [489360.]\n",
      "state: [-0.20764425 -1.98043403  0.21003412  2.95884397] reward: 1.0 action: 0\n",
      "value_next [10911914.] td_target [10366319.] td_error [472193.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 163/200\n",
      "=================================================\n",
      "state: [-0.00263765 -0.2086232   0.01717955  0.33502617] reward: 1.0 action: 0\n",
      "value_next [2933586.8] td_target [2786908.5] td_error [696632.4]\n",
      "state: [-0.00681011 -0.40398538  0.02388007  0.63307674] reward: 1.0 action: 0\n",
      "value_next [3796706.5] td_target [3606872.2] td_error [667894.5]\n",
      "state: [-0.01488982 -0.59943218  0.03654161  0.93318343] reward: 1.0 action: 0\n",
      "value_next [4678860.5] td_target [4444918.5] td_error [641452.]\n",
      "state: [-0.02687847 -0.79502759  0.05520528  1.23712152] reward: 1.0 action: 0\n",
      "value_next [5583308.5] td_target [5304144.] td_error [617128.5]\n",
      "state: [-0.04277902 -0.99081365  0.07994771  1.54657533] reward: 1.0 action: 0\n",
      "value_next [6513297.] td_target [6187633.] td_error [594662.5]\n",
      "state: [-0.06259529 -1.18679947  0.11087921  1.86309464] reward: 1.0 action: 0\n",
      "value_next [7471947.] td_target [7098350.5] td_error [573699.]\n",
      "state: [-0.08633128 -1.38294814  0.14814111  2.18804262] reward: 1.0 action: 0\n",
      "value_next [8462116.] td_target [8039011.] td_error [553775.]\n",
      "state: [-0.11399024 -1.57916152  0.19190196  2.5225334 ] reward: 1.0 action: 0\n",
      "value_next [9486254.] td_target [9011942.] td_error [534306.]\n",
      "state: [-0.14557347 -1.77526257  0.24235263  2.86735884] reward: 1.0 action: 0\n",
      "value_next [10546220.] td_target [10018910.] td_error [514569.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 164/200\n",
      "=================================================\n",
      "state: [ 0.01028254 -0.20251718 -0.00121997  0.32304607] reward: 1.0 action: 0\n",
      "value_next [2910298.] td_target [2764784.] td_error [700988.25]\n",
      "state: [ 0.0062322  -0.39762174  0.00524095  0.61534402] reward: 1.0 action: 0\n",
      "value_next [3776576.2] td_target [3587748.5] td_error [672097.75]\n",
      "state: [-0.00172023 -0.59281652  0.01754783  0.90967301] reward: 1.0 action: 0\n",
      "value_next [4661951.5] td_target [4428855.] td_error [645569.5]\n",
      "state: [-0.01357656 -0.78817152  0.03574129  1.20781918] reward: 1.0 action: 0\n",
      "value_next [5569754.5] td_target [5291267.5] td_error [621233.]\n",
      "state: [-0.02933999 -0.98373648  0.05989767  1.51148499] reward: 1.0 action: 0\n",
      "value_next [6503311.5] td_target [6178147.] td_error [598834.5]\n",
      "state: [-0.04901472 -1.17953064  0.09012737  1.82224892] reward: 1.0 action: 0\n",
      "value_next [7465838.5] td_target [7092547.5] td_error [578031.]\n",
      "state: [-0.07260534 -1.3755304   0.12657235  2.14151689] reward: 1.0 action: 0\n",
      "value_next [8460322.] td_target [8037307.] td_error [558381.]\n",
      "state: [-0.10011594 -1.57165478  0.16940269  2.47046307] reward: 1.0 action: 0\n",
      "value_next [9489361.] td_target [9014894.] td_error [539321.]\n",
      "state: [-0.13154904 -1.7677483   0.21881195  2.80995945] reward: 1.0 action: 0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "value_next [10555009.] td_target [10027259.] td_error [520153.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 165/200\n",
      "==================================================\n",
      "state: [-0.00915045 -0.15546029 -0.00682463  0.25145811] reward: 1.0 action: 0\n",
      "value_next [2764820.5] td_target [2626580.5] td_error [716514.1]\n",
      "state: [-0.01225966 -0.35048412 -0.00179547  0.54198063] reward: 1.0 action: 0\n",
      "value_next [3638320.] td_target [3456405.] td_error [686590.5]\n",
      "state: [-0.01926934 -0.54558079  0.00904414  0.83409729] reward: 1.0 action: 0\n",
      "value_next [4530274.] td_target [4303761.] td_error [659112.5]\n",
      "state: [-0.03018095 -0.74082514  0.02572609  1.12961077] reward: 1.0 action: 0\n",
      "value_next [5444067.] td_target [5171864.5] td_error [633927.]\n",
      "state: [-0.04499746 -0.93627439  0.04831831  1.43025028] reward: 1.0 action: 0\n",
      "value_next [6383107.5] td_target [6063953.] td_error [610797.5]\n",
      "state: [-0.06372294 -1.13195842  0.07692331  1.73763381] reward: 1.0 action: 0\n",
      "value_next [7350723.5] td_target [6983188.] td_error [589403.5]\n",
      "state: [-0.08636211 -1.32786819  0.11167599  2.05322262] reward: 1.0 action: 0\n",
      "value_next [8350039.5] td_target [7932538.5] td_error [569325.5]\n",
      "state: [-0.11291948 -1.52394192  0.15274044  2.37826564] reward: 1.0 action: 0\n",
      "value_next [9383844.] td_target [8914653.] td_error [550030.5]\n",
      "state: [-0.14339832 -1.72004873  0.20030575  2.71373249] reward: 1.0 action: 0\n",
      "value_next [10454416.] td_target [9931696.] td_error [530851.]\n",
      "state: [-0.17779929 -1.91597007  0.2545804   3.06023544] reward: 1.0 action: 0\n",
      "value_next [11563327.] td_target [10985162.] td_error [510972.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 166/200\n",
      "==================================================\n",
      "state: [ 0.01038946 -0.1704273   0.03172833  0.31765832] reward: 1.0 action: 0\n",
      "value_next [2988254.2] td_target [2838842.5] td_error [744276.1]\n",
      "state: [ 0.00698092 -0.36598646  0.0380815   0.62017598] reward: 1.0 action: 0\n",
      "value_next [3902429.2] td_target [3707308.8] td_error [713603.75]\n",
      "state: [-3.38814094e-04 -5.61618988e-01  5.04850204e-02  9.24605279e-01] reward: 1.0 action: 0\n",
      "value_next [4836467.5] td_target [4594645.] td_error [685330.]\n",
      "state: [-0.01157119 -0.75738517  0.06897713  1.23271671] reward: 1.0 action: 0\n",
      "value_next [5793766.5] td_target [5504079.] td_error [659270.5]\n",
      "state: [-0.0267189  -0.95332294  0.09363146  1.54618765] reward: 1.0 action: 0\n",
      "value_next [6777710.] td_target [6438825.5] td_error [635150.5]\n",
      "state: [-0.04578536 -1.14943637  0.12455521  1.86655671] reward: 1.0 action: 0\n",
      "value_next [7791536.] td_target [7401960.] td_error [612585.]\n",
      "state: [-0.06877408 -1.3456823   0.16188635  2.19516977] reward: 1.0 action: 0\n",
      "value_next [8838197.] td_target [8396288.] td_error [591078.]\n",
      "state: [-0.09568773 -1.54195475  0.20578974  2.53311601] reward: 1.0 action: 0\n",
      "value_next [9920201.] td_target [9424192.] td_error [570003.]\n",
      "state: [-0.12652682 -1.73806703  0.25645206  2.8811535 ] reward: 1.0 action: 0\n",
      "value_next [11039404.] td_target [10487435.] td_error [548578.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 167/200\n",
      "==================================================\n",
      "state: [ 0.02393335 -0.16872005  0.00977137  0.29629861] reward: 1.0 action: 0\n",
      "value_next [2946752.] td_target [2799415.2] td_error [747655.75]\n",
      "state: [ 0.02055895 -0.36397992  0.01569734  0.59204718] reward: 1.0 action: 0\n",
      "value_next [3861984.] td_target [3668885.8] td_error [716788.25]\n",
      "state: [ 0.01327935 -0.55931807  0.02753828  0.88963313] reward: 1.0 action: 0\n",
      "value_next [4796993.5] td_target [4557145.] td_error [688407.]\n",
      "state: [ 0.00209299 -0.75480266  0.04533094  1.19084406] reward: 1.0 action: 0\n",
      "value_next [5755262.] td_target [5467500.] td_error [662334.5]\n",
      "state: [-0.01300306 -0.95048173  0.06914783  1.49738389] reward: 1.0 action: 0\n",
      "value_next [6740268.5] td_target [6403256.] td_error [638305.]\n",
      "state: [-0.0320127  -1.14637264  0.0990955   1.81083132] reward: 1.0 action: 0\n",
      "value_next [7755378.5] td_target [7367610.5] td_error [615959.5]\n",
      "state: [-0.05494015 -1.34244955  0.13531213  2.13259019] reward: 1.0 action: 0\n",
      "value_next [8803705.] td_target [8363520.5] td_error [594826.5]\n",
      "state: [-0.08178914 -1.5386287   0.17796393  2.46382957] reward: 1.0 action: 0\n",
      "value_next [9887956.] td_target [9393559.] td_error [574311.]\n",
      "state: [-0.11256172 -1.73475124  0.22724052  2.80541273] reward: 1.0 action: 0\n",
      "value_next [11010244.] td_target [10459733.] td_error [553670.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 168/200\n",
      "==================================================\n",
      "state: [-0.04338947 -0.24508722  0.0335574   0.29603601] reward: 1.0 action: 0\n",
      "value_next [3255498.2] td_target [3092724.2] td_error [762494.]\n",
      "state: [-0.04829122 -0.4406711   0.03947812  0.59911068] reward: 1.0 action: 0\n",
      "value_next [4201826.5] td_target [3991736.] td_error [730425.75]\n",
      "state: [-0.05710464 -0.63632251  0.05146033  0.90396269] reward: 1.0 action: 0\n",
      "value_next [5168353.5] td_target [4909937.] td_error [700855.]\n",
      "state: [-0.06983109 -0.83210227  0.06953959  1.21236622] reward: 1.0 action: 0\n",
      "value_next [6158616.5] td_target [5850686.5] td_error [673595.5]\n",
      "state: [-0.08647314 -1.02804945  0.09378691  1.52600478] reward: 1.0 action: 0\n",
      "value_next [7176135.5] td_target [6817329.5] td_error [648364.5]\n",
      "state: [-0.10703412 -1.22416995  0.12430701  1.84642562] reward: 1.0 action: 0\n",
      "value_next [8224289.] td_target [7813075.5] td_error [624774.]\n",
      "state: [-0.13151752 -1.42042319  0.16123552  2.1749862 ] reward: 1.0 action: 0\n",
      "value_next [9306157.] td_target [8840850.] td_error [602312.5]\n",
      "state: [-0.15992599 -1.61670656  0.20473524  2.5127906 ] reward: 1.0 action: 0\n",
      "value_next [10424370.] td_target [9903152.] td_error [580341.]\n",
      "state: [-0.19226012 -1.81283776  0.25499105  2.86061559] reward: 1.0 action: 0\n",
      "value_next [11580889.] td_target [11001845.] td_error [558058.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 169/200\n",
      "===================================================\n",
      "state: [-0.02459042 -0.23342251  0.00473548  0.29999628] reward: 1.0 action: 0\n",
      "value_next [3226825.2] td_target [3065485.] td_error [761007.5]\n",
      "state: [-0.02925887 -0.42861163  0.0107354   0.59416891] reward: 1.0 action: 0\n",
      "value_next [4170380.8] td_target [3961862.8] td_error [729234.5]\n",
      "state: [-0.0378311  -0.62388219  0.02261878  0.89021402] reward: 1.0 action: 0\n",
      "value_next [5134374.5] td_target [4877656.5] td_error [700040.5]\n",
      "state: [-0.05030875 -0.81930362  0.04042306  1.18992053] reward: 1.0 action: 0\n",
      "value_next [6122430.] td_target [5816309.5] td_error [673242.5]\n",
      "state: [-0.06669482 -1.01492546  0.06422147  1.49499474] reward: 1.0 action: 0\n",
      "value_next [7138166.5] td_target [6781259.] td_error [648567.5]\n",
      "state: [-0.08699333 -1.21076695  0.09412137  1.80701952] reward: 1.0 action: 0\n",
      "value_next [8185080.] td_target [7775827.] td_error [625642.5]\n",
      "state: [-0.11120867 -1.40680469  0.13026176  2.12740543] reward: 1.0 action: 0\n",
      "value_next [9266411.] td_target [8803091.] td_error [603988.5]\n",
      "state: [-0.13934476 -1.60295802  0.17280987  2.45733143] reward: 1.0 action: 0\n",
      "value_next [10384984.] td_target [9865736.] td_error [582994.]\n",
      "state: [-0.17140392 -1.79907199  0.22195649  2.79767446] reward: 1.0 action: 0\n",
      "value_next [11543020.] td_target [10965870.] td_error [561902.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 170/200\n",
      "===================================================\n",
      "state: [-0.00823057 -0.15165872 -0.0304962   0.27767341] reward: 1.0 action: 0\n",
      "value_next [3005616.5] td_target [2855336.8] td_error [761708.9]\n",
      "state: [-0.01126374 -0.34633263 -0.02494273  0.56058401] reward: 1.0 action: 0\n",
      "value_next [3937994.] td_target [3741095.2] td_error [730210.25]\n",
      "state: [-0.01819039 -0.5410958  -0.01373105  0.84530549] reward: 1.0 action: 0\n",
      "value_next [4890538.5] td_target [4646012.5] td_error [701375.5]\n",
      "state: [-0.02901231 -0.73602773  0.00317506  1.13363902] reward: 1.0 action: 0\n",
      "value_next [5866933.5] td_target [5573588.] td_error [675032.]\n",
      "state: [-0.04373286 -0.93119109  0.02584784  1.42731606] reward: 1.0 action: 0\n",
      "value_next [6870880.5] td_target [6527337.5] td_error [650928.5]\n",
      "state: [-0.06235669 -1.12662266  0.05439416  1.7279639 ] reward: 1.0 action: 0\n",
      "value_next [7906004.5] td_target [7510705.] td_error [628729.5]\n",
      "state: [-0.08488914 -1.32232246  0.08895344  2.03706344] reward: 1.0 action: 0\n",
      "value_next [8975730.] td_target [8526944.] td_error [608001.5]\n",
      "state: [-0.11133559 -1.51824064  0.12969471  2.35589663] reward: 1.0 action: 0\n",
      "value_next [10083141.] td_target [9578985.] td_error [588196.]\n",
      "state: [-0.1417004  -1.71426159  0.17681264  2.68548205] reward: 1.0 action: 0\n",
      "value_next [11230803.] td_target [10669264.] td_error [568615.]\n",
      "state: [-0.17598563 -1.91018568  0.23052228  3.02649878] reward: 1.0 action: 0\n",
      "value_next [12420566.] td_target [11799539.] td_error [548417.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 171/200\n",
      "===================================================\n",
      "state: [ 0.03776538 -0.20229985  0.02992735  0.34891081] reward: 1.0 action: 0\n",
      "value_next [3321532.5] td_target [3155456.8] td_error [802620.]\n",
      "state: [ 0.03371938 -0.39783438  0.03690557  0.6508786 ] reward: 1.0 action: 0\n",
      "value_next [4312699.5] td_target [4097065.5] td_error [769745.25]\n",
      "state: [ 0.02576269 -0.5934504   0.04992314  0.9549506 ] reward: 1.0 action: 0\n",
      "value_next [5325734.5] td_target [5059448.5] td_error [739458.5]\n",
      "state: [ 0.01389368 -0.78920706  0.06902215  1.26289106] reward: 1.0 action: 0\n",
      "value_next [6364320.] td_target [6046105.] td_error [711548.]\n",
      "state: [-0.00189046 -0.98514025  0.09427998  1.57636745] reward: 1.0 action: 0\n",
      "value_next [7432103.] td_target [7060499.] td_error [685703.]\n",
      "state: [-0.02159326 -1.18125091  0.12580732  1.8969043 ] reward: 1.0 action: 0\n",
      "value_next [8532557.] td_target [8105930.] td_error [661495.]\n",
      "state: [-0.04521828 -1.3774916   0.16374541  2.22582876] reward: 1.0 action: 0\n",
      "value_next [9668833.] td_target [9185392.] td_error [638377.]\n",
      "state: [-0.07276811 -1.57375061  0.20826199  2.56420574] reward: 1.0 action: 0\n",
      "value_next [10843575.] td_target [10301397.] td_error [615659.]\n",
      "state: [-0.10424312 -1.76983405  0.2595461   2.91276274] reward: 1.0 action: 0\n",
      "value_next [12058705.] td_target [11455771.] td_error [592479.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 172/200\n",
      "====================================================\n",
      "state: [-0.00298576 -0.21220181  0.00769016  0.31112069] reward: 1.0 action: 0\n",
      "value_next [3341092.] td_target [3174038.2] td_error [802349.]\n",
      "state: [-0.0072298  -0.40743248  0.01391257  0.60621891] reward: 1.0 action: 0\n",
      "value_next [4332698.] td_target [4116064.] td_error [769107.]\n",
      "state: [-0.01537845 -0.60274618  0.02603695  0.90325127] reward: 1.0 action: 0\n",
      "value_next [5345890.] td_target [5078596.5] td_error [738553.5]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.02743337 -0.79821095  0.04410197  1.20400305] reward: 1.0 action: 0\n",
      "value_next [6384446.5] td_target [6065225.] td_error [710491.5]\n",
      "state: [-0.04339759 -0.99387439  0.06818204  1.51017461] reward: 1.0 action: 0\n",
      "value_next [7452135.] td_target [7079529.] td_error [684630.]\n",
      "state: [-0.06327508 -1.18975303  0.09838553  1.82333988] reward: 1.0 action: 0\n",
      "value_next [8552591.] td_target [8124962.5] td_error [660575.5]\n",
      "state: [-0.08707014 -1.38581987  0.13485233  2.14489658] reward: 1.0 action: 0\n",
      "value_next [9689166.] td_target [9204709.] td_error [637812.]\n",
      "state: [-0.11478654 -1.58198955  0.17775026  2.47600616] reward: 1.0 action: 0\n",
      "value_next [10864768.] td_target [10321530.] td_error [615691.]\n",
      "state: [-0.14642633 -1.77810117  0.22727038  2.81752251] reward: 1.0 action: 0\n",
      "value_next [12081654.] td_target [11477572.] td_error [593408.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 173/200\n",
      "====================================================\n",
      "state: [-0.0097947  -0.23032766 -0.04732009  0.26979391] reward: 1.0 action: 0\n",
      "value_next [3270838.2] td_target [3107297.2] td_error [783787.75]\n",
      "state: [-0.01440125 -0.42474352 -0.04192422  0.54718434] reward: 1.0 action: 0\n",
      "value_next [4239414.] td_target [4027444.2] td_error [750984.75]\n",
      "state: [-0.02289612 -0.61925216 -0.03098053  0.82636882] reward: 1.0 action: 0\n",
      "value_next [5228868.5] td_target [4967426.] td_error [720990.]\n",
      "state: [-0.03528117 -0.81393706 -0.01445315  1.10914922] reward: 1.0 action: 0\n",
      "value_next [6243070.5] td_target [5930918.] td_error [693628.]\n",
      "state: [-0.05155991 -1.00886613  0.00772983  1.3972632 ] reward: 1.0 action: 0\n",
      "value_next [7285917.5] td_target [6921622.5] td_error [668645.]\n",
      "state: [-0.07173723 -1.20408335  0.0356751   1.69235284] reward: 1.0 action: 0\n",
      "value_next [8361239.5] td_target [7943178.5] td_error [645709.5]\n",
      "state: [-0.0958189  -1.39959864  0.06952215  1.99592542] reward: 1.0 action: 0\n",
      "value_next [9472680.] td_target [8999047.] td_error [624392.]\n",
      "state: [-0.12381087 -1.59537532  0.10944066  2.30930409] reward: 1.0 action: 0\n",
      "value_next [10623556.] td_target [10092379.] td_error [604143.]\n",
      "state: [-0.15571838 -1.79131501  0.15562674  2.63356663] reward: 1.0 action: 0\n",
      "value_next [11816694.] td_target [11225860.] td_error [584282.]\n",
      "state: [-0.19154468 -1.98723979  0.20829807  2.96947184] reward: 1.0 action: 0\n",
      "value_next [13054210.] td_target [12401500.] td_error [563958.]\n",
      "state: [-0.23128947 -2.18287224  0.26768751  3.31737515] reward: 1.0 action: 0\n",
      "value_next [14337277.] td_target [13620414.] td_error [542139.]\n",
      "reward:11.0, max reward:12.0, episode len:11\n",
      "\n",
      "Episode 174/200\n",
      "====================================================\n",
      "state: [ 0.03805156 -0.21259415 -0.02909786  0.2406054 ] reward: 1.0 action: 0\n",
      "value_next [3195870.5] td_target [3036078.] td_error [817025.25]\n",
      "state: [ 0.03379967 -0.40728862 -0.02428575  0.52396991] reward: 1.0 action: 0\n",
      "value_next [4193927.] td_target [3984231.5] td_error [782821.75]\n",
      "state: [ 0.0256539  -0.60206051 -0.01380635  0.80890229] reward: 1.0 action: 0\n",
      "value_next [5213099.5] td_target [4952445.5] td_error [751474.]\n",
      "state: [ 0.01361269 -0.79699058  0.00237169  1.09721058] reward: 1.0 action: 0\n",
      "value_next [6257297.] td_target [5944433.] td_error [722805.5]\n",
      "state: [-0.00232712 -0.99214368  0.02431591  1.39063669] reward: 1.0 action: 0\n",
      "value_next [7330464.5] td_target [6963942.] td_error [696561.]\n",
      "state: [-0.02217    -1.18755989  0.05212864  1.69082253] reward: 1.0 action: 0\n",
      "value_next [8436464.] td_target [8014641.5] td_error [672388.5]\n",
      "state: [-0.04592119 -1.38324389  0.08594509  1.99926868] reward: 1.0 action: 0\n",
      "value_next [9578961.] td_target [9100014.] td_error [649841.]\n",
      "state: [-0.07358607 -1.579152    0.12593046  2.31728295] reward: 1.0 action: 0\n",
      "value_next [10761270.] td_target [10223207.] td_error [628341.]\n",
      "state: [-0.10516911 -1.77517669  0.17227612  2.64591736] reward: 1.0 action: 0\n",
      "value_next [11986172.] td_target [11386864.] td_error [607171.]\n",
      "state: [-0.14067265 -1.97112854  0.22519447  2.98589318] reward: 1.0 action: 0\n",
      "value_next [13255704.] td_target [12592920.] td_error [585440.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 175/200\n",
      "====================================================\n",
      "state: [ 0.00785518 -0.17846628 -0.04028998  0.26891323] reward: 1.0 action: 0\n",
      "value_next [3264429.] td_target [3101208.5] td_error [822011.75]\n",
      "state: [ 0.00428585 -0.37299078 -0.03491172  0.54862115] reward: 1.0 action: 0\n",
      "value_next [4271403.] td_target [4057833.8] td_error [787862.25]\n",
      "state: [-0.00317397 -0.56760536 -0.02393929  0.83010316] reward: 1.0 action: 0\n",
      "value_next [5300018.] td_target [5035018.] td_error [756612.5]\n",
      "state: [-0.01452607 -0.76239204 -0.00733723  1.11516199] reward: 1.0 action: 0\n",
      "value_next [6354254.5] td_target [6036542.5] td_error [728078.]\n",
      "state: [-0.02977391 -0.9574169   0.01496601  1.40553428] reward: 1.0 action: 0\n",
      "value_next [7438121.5] td_target [7066216.5] td_error [701995.]\n",
      "state: [-0.04892225 -1.15272141  0.04307669  1.70285808] reward: 1.0 action: 0\n",
      "value_next [8555555.] td_target [8127778.] td_error [678014.]\n",
      "state: [-0.07197668 -1.34831202  0.07713386  2.00863246] reward: 1.0 action: 0\n",
      "value_next [9710283.] td_target [9224770.] td_error [655679.]\n",
      "state: [-0.09894292 -1.5441474   0.11730651  2.32416697] reward: 1.0 action: 0\n",
      "value_next [10905690.] td_target [10360406.] td_error [634413.]\n",
      "state: [-0.12982587 -1.74012309  0.16378984  2.65051932] reward: 1.0 action: 0\n",
      "value_next [12144629.] td_target [11537398.] td_error [613495.]\n",
      "state: [-0.16462833 -1.93605348  0.21680023  2.98842072] reward: 1.0 action: 0\n",
      "value_next [13429204.] td_target [12757745.] td_error [592029.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 176/200\n",
      "=====================================================\n",
      "state: [-0.00896086 -0.22798699  0.02482004  0.29237925] reward: 1.0 action: 0\n",
      "value_next [3602838.8] td_target [3422697.8] td_error [868856.5]\n",
      "state: [-0.0135206  -0.42345387  0.03066762  0.59278561] reward: 1.0 action: 0\n",
      "value_next [4675076.5] td_target [4441323.5] td_error [832451.5]\n",
      "state: [-0.02198968 -0.6189914   0.04252334  0.89496885] reward: 1.0 action: 0\n",
      "value_next [5770081.5] td_target [5481578.5] td_error [798912.]\n",
      "state: [-0.03436951 -0.81466337  0.06042271  1.20070958] reward: 1.0 action: 0\n",
      "value_next [6891874.] td_target [6547281.] td_error [768028.]\n",
      "state: [-0.05066277 -1.01051257  0.0844369   1.51170074] reward: 1.0 action: 0\n",
      "value_next [8044455.5] td_target [7642233.5] td_error [739484.]\n",
      "state: [-0.07087303 -1.20654967  0.11467092  1.82950373] reward: 1.0 action: 0\n",
      "value_next [9231665.] td_target [8770083.] td_error [712842.5]\n",
      "state: [-0.09500402 -1.40274022  0.15126099  2.15549624] reward: 1.0 action: 0\n",
      "value_next [10457025.] td_target [9934175.] td_error [687539.]\n",
      "state: [-0.12305882 -1.59898946  0.19437092  2.49081024] reward: 1.0 action: 0\n",
      "value_next [11723545.] td_target [11137369.] td_error [662858.]\n",
      "state: [-0.15503861 -1.79512479  0.24418712  2.83625912] reward: 1.0 action: 0\n",
      "value_next [13033513.] td_target [12381838.] td_error [637909.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 177/200\n",
      "=====================================================\n",
      "state: [ 0.00450119 -0.19394554 -0.04321574  0.2760903 ] reward: 1.0 action: 0\n",
      "value_next [3417036.] td_target [3246185.2] td_error [844348.5]\n",
      "state: [ 0.00062228 -0.38842514 -0.03769393  0.55483586] reward: 1.0 action: 0\n",
      "value_next [4454667.5] td_target [4231935.] td_error [809188.]\n",
      "state: [-0.00714622 -0.58299812 -0.02659721  0.83540861] reward: 1.0 action: 0\n",
      "value_next [5514599.] td_target [5238870.] td_error [777028.]\n",
      "state: [-0.01880618 -0.77774683 -0.00988904  1.11960971] reward: 1.0 action: 0\n",
      "value_next [6600956.5] td_target [6270909.5] td_error [747679.5]\n",
      "state: [-0.03436112 -0.97273767  0.01250315  1.40917433] reward: 1.0 action: 0\n",
      "value_next [7717887.] td_target [7331993.5] td_error [720864.5]\n",
      "state: [-0.05381587 -1.16801246  0.04068664  1.70573945] reward: 1.0 action: 0\n",
      "value_next [8869456.] td_target [8425984.] td_error [696222.]\n",
      "state: [-0.07717612 -1.36357814  0.07480143  2.01080377] reward: 1.0 action: 0\n",
      "value_next [10059517.] td_target [9556542.] td_error [673281.]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.10444768 -1.55939404  0.1150175   2.32567739] reward: 1.0 action: 0\n",
      "value_next [11291565.] td_target [10726988.] td_error [651450.]\n",
      "state: [-0.13563557 -1.75535655  0.16153105  2.65141965] reward: 1.0 action: 0\n",
      "value_next [12568545.] td_target [11940119.] td_error [629980.]\n",
      "state: [-0.1707427  -1.95128114  0.21455945  2.98876464] reward: 1.0 action: 0\n",
      "value_next [13892635.] td_target [13198004.] td_error [607951.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 178/200\n",
      "=====================================================\n",
      "state: [ 0.02269744 -0.1863375   0.04289552  0.27795461] reward: 1.0 action: 0\n",
      "value_next [3575744.] td_target [3396957.8] td_error [914201.5]\n",
      "state: [ 0.01897069 -0.38204431  0.04845462  0.58385221] reward: 1.0 action: 0\n",
      "value_next [4692262.] td_target [4457650.] td_error [876011.25]\n",
      "state: [ 0.0113298  -0.57781037  0.06013166  0.89139673] reward: 1.0 action: 0\n",
      "value_next [5832110.5] td_target [5540506.] td_error [840747.]\n",
      "state: [-2.26406536e-04 -7.73694274e-01  7.79595960e-02  1.20235959e+00] reward: 1.0 action: 0\n",
      "value_next [6999393.5] td_target [6649424.5] td_error [808195.]\n",
      "state: [-0.01570029 -0.96973291  0.10200679  1.51842131] reward: 1.0 action: 0\n",
      "value_next [8198189.] td_target [7788280.5] td_error [778021.]\n",
      "state: [-0.03509495 -1.16592974  0.13237521  1.84112477] reward: 1.0 action: 0\n",
      "value_next [9432398.] td_target [8960779.] td_error [749764.]\n",
      "state: [-0.05841355 -1.36224131  0.16919771  2.17182064] reward: 1.0 action: 0\n",
      "value_next [10705563.] td_target [10170286.] td_error [722818.]\n",
      "state: [-0.08565837 -1.55856154  0.21263412  2.51160297] reward: 1.0 action: 0\n",
      "value_next [12020677.] td_target [11419644.] td_error [696421.]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 179/200\n",
      "======================================================\n",
      "state: [ 0.02332867 -0.20932115  0.02512737  0.32022373] reward: 1.0 action: 0\n",
      "value_next [3735498.8] td_target [3548724.8] td_error [909546.75]\n",
      "state: [ 0.01914224 -0.40479177  0.03153185  0.62072377] reward: 1.0 action: 0\n",
      "value_next [4856116.] td_target [4613311.] td_error [871754.5]\n",
      "state: [ 0.01104641 -0.60033956  0.04394632  0.92316848] reward: 1.0 action: 0\n",
      "value_next [6000697.] td_target [5700663.] td_error [836955.5]\n",
      "state: [-9.60383896e-04 -7.96026793e-01  6.24096920e-02  1.22933199e+00] reward: 1.0 action: 0\n",
      "value_next [7173449.5] td_target [6814778.] td_error [804916.]\n",
      "state: [-0.01688092 -0.99189376  0.08699633  1.540897  ] reward: 1.0 action: 0\n",
      "value_next [8378545.5] td_target [7959619.] td_error [775293.5]\n",
      "state: [-0.0367188  -1.18794752  0.11781427  1.85941015] reward: 1.0 action: 0\n",
      "value_next [9619967.] td_target [9138970.] td_error [747618.]\n",
      "state: [-0.06047775 -1.38414868  0.15500247  2.18622909] reward: 1.0 action: 0\n",
      "value_next [10901347.] td_target [10356281.] td_error [721283.]\n",
      "state: [-0.08816072 -1.58039602  0.19872706  2.52245941] reward: 1.0 action: 0\n",
      "value_next [12225764.] td_target [11614477.] td_error [695528.]\n",
      "state: [-0.11976864 -1.77650877  0.24917624  2.86888083] reward: 1.0 action: 0\n",
      "value_next [13595511.] td_target [12915736.] td_error [669401.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 180/200\n",
      "======================================================\n",
      "state: [-0.00620515 -0.21809026  0.02834885  0.29809661] reward: 1.0 action: 0\n",
      "value_next [3811741.2] td_target [3621155.2] td_error [923538.]\n",
      "state: [-0.01056696 -0.41360461  0.03431078  0.59958374] reward: 1.0 action: 0\n",
      "value_next [4950400.] td_target [4702881.] td_error [884865.]\n",
      "state: [-0.01883905 -0.60918937  0.04630246  0.9028736 ] reward: 1.0 action: 0\n",
      "value_next [6113148.] td_target [5807491.5] td_error [849229.5]\n",
      "state: [-0.03102284 -0.80490693  0.06435993  1.20974335] reward: 1.0 action: 0\n",
      "value_next [7304248.5] td_target [6939037.] td_error [816406.5]\n",
      "state: [-0.04712098 -1.00079823  0.0885548   1.52188071] reward: 1.0 action: 0\n",
      "value_next [8527934.] td_target [8101538.] td_error [786052.]\n",
      "state: [-0.06713694 -1.1968715   0.11899241  1.84083927] reward: 1.0 action: 0\n",
      "value_next [9788257.] td_target [9298845.] td_error [757700.]\n",
      "state: [-0.09107437 -1.39308912  0.1558092   2.16798571] reward: 1.0 action: 0\n",
      "value_next [11088915.] td_target [10534470.] td_error [730738.]\n",
      "state: [-0.11893615 -1.58935222  0.19916891  2.50443693] reward: 1.0 action: 0\n",
      "value_next [12433056.] td_target [11811404.] td_error [704399.]\n",
      "state: [-0.1507232  -1.78548309  0.24925765  2.85098667] reward: 1.0 action: 0\n",
      "value_next [13823040.] td_target [13131889.] td_error [677729.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 181/200\n",
      "======================================================\n",
      "state: [-0.03507124 -0.24149253 -0.01709579  0.25396094] reward: 1.0 action: 0\n",
      "value_next [3787110.] td_target [3597755.5] td_error [907253.]\n",
      "state: [-0.03990109 -0.43636626 -0.01201657  0.5412029 ] reward: 1.0 action: 0\n",
      "value_next [4907473.5] td_target [4662101.] td_error [868774.]\n",
      "state: [-0.04862842 -0.63131726 -0.00119252  0.83007551] reward: 1.0 action: 0\n",
      "value_next [6051283.5] td_target [5748720.5] td_error [833483.]\n",
      "state: [-0.06125476 -0.82642289  0.01540899  1.12238315] reward: 1.0 action: 0\n",
      "value_next [7222926.5] td_target [6861781.] td_error [801173.5]\n",
      "state: [-0.07778322 -1.02174347  0.03785666  1.41985929] reward: 1.0 action: 0\n",
      "value_next [8426802.] td_target [8005463.] td_error [771543.5]\n",
      "state: [-0.09821809 -1.21731288  0.06625384  1.72413043] reward: 1.0 action: 0\n",
      "value_next [9667198.] td_target [9183839.] td_error [744186.]\n",
      "state: [-0.12256435 -1.41312743  0.10073645  2.03667221] reward: 1.0 action: 0\n",
      "value_next [10948139.] td_target [10400733.] td_error [718569.]\n",
      "state: [-0.1508269  -1.60913231  0.1414699   2.35875548] reward: 1.0 action: 0\n",
      "value_next [12273222.] td_target [11659562.] td_error [694023.]\n",
      "state: [-0.18300954 -1.80520561  0.18864501  2.69138078] reward: 1.0 action: 0\n",
      "value_next [13645405.] td_target [12963136.] td_error [669711.]\n",
      "state: [-0.21911366 -2.00113987  0.24247262  3.0352014 ] reward: 1.0 action: 0\n",
      "value_next [15066753.] td_target [14313416.] td_error [644603.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 182/200\n",
      "=======================================================\n",
      "state: [-0.03846522 -0.17817168 -0.01774886  0.31496709] reward: 1.0 action: 0\n",
      "value_next [3847779.5] td_target [3655391.5] td_error [920574.75]\n",
      "state: [-0.04202865 -0.37303637 -0.01144952  0.60200019] reward: 1.0 action: 0\n",
      "value_next [4985873.5] td_target [4736581.] td_error [882475.]\n",
      "state: [-4.94893819e-02 -5.67996307e-01  5.90484976e-04  8.91054846e-01] reward: 1.0 action: 0\n",
      "value_next [6148790.5] td_target [5841352.] td_error [847566.]\n",
      "state: [-0.06084931 -0.76312626  0.01841158  1.18392333] reward: 1.0 action: 0\n",
      "value_next [7340962.] td_target [6973915.] td_error [815618.]\n",
      "state: [-0.07611183 -0.95848218  0.04209005  1.48232025] reward: 1.0 action: 0\n",
      "value_next [8566820.] td_target [8138480.] td_error [786308.5]\n",
      "state: [-0.09528148 -1.15409151  0.07173645  1.7878452 ] reward: 1.0 action: 0\n",
      "value_next [9830652.] td_target [9339120.] td_error [759198.]\n",
      "state: [-0.11836331 -1.34994161  0.10749336  2.1019372 ] reward: 1.0 action: 0\n",
      "value_next [11136461.] td_target [10579639.] td_error [733731.]\n",
      "state: [-0.14536214 -1.54596578  0.1495321   2.42581855] reward: 1.0 action: 0\n",
      "value_next [12487766.] td_target [11863379.] td_error [709187.]\n",
      "state: [-0.17628146 -1.74202675  0.19804847  2.76042699] reward: 1.0 action: 0\n",
      "value_next [13887406.] td_target [13193037.] td_error [684686.]\n",
      "state: [-0.21112199 -1.93789777  0.25325701  3.10633668] reward: 1.0 action: 0\n",
      "value_next [15337254.] td_target [14570392.] td_error [659138.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 183/200\n",
      "=======================================================\n",
      "state: [-0.04190592 -0.16579048  0.00791526  0.34058524] reward: 1.0 action: 0\n",
      "value_next [3993003.2] td_target [3793354.] td_error [951311.]\n",
      "state: [-0.04522173 -0.36102415  0.01472696  0.63575364] reward: 1.0 action: 0\n",
      "value_next [5170253.5] td_target [4911742.] td_error [912169.]\n",
      "state: [-0.05244222 -0.55634837  0.02744204  0.93303777] reward: 1.0 action: 0\n",
      "value_next [6373355.] td_target [6054688.] td_error [876211.5]\n",
      "state: [-0.06356918 -0.75182961  0.04610279  1.23421617] reward: 1.0 action: 0\n",
      "value_next [7606783.5] td_target [7226445.] td_error [843194.5]\n",
      "state: [-0.07860578 -0.9475129   0.07078712  1.54097865] reward: 1.0 action: 0\n",
      "value_next [8874977.] td_target [8431229.] td_error [812756.]\n",
      "state: [-0.09755603 -1.14341109  0.10160669  1.85488408] reward: 1.0 action: 0\n",
      "value_next [10182192.] td_target [9673083.] td_error [784411.]\n",
      "state: [-0.12042426 -1.33949214  0.13870437  2.1773097 ] reward: 1.0 action: 0\n",
      "value_next [11532327.] td_target [10955712.] td_error [757539.]\n",
      "state: [-0.1472141  -1.53566405  0.18225056  2.50938995] reward: 1.0 action: 0\n",
      "value_next [12928722.] td_target [12282287.] td_error [731346.]\n",
      "state: [-0.17792738 -1.73175746  0.23243836  2.85194414] reward: 1.0 action: 0\n",
      "value_next [14373917.] td_target [13655222.] td_error [704855.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 184/200\n",
      "=======================================================\n",
      "state: [-0.00108618 -0.20505217 -0.01356838  0.30318885] reward: 1.0 action: 0\n",
      "value_next [3947085.5] td_target [3749732.2] td_error [954062.25]\n",
      "state: [-0.00518723 -0.39997815 -0.0075046   0.59156192] reward: 1.0 action: 0\n",
      "value_next [5124065.5] td_target [4867863.] td_error [914451.75]\n",
      "state: [-0.01318679 -0.59499423  0.00432664  0.88187148] reward: 1.0 action: 0\n",
      "value_next [6326452.5] td_target [6010131.] td_error [878140.]\n",
      "state: [-0.02508667 -0.79017468  0.02196407  1.17591145] reward: 1.0 action: 0\n",
      "value_next [7558823.] td_target [7180883.] td_error [844891.]\n",
      "state: [-0.04089017 -0.98557501  0.0454823   1.47539815] reward: 1.0 action: 0\n",
      "value_next [8825744.] td_target [8384457.5] td_error [814368.5]\n",
      "state: [-0.06060167 -1.18122219  0.07499026  1.78193262] reward: 1.0 action: 0\n",
      "value_next [10131636.] td_target [9625055.] td_error [786122.]\n",
      "state: [-0.08422611 -1.37710304  0.11062891  2.09695461] reward: 1.0 action: 0\n",
      "value_next [11480614.] td_target [10906584.] td_error [759568.]\n",
      "state: [-0.11176817 -1.57315018  0.15256801  2.42168622] reward: 1.0 action: 0\n",
      "value_next [12876300.] td_target [12232486.] td_error [733971.]\n",
      "state: [-0.14323118 -1.76922548  0.20100173  2.75706395] reward: 1.0 action: 0\n",
      "value_next [14321598.] td_target [13605519.] td_error [708413.]\n",
      "state: [-0.17861569 -1.96510116  0.25614301  3.10365954] reward: 1.0 action: 0\n",
      "value_next [15818422.] td_target [15027502.] td_error [681773.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 185/200\n",
      "========================================================\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [ 0.00597787 -0.22410055 -0.01214729  0.30010873] reward: 1.0 action: 0\n",
      "value_next [4038531.8] td_target [3836606.] td_error [969493.75]\n",
      "state: [ 0.00149586 -0.41904727 -0.00614512  0.58893602] reward: 1.0 action: 0\n",
      "value_next [5236009.5] td_target [4974210.] td_error [929174.75]\n",
      "state: [-0.00688509 -0.61408263  0.0056336   0.87967689] reward: 1.0 action: 0\n",
      "value_next [6459324.5] td_target [6136359.] td_error [892196.5]\n",
      "state: [-0.01916674 -0.80928067  0.02322714  1.17412557] reward: 1.0 action: 0\n",
      "value_next [7713114.5] td_target [7327459.5] td_error [858325.5]\n",
      "state: [-0.03535235 -1.00469666  0.04670965  1.47399866] reward: 1.0 action: 0\n",
      "value_next [9002007.] td_target [8551908.] td_error [827218.5]\n",
      "state: [-0.05544629 -1.20035734  0.07618963  1.78089721] reward: 1.0 action: 0\n",
      "value_next [10330479.] td_target [9813956.] td_error [798417.]\n",
      "state: [-0.07945343 -1.39624917  0.11180757  2.09626062] reward: 1.0 action: 0\n",
      "value_next [11702698.] td_target [11117564.] td_error [771328.]\n",
      "state: [-0.10737842 -1.59230435  0.15373278  2.42131013] reward: 1.0 action: 0\n",
      "value_next [13122332.] td_target [12466216.] td_error [745204.]\n",
      "state: [-0.13922451 -1.78838421  0.20215898  2.7569808 ] reward: 1.0 action: 0\n",
      "value_next [14592311.] td_target [13862696.] td_error [719113.]\n",
      "state: [-0.17499219 -1.98426034  0.2572986   3.1038423 ] reward: 1.0 action: 0\n",
      "value_next [16114560.] td_target [15308833.] td_error [691913.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 186/200\n",
      "========================================================\n",
      "state: [ 0.01106694 -0.15774232  0.00363575  0.26187041] reward: 1.0 action: 0\n",
      "value_next [3866431.5] td_target [3673111.] td_error [1006483.75]\n",
      "state: [ 0.00791209 -0.35291598  0.00887315  0.55569788] reward: 1.0 action: 0\n",
      "value_next [5091803.5] td_target [4837214.5] td_error [964589.]\n",
      "state: [ 0.00085377 -0.54816138  0.01998711  0.85116312] reward: 1.0 action: 0\n",
      "value_next [6342920.5] td_target [6025775.5] td_error [926088.]\n",
      "state: [-0.01010946 -0.74355007  0.03701037  1.15006343] reward: 1.0 action: 0\n",
      "value_next [7624459.5] td_target [7243237.5] td_error [890752.5]\n",
      "state: [-0.02498046 -0.93913498  0.06001164  1.45411845] reward: 1.0 action: 0\n",
      "value_next [8941098.] td_target [8494044.] td_error [858240.]\n",
      "state: [-0.04376316 -1.13494026  0.08909401  1.76493049] reward: 1.0 action: 0\n",
      "value_next [10297356.] td_target [9782489.] td_error [828077.]\n",
      "state: [-0.06646196 -1.33094925  0.12439262  2.08393685] reward: 1.0 action: 0\n",
      "value_next [11697442.] td_target [11112571.] td_error [799661.]\n",
      "state: [-0.09308095 -1.52709022  0.16607136  2.41235219] reward: 1.0 action: 0\n",
      "value_next [13145038.] td_target [12487787.] td_error [772218.]\n",
      "state: [-0.12362275 -1.72321962  0.2143184   2.75109956] reward: 1.0 action: 0\n",
      "value_next [14643069.] td_target [13910916.] td_error [744787.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 187/200\n",
      "========================================================\n",
      "state: [-0.04708196 -0.17432238 -0.0317503   0.24161132] reward: 1.0 action: 0\n",
      "value_next [3946939.2] td_target [3749593.2] td_error [990238.25]\n",
      "state: [-0.05056841 -0.36897675 -0.02691807  0.52411269] reward: 1.0 action: 0\n",
      "value_next [5159525.5] td_target [4901550.] td_error [948492.]\n",
      "state: [-0.05794794 -0.56370973 -0.01643582  0.80819339] reward: 1.0 action: 0\n",
      "value_next [6397359.5] td_target [6077492.5] td_error [910266.]\n",
      "state: [-6.92221363e-02 -7.58602636e-01 -2.71950470e-04  1.09566135e+00] reward: 1.0 action: 0\n",
      "value_next [7665238.] td_target [7281977.] td_error [875338.5]\n",
      "state: [-0.08439419 -0.953721    0.02164128  1.38825894] reward: 1.0 action: 0\n",
      "value_next [8967983.] td_target [8519585.] td_error [843392.]\n",
      "state: [-0.10346861 -1.14910584  0.04940646  1.68762961] reward: 1.0 action: 0\n",
      "value_next [10310315.] td_target [9794800.] td_error [814001.]\n",
      "state: [-0.12645073 -1.34476306  0.08315905  1.99527689] reward: 1.0 action: 0\n",
      "value_next [11696701.] td_target [11111867.] td_error [786619.]\n",
      "state: [-0.15334599 -1.54065063  0.12306459  2.31251344] reward: 1.0 action: 0\n",
      "value_next [13131171.] td_target [12474613.] td_error [760542.]\n",
      "state: [-0.184159   -1.73666318  0.16931485  2.64039836] reward: 1.0 action: 0\n",
      "value_next [14617110.] td_target [13886255.] td_error [734900.]\n",
      "state: [-0.21889226 -1.93261396  0.22212282  2.9796626 ] reward: 1.0 action: 0\n",
      "value_next [16156994.] td_target [15349145.] td_error [708616.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 188/200\n",
      "========================================================\n",
      "state: [ 0.00264538 -0.19475667  0.00894703  0.30085987] reward: 1.0 action: 0\n",
      "value_next [4197882.5] td_target [3987989.2] td_error [1031757.]\n",
      "state: [-0.00124976 -0.39000499  0.01496422  0.59635103] reward: 1.0 action: 0\n",
      "value_next [5466545.] td_target [5193218.5] td_error [988860.]\n",
      "state: [-0.00904986 -0.58533313  0.02689124  0.89370975] reward: 1.0 action: 0\n",
      "value_next [6762246.5] td_target [6424135.] td_error [949435.5]\n",
      "state: [-0.02075652 -0.78080926  0.04476544  1.19472301] reward: 1.0 action: 0\n",
      "value_next [8089819.5] td_target [7685329.5] td_error [913229.5]\n",
      "state: [-0.0363727  -0.97648137  0.0686599   1.50109377] reward: 1.0 action: 0\n",
      "value_next [9454067.] td_target [8981365.] td_error [879866.]\n",
      "state: [-0.05590233 -1.17236663  0.09868178  1.81439951] reward: 1.0 action: 0\n",
      "value_next [10859603.] td_target [10316624.] td_error [848833.]\n",
      "state: [-0.07934966 -1.36843897  0.13496977  2.13604253] reward: 1.0 action: 0\n",
      "value_next [12310679.] td_target [11695146.] td_error [819480.]\n",
      "state: [-0.10671844 -1.56461427  0.17769062  2.46719003] reward: 1.0 action: 0\n",
      "value_next [13810956.] td_target [13120409.] td_error [790974.]\n",
      "state: [-0.13801073 -1.76073324  0.22703442  2.80870306] reward: 1.0 action: 0\n",
      "value_next [15363264.] td_target [14595102.] td_error [762285.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 189/200\n",
      "=========================================================\n",
      "state: [-0.00624043 -0.1801849   0.01635412  0.34562933] reward: 1.0 action: 0\n",
      "value_next [4363828.] td_target [4145637.5] td_error [1047056.75]\n",
      "state: [-0.00984412 -0.37553562  0.0232667   0.64342413] reward: 1.0 action: 0\n",
      "value_next [5657393.] td_target [5374524.5] td_error [1003982.]\n",
      "state: [-0.01735484 -0.57097399  0.03613518  0.94334213] reward: 1.0 action: 0\n",
      "value_next [6979137.] td_target [6630181.] td_error [964378.5]\n",
      "state: [-0.02877432 -0.76656368  0.05500203  1.24715657] reward: 1.0 action: 0\n",
      "value_next [8333940.] td_target [7917244.] td_error [927968.5]\n",
      "state: [-0.04410559 -0.96234613  0.07994516  1.5565486 ] reward: 1.0 action: 0\n",
      "value_next [9726627.] td_target [9240297.] td_error [894348.5]\n",
      "state: [-0.06335251 -1.15832951  0.11107613  1.87306358] reward: 1.0 action: 0\n",
      "value_next [11161797.] td_target [10603708.] td_error [862970.]\n",
      "state: [-0.0865191  -1.35447563  0.1485374   2.19805887] reward: 1.0 action: 0\n",
      "value_next [12643635.] td_target [12011454.] td_error [833134.]\n",
      "state: [-0.11360862 -1.55068461  0.19249858  2.53264113] reward: 1.0 action: 0\n",
      "value_next [14175682.] td_target [13466899.] td_error [803953.]\n",
      "state: [-0.14462231 -1.74677723  0.2431514   2.87759281] reward: 1.0 action: 0\n",
      "value_next [15760573.] td_target [14972545.] td_error [774339.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 190/200\n",
      "=========================================================\n",
      "state: [-0.03544519 -0.22872136  0.04403579  0.33074616] reward: 1.0 action: 0\n",
      "value_next [4620212.] td_target [4389202.5] td_error [1075710.]\n",
      "state: [-0.04001961 -0.42444158  0.05065072  0.63698423] reward: 1.0 action: 0\n",
      "value_next [5955988.] td_target [5658189.5] td_error [1030809.5]\n",
      "state: [-0.04850845 -0.62023193  0.0633904   0.94517805] reward: 1.0 action: 0\n",
      "value_next [7320322.5] td_target [6954307.5] td_error [989377.5]\n",
      "state: [-0.06091308 -0.81614782  0.08229396  1.25708594] reward: 1.0 action: 0\n",
      "value_next [8718119.] td_target [8282214.] td_error [951127.]\n",
      "state: [-0.07723604 -1.01222099  0.10743568  1.57436749] reward: 1.0 action: 0\n",
      "value_next [10154212.] td_target [9646502.] td_error [915631.]\n",
      "state: [-0.09748046 -1.20844751  0.13892303  1.89853556] reward: 1.0 action: 0\n",
      "value_next [11633171.] td_target [11051513.] td_error [882307.]\n",
      "state: [-0.12164941 -1.40477386  0.17689374  2.23090002] reward: 1.0 action: 0\n",
      "value_next [13159098.] td_target [12501144.] td_error [850401.]\n",
      "state: [-0.14974489 -1.6010809   0.22151174  2.57250149] reward: 1.0 action: 0\n",
      "value_next [14735373.] td_target [13998605.] td_error [818964.]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 191/200\n",
      "=========================================================\n",
      "state: [ 0.03062073 -0.22051811 -0.04819743  0.26313475] reward: 1.0 action: 0\n",
      "value_next [4166685.2] td_target [3958352.] td_error [1032779.]\n",
      "state: [ 0.02621036 -0.41492015 -0.04293474  0.54023462] reward: 1.0 action: 0\n",
      "value_next [5434228.] td_target [5162517.5] td_error [989565.25]\n",
      "state: [ 0.01791196 -0.60941311 -0.03213005  0.81908627] reward: 1.0 action: 0\n",
      "value_next [6728588.5] td_target [6392160.] td_error [950069.5]\n",
      "state: [ 0.0057237  -0.80408092 -0.01574832  1.10149266] reward: 1.0 action: 0\n",
      "value_next [8054840.5] td_target [7652099.5] td_error [914052.5]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.01035792 -0.99899216  0.00628153  1.38919345] reward: 1.0 action: 0\n",
      "value_next [9418075.] td_target [8947172.] td_error [881178.5]\n",
      "state: [-0.03033776 -1.1941918   0.0340654   1.6838339 ] reward: 1.0 action: 0\n",
      "value_next [10823280.] td_target [10282117.] td_error [851011.]\n",
      "state: [-0.0542216  -1.38969112  0.06774208  1.98692603] reward: 1.0 action: 0\n",
      "value_next [12275185.] td_target [11661427.] td_error [822984.]\n",
      "state: [-0.08201542 -1.58545527  0.1074806   2.29979969] reward: 1.0 action: 0\n",
      "value_next [13778090.] td_target [13089186.] td_error [796385.]\n",
      "state: [-0.11372453 -1.78138829  0.15347659  2.62354177] reward: 1.0 action: 0\n",
      "value_next [15335636.] td_target [14568855.] td_error [770324.]\n",
      "state: [-0.14935229 -1.9773153   0.20594743  2.95892296] reward: 1.0 action: 0\n",
      "value_next [16950548.] td_target [16103021.] td_error [743693.]\n",
      "state: [-0.1888986  -2.17296265  0.26512589  3.30631368] reward: 1.0 action: 0\n",
      "value_next [18624314.] td_target [17693100.] td_error [715156.]\n",
      "reward:11.0, max reward:12.0, episode len:11\n",
      "\n",
      "Episode 192/200\n",
      "==========================================================\n",
      "state: [ 0.00866609 -0.18979147 -0.006952    0.24285694] reward: 1.0 action: 0\n",
      "value_next [4225367.5] td_target [4014100.] td_error [1085640.8]\n",
      "state: [ 0.00487026 -0.38481344 -0.00209486  0.53333894] reward: 1.0 action: 0\n",
      "value_next [5549395.] td_target [5271926.] td_error [1040059.]\n",
      "state: [-0.00282601 -0.57990586  0.00857192  0.82536105] reward: 1.0 action: 0\n",
      "value_next [6900902.5] td_target [6555858.5] td_error [998207.]\n",
      "state: [-0.01442412 -0.77514399  0.02507914  1.12072761] reward: 1.0 action: 0\n",
      "value_next [8284996.5] td_target [7870747.5] td_error [959844.5]\n",
      "state: [-0.029927   -0.97058575  0.04749369  1.42117054] reward: 1.0 action: 0\n",
      "value_next [9706788.] td_target [9221449.] td_error [924609.5]\n",
      "state: [-0.04933872 -1.16626198  0.0759171   1.72831182] reward: 1.0 action: 0\n",
      "value_next [11171238.] td_target [10612677.] td_error [892012.]\n",
      "state: [-0.07266396 -1.36216503  0.11048334  2.0436182 ] reward: 1.0 action: 0\n",
      "value_next [12682996.] td_target [12048847.] td_error [861422.]\n",
      "state: [-0.09990726 -1.55823495  0.15135571  2.36834596] reward: 1.0 action: 0\n",
      "value_next [14246175.] td_target [13533867.] td_error [832035.]\n",
      "state: [-0.13107196 -1.75434325  0.19872262  2.70347428] reward: 1.0 action: 0\n",
      "value_next [15864105.] td_target [15070901.] td_error [802842.]\n",
      "state: [-0.16615882 -1.9502743   0.25279211  3.04962752] reward: 1.0 action: 0\n",
      "value_next [17539050.] td_target [16662098.] td_error [772615.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 193/200\n",
      "==========================================================\n",
      "state: [-2.18917491e-04 -1.89466403e-01  2.80865827e-02  3.23523610e-01] reward: 1.0 action: 0\n",
      "value_next [4597522.] td_target [4367647.] td_error [1119520.]\n",
      "state: [-0.00400825 -0.3849768   0.03455705  0.6249299 ] reward: 1.0 action: 0\n",
      "value_next [5976611.] td_target [5677781.5] td_error [1073204.5]\n",
      "state: [-0.01170778 -0.5805637   0.04705565  0.92829286] reward: 1.0 action: 0\n",
      "value_next [7385304.] td_target [7016039.5] td_error [1030545.]\n",
      "state: [-0.02331906 -0.77628827  0.06562151  1.235384  ] reward: 1.0 action: 0\n",
      "value_next [8828723.] td_target [8387287.5] td_error [991250.5]\n",
      "state: [-0.03884482 -0.97218936  0.09032919  1.54788204] reward: 1.0 action: 0\n",
      "value_next [10311937.] td_target [9796341.] td_error [954899.]\n",
      "state: [-0.05828861 -1.16827212  0.12128683  1.86732775] reward: 1.0 action: 0\n",
      "value_next [11839767.] td_target [11247780.] td_error [920904.]\n",
      "state: [-0.08165405 -1.36449473  0.15863339  2.19507038] reward: 1.0 action: 0\n",
      "value_next [13416582.] td_target [12745754.] td_error [888512.]\n",
      "state: [-0.10894395 -1.56075281  0.20253479  2.53220406] reward: 1.0 action: 0\n",
      "value_next [15046058.] td_target [14293756.] td_error [856778.]\n",
      "state: [-0.140159   -1.75686166  0.25317887  2.87949356] reward: 1.0 action: 0\n",
      "value_next [16730888.] td_target [15894344.] td_error [824530.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 194/200\n",
      "==========================================================\n",
      "state: [ 0.01628423 -0.2388737   0.04638179  0.27658633] reward: 1.0 action: 0\n",
      "value_next [4672631.] td_target [4439000.5] td_error [1150798.5]\n",
      "state: [ 0.01150676 -0.43462563  0.05191352  0.5835298 ] reward: 1.0 action: 0\n",
      "value_next [6086474.] td_target [5782151.] td_error [1102383.]\n",
      "state: [ 0.00281424 -0.63043494  0.06358411  0.8921038 ] reward: 1.0 action: 0\n",
      "value_next [7529611.] td_target [7153131.5] td_error [1057669.5]\n",
      "state: [-0.00979446 -0.82635911  0.08142619  1.2040773 ] reward: 1.0 action: 0\n",
      "value_next [9007220.] td_target [8556860.] td_error [1016380.]\n",
      "state: [-0.02632164 -1.02243372  0.10550774  1.5211276 ] reward: 1.0 action: 0\n",
      "value_next [10524422.] td_target [9998202.] td_error [978083.]\n",
      "state: [-0.04677031 -1.21866063  0.13593029  1.84479296] reward: 1.0 action: 0\n",
      "value_next [12086087.] td_target [11481784.] td_error [942185.]\n",
      "state: [-0.07114353 -1.41499435  0.17282615  2.17641753] reward: 1.0 action: 0\n",
      "value_next [13696624.] td_target [13011794.] td_error [907923.]\n",
      "state: [-0.09944341 -1.61132628  0.2163545   2.51708652] reward: 1.0 action: 0\n",
      "value_next [15359709.] td_target [14591724.] td_error [874318.]\n",
      "reward:8.0, max reward:12.0, episode len:8\n",
      "\n",
      "Episode 195/200\n",
      "==========================================================\n",
      "state: [-0.04414444 -0.20958493 -0.03963301  0.32409426] reward: 1.0 action: 0\n",
      "value_next [4715949.5] td_target [4480153.] td_error [1087025.2]\n",
      "state: [-0.04833614 -0.4041208  -0.03315113  0.60401985] reward: 1.0 action: 0\n",
      "value_next [6068262.5] td_target [5764850.5] td_error [1041962.5]\n",
      "state: [-0.05641856 -0.59876381 -0.02107073  0.88607931] reward: 1.0 action: 0\n",
      "value_next [7450147.5] td_target [7077641.] td_error [1000779.5]\n",
      "state: [-0.06839383 -0.79359348 -0.00334914  1.1720646 ] reward: 1.0 action: 0\n",
      "value_next [8866979.] td_target [8423631.] td_error [963203.]\n",
      "state: [-0.0842657  -0.98867173  0.02009215  1.46369569] reward: 1.0 action: 0\n",
      "value_next [10324115.] td_target [9807910.] td_error [928846.]\n",
      "state: [-0.10403914 -1.18403399  0.04936606  1.76258663] reward: 1.0 action: 0\n",
      "value_next [11826763.] td_target [11235426.] td_error [897211.]\n",
      "state: [-0.12771982 -1.37967835  0.0846178   2.07020338] reward: 1.0 action: 0\n",
      "value_next [13379807.] td_target [12710817.] td_error [867654.]\n",
      "state: [-0.15531339 -1.5755523   0.12602186  2.38781101] reward: 1.0 action: 0\n",
      "value_next [14987599.] td_target [14238220.] td_error [839362.]\n",
      "state: [-0.18682443 -1.77153682  0.17377808  2.71640892] reward: 1.0 action: 0\n",
      "value_next [16653713.] td_target [15821028.] td_error [811319.]\n",
      "state: [-0.22225517 -1.96742789  0.22810626  3.05665392] reward: 1.0 action: 0\n",
      "value_next [18380650.] td_target [17461620.] td_error [782283.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 196/200\n",
      "===========================================================\n",
      "state: [-0.01986462 -0.18632607  0.01186942  0.26786976] reward: 1.0 action: 0\n",
      "value_next [4622628.] td_target [4391497.5] td_error [1156753.2]\n",
      "state: [-0.02359114 -0.38161539  0.01722681  0.56427263] reward: 1.0 action: 0\n",
      "value_next [6039700.] td_target [5737716.] td_error [1108242.]\n",
      "state: [-0.03122345 -0.57697476  0.02851226  0.86233263] reward: 1.0 action: 0\n",
      "value_next [7486287.5] td_target [7111974.] td_error [1063635.]\n",
      "state: [-0.04276294 -0.77247309  0.04575892  1.1638424 ] reward: 1.0 action: 0\n",
      "value_next [8967783.] td_target [8519395.] td_error [1022659.5]\n",
      "state: [-0.0582124  -0.96815996  0.06903577  1.47051389] reward: 1.0 action: 0\n",
      "value_next [10489550.] td_target [9965073.] td_error [984905.]\n",
      "state: [-0.0775756  -1.16405511  0.09844604  1.78393707] reward: 1.0 action: 0\n",
      "value_next [12056756.] td_target [11453919.] td_error [949816.]\n",
      "state: [-0.1008567  -1.36013606  0.13412478  2.10553079] reward: 1.0 action: 0\n",
      "value_next [13674162.] td_target [12990455.] td_error [916672.]\n",
      "state: [-0.12805942 -1.5563235   0.1762354   2.43648351] reward: 1.0 action: 0\n",
      "value_next [15345898.] td_target [14578604.] td_error [884560.]\n",
      "state: [-0.15918589 -1.75246434  0.22496507  2.77768305] reward: 1.0 action: 0\n",
      "value_next [17075174.] td_target [16221416.] td_error [852342.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n",
      "Episode 197/200\n",
      "===========================================================\n",
      "state: [ 0.0406954  -0.2144116  -0.03791562  0.23405117] reward: 1.0 action: 0\n",
      "value_next [4432817.] td_target [4211177.] td_error [1137435.8]\n",
      "state: [ 0.03640717 -0.40897188 -0.0332346   0.51453731] reward: 1.0 action: 0\n",
      "value_next [5819773.5] td_target [5528786.] td_error [1089533.]\n",
      "state: [ 0.02822773 -0.60361041 -0.02294385  0.7965647 ] reward: 1.0 action: 0\n",
      "value_next [7235374.5] td_target [6873606.5] td_error [1045690.]\n",
      "state: [ 0.01615552 -0.79841013 -0.00701256  1.08194247] reward: 1.0 action: 0\n",
      "value_next [8685124.] td_target [8250868.5] td_error [1005652.5]\n",
      "state: [ 1.87320073e-04 -9.93438817e-01  1.46262886e-02  1.37241664e+00] reward: 1.0 action: 0\n",
      "value_next [10174549.] td_target [9665822.] td_error [969061.]\n",
      "state: [-0.01968146 -1.18874057  0.04207462  1.6696379 ] reward: 1.0 action: 0\n",
      "value_next [11709054.] td_target [11123602.] td_error [935433.]\n",
      "state: [-0.04345627 -1.3843255   0.07546738  1.97512195] reward: 1.0 action: 0\n",
      "value_next [13293757.] td_target [12629070.] td_error [904151.]\n",
      "state: [-0.07114278 -1.58015725  0.11496982  2.29019984] reward: 1.0 action: 0\n",
      "value_next [14933302.] td_target [14186638.] td_error [874434.]\n",
      "state: [-0.10274592 -1.77613782  0.16077382  2.61595668] reward: 1.0 action: 0\n",
      "value_next [16631603.] td_target [15800024.] td_error [845300.]\n",
      "state: [-0.13826868 -1.97208979  0.21309295  2.95315818] reward: 1.0 action: 0\n",
      "value_next [18391562.] td_target [17471984.] td_error [815539.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 198/200\n",
      "===========================================================\n",
      "state: [-0.0275687  -0.17874146 -0.0352619   0.32887998] reward: 1.0 action: 0\n",
      "value_next [4816146.] td_target [4575339.5] td_error [1141027.5]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "state: [-0.03114353 -0.37334414 -0.0286843   0.6102377 ] reward: 1.0 action: 0\n",
      "value_next [6228594.] td_target [5917165.] td_error [1094051.5]\n",
      "state: [-0.03861041 -0.56805365 -0.01647955  0.89374978] reward: 1.0 action: 0\n",
      "value_next [7671992.5] td_target [7288394.] td_error [1051099.5]\n",
      "state: [-0.04997148 -0.76294827  0.00139545  1.18120725] reward: 1.0 action: 0\n",
      "value_next [9151909.] td_target [8694314.] td_error [1011879.]\n",
      "state: [-0.06523045 -0.9580883   0.02501959  1.47432728] reward: 1.0 action: 0\n",
      "value_next [10673895.] td_target [10140201.] td_error [975991.]\n",
      "state: [-0.08439221 -1.15350692  0.05450614  1.77471845] reward: 1.0 action: 0\n",
      "value_next [12243330.] td_target [11631164.] td_error [942904.]\n",
      "state: [-0.10746235 -1.34919912  0.0900005   2.08383769] reward: 1.0 action: 0\n",
      "value_next [13865249.] td_target [13171987.] td_error [911937.]\n",
      "state: [-0.13444633 -1.54510828  0.13167726  2.40293652] reward: 1.0 action: 0\n",
      "value_next [15544115.] td_target [14766910.] td_error [882229.]\n",
      "state: [-0.1653485  -1.74111006  0.17973599  2.73299536] reward: 1.0 action: 0\n",
      "value_next [17283566.] td_target [16419388.] td_error [852710.]\n",
      "state: [-0.2001707  -1.93699376  0.2343959   3.07464598] reward: 1.0 action: 0\n",
      "value_next [19086094.] td_target [18131792.] td_error [822068.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 199/200\n",
      "============================================================\n",
      "state: [ 0.03670649 -0.22736851 -0.02822736  0.26542099] reward: 1.0 action: 0\n",
      "value_next [4721031.] td_target [4484980.5] td_error [1173046.2]\n",
      "state: [ 0.03215912 -0.42207645 -0.02291894  0.5490688 ] reward: 1.0 action: 0\n",
      "value_next [6159744.] td_target [5851757.5] td_error [1123886.]\n",
      "state: [ 0.02371759 -0.61686908 -0.01193756  0.83444343] reward: 1.0 action: 0\n",
      "value_next [7628669.] td_target [7247236.5] td_error [1078860.]\n",
      "state: [ 0.01138021 -0.81182592  0.00475131  1.1233483 ] reward: 1.0 action: 0\n",
      "value_next [9133452.] td_target [8676780.] td_error [1037693.5]\n",
      "state: [-0.00485631 -1.00700984  0.02721827  1.41751775] reward: 1.0 action: 0\n",
      "value_next [10679739.] td_target [10145753.] td_error [999993.]\n",
      "state: [-0.02499651 -1.20245801  0.05556863  1.7185826 ] reward: 1.0 action: 0\n",
      "value_next [12273024.] td_target [11659374.] td_error [965240.]\n",
      "state: [-0.04904567 -1.39817104  0.08994028  2.02802778] reward: 1.0 action: 0\n",
      "value_next [13918466.] td_target [13222544.] td_error [932762.]\n",
      "state: [-0.07700909 -1.59409986  0.13050083  2.34713993] reward: 1.0 action: 0\n",
      "value_next [15620682.] td_target [14839649.] td_error [901712.]\n",
      "state: [-0.10889109 -1.7901299   0.17744363  2.67694311] reward: 1.0 action: 0\n",
      "value_next [17383474.] td_target [16514301.] td_error [871024.]\n",
      "state: [-0.14469368 -1.98606283  0.2309825   3.01812284] reward: 1.0 action: 0\n",
      "value_next [19209534.] td_target [18249060.] td_error [839408.]\n",
      "reward:10.0, max reward:12.0, episode len:10\n",
      "\n",
      "Episode 200/200\n",
      "============================================================\n",
      "state: [-0.00690213 -0.15766702  0.01359794  0.25482119] reward: 1.0 action: 0\n",
      "value_next [4737537.] td_target [4500661.] td_error [1230369.]\n",
      "state: [-0.01005547 -0.35298046  0.01869437  0.55176193] reward: 1.0 action: 0\n",
      "value_next [6234907.] td_target [5923162.5] td_error [1178786.]\n",
      "state: [-0.01711507 -0.54835991  0.02972961  0.85027572] reward: 1.0 action: 0\n",
      "value_next [7763098.] td_target [7374944.] td_error [1131331.]\n",
      "state: [-0.02808227 -0.74387437  0.04673512  1.15215703] reward: 1.0 action: 0\n",
      "value_next [9327778.] td_target [8861390.] td_error [1087722.]\n",
      "state: [-0.04295976 -0.93957383  0.06977826  1.45912083] reward: 1.0 action: 0\n",
      "value_next [10934593.] td_target [10387864.] td_error [1047529.]\n",
      "state: [-0.06175124 -1.13547876  0.09896068  1.77276128] reward: 1.0 action: 0\n",
      "value_next [12588981.] td_target [11959533.] td_error [1010172.]\n",
      "state: [-0.08446081 -1.33156773  0.1344159   2.09450263] reward: 1.0 action: 0\n",
      "value_next [14295963.] td_target [13581166.] td_error [974893.]\n",
      "state: [-0.11109217 -1.52776289  0.17630596  2.42554016] reward: 1.0 action: 0\n",
      "value_next [16059892.] td_target [15256898.] td_error [940733.]\n",
      "state: [-0.14164742 -1.72391301  0.22481676  2.7667702 ] reward: 1.0 action: 0\n",
      "value_next [17884164.] td_target [16989956.] td_error [906503.]\n",
      "reward:9.0, max reward:12.0, episode len:9\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Train the actor-critic agent on CartPole for 200 episodes.\n",
    "# NOTE(review): despite the name, `mcpg` (presumably Monte-Carlo policy\n",
    "# gradient) holds an Actor_Critic instance and `mcpg_learn` runs the\n",
    "# actor-critic training loop defined earlier in this notebook.\n",
    "\n",
    "# Clear the default TF1 graph so re-running this cell does not fail with\n",
    "# duplicate-variable errors from a previously built network.\n",
    "tf.reset_default_graph()\n",
    "mcpg = Actor_Critic(env, num_episodes=200)\n",
    "result = mcpg.mcpg_learn()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAxUAAAFRCAYAAAACS4BZAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzsnXecJEd96L81YXdnL+h0t0LSkUQy2TghkknGZAwGHk2wSQbL2Bgcnv1sP5vghA3m+QHGSYAMxiY08MiYDAYkJBBKSAgkgYTy3e3dXtyZ3Qn1/uiunpqajhN2b3t+389nPzvTXdNdXV1dXb9YSmuNIAiCIAiCIAjCqFQ2uwKCIAiCIAiCIGxtRKgQBEEQBEEQBGEsRKgQBEEQBEEQBGEsRKgQBEEQBEEQBGEsRKgQBEEQBEEQBGEsRKgQBEEQBEEQBGEsRKgQBEGYIEqpdyulvrgB59FKqV+d9nk2m1m5Tti4viMIgjANRKgQBEEgmtDpmL/jBQ/1O8BzplHHSaOUer1S6rrNrgeAUuqdSqmvbnY9poFS6oaEvhX9hUW3TN8RBEFwqW12BQRBEE4ivg54zrZekQNorY9MrjrCVkMpNae1Xnc2Pxiohp/PBC4Bng1cYBeSviMIwlZGLBWCIAh91rXWtzt/+81OpdRXlVLnKaX+Vim1rJQ6GmrYG1aZARcWpdT9lVKfU0odVkqdUEpdrZR6obX/TKXUB8L9zfAcP2dXSin1WKXUFUqpVvj/sW7FlVKnh+c+oJQ6ppQ6Xyn1qHEbRCn1KqXU98NzX6uU+lOlVM3af4NS6i+UUm9VSh1SSu1TSr1ZKVW1yjSUUucqpY4opVaUUv+klPobYyVRSr0eeBnwaEt7/xKrGjuVUu8Nr+smpdT/ylHvhyqlvha26YpS6n1KqTuE++4VnuPhzm8eEm6/T/h9e3hdtyilVpVSlyqlnmWVPyss/ytKqc8opU4Ab3DrorU+YPoTcCDcfMjuZ+Hx3L7zbqXUF8N7cLNS6njY3+pKqVcopX4cXtu5Sqm5IvdNEARh0ohQIQiCUIz/AewBHgn8CvB04I0p5d8PHAQeDjwQ+H1gBUAppYCPAfcBngacDewDvqCUWgrL7AU+BXwH+BngfwJvtU8QCjVfAXYATwZ+GvhMeJz7jnqh4WT/D4A/Ae5L4J7zG8DrnKKvAm4DHgK8Gvhd4EXW/jcCzwBeCDwUOAL8lrX/zcD7gG8SaPLPBD5o7X8d8DXgp4C/A94YJ1hZ9T4D+DxwM0Gb/hLwAOAjAFrra4ELgRc7P30h8C2t9ffDe/NJ4EHAc8Pf/zPwAaXU45zfvTGs/wOBf0yq14g8GPg54PHAC4BfBT5O0J+eHNb5hQRCGVDovgmCIEwOrbX8yZ/8yd/M/wHvBjrAcefvk1aZrwI3AFVr2znAGrDNOs4Xrf1HgJcknPNxgAbuZ22bJ5igvzb8/lfAj4GaVeZp4e9+Nfz+EoIJdM05/peBt6Rc8+uB6xL2LQKrwJOc7S8CDlvfbwA+4ZT5LPD+8PO2sH1e5pS50D438E7gqzH10MDbnG3fB/4m5br+MmyPOWvbg8JjPSr8/goC4W4+/F4nsCK8Mvz+GKAFnOIc+zzgY+Hns8JjvqZAP7tT+JvHJPTBLzrf9zvX8Wlg2dQ73PZx4MNF7pv8yZ/8yd+k/8QUKgiC0OcihrXXq873b2mtu9b384E54B7AFTHHfDPwztCd56sEE/BLwn33Bw5qrb9nCmut15RSF4X7AO4XnrNjHfMbzjkeDJwBHA4U7BHzQDOmTnm4P9AAPmIFEkMQG7CglDpNa21ceS5zfnsLcLfw8z0J2udCp8w3CSwIeYg7/ukZdb9QW7ENWuvLlVJHwn1fI7CEvIXA0vQh4CnATuAD4U8eHNb7FqdN54BrnfN9K+d1jMLVejBG43bgB1rrNWebsUgVuW+CIAgTQ4QKQRCEPk2t
ddFsSCptp9b6L5VS/wk8CfgF4H8rpd6ktf4zUyThmDrmc3RY53sFuBp4ZsyxXKEoL8Y99jnANTH7D1mf3cBkzbB7bdx15iXP8V2SzheYP7ReUUp9kkCD/6Hw/6e11gfDchUCK9ODc9TnREZdxqHtfNcJ20x7FLlvgiAIE0OECkEQhGI8WClVtawVDyOYZP4w6Qda6x8B/wT8k1Lqj4E/BP4MuApYUkrdz1grlFLzBHEA/xT+/Crghc45f945xcUEk+Kj2gosH5OrCNx/7q61/swYx7mOoH0eBnzP2v5Qp9w6/QxJ43IV8FJlZWJSSj0IOCXcZ/h34P8ppe4NPJUgdsJwMbALWNBaXzmhem0Ek7pvgiAIhRChQhAEoc9cGOTrsk9rbTTfe4B/VEq9Fbg7gf/+O7TWQ9pqpdR2giDejwDXE0xSn0R/cv1lAteZ9ymlXkmgGX8NsEAQFEz4//eBc5VSbwb2An/tnOo/gd8DPq2U+lMCDfXpBJaRq7XWH8u45p9ytvW01lcopd4AvCF0//kCwTvjgcBPa63/KOWYEVrrE0qpfwX+Sim1L6zbiwncdWw3nOuB5yil7k8QrH7McfEpwtsJgpPfHV7DLgIh7Rta669b5f6LQHP/AeAYQXC74cvAFwmEjj8CLgdOJQiQbmmt3zFi3aaK1vr4JO6bIAhCUUSoEARB6PNIgiBpl9MIgmMBPkwwAf0GgX/9h4CkFKcdgonouwgyGh0lyNL0BwBaa62U+mXg/xIE4M4TCBmP11ovh2VuUUr9EoH//2UE/vyvBr5kTqK1bimlHk0Q1P1vYX0PhMf6bMY13xm41Nm2RqCh/0ul1K0E2Z3eTBCfcQ1BAHER/ohAUHofwbof7wuPYWdRehfwWIK1G3YCLx3hPABorfcppZ4AvAn4dng9nyHISmWX6yil3hduf7vWum3t00qppxNkTPp74I4EAshl4XFPWiZ43wRBEHKj+so3QRAEIQ0VrPh8ndb65Ztdl62OUurLwIrW+tmbXRdBEARhfMRSIQiCIEwVpdQDCdbY+CaBdeeFBFaJp2xmvQRBEITJIUKFIAiCMG008JvA2wiyE30feKbW+r82tVaCIAjCxBD3J0EQBEEQBEEQxiIrz7cgCIIgCIIgCEIqIlQIgiAIgiAIgjAWZY2pEJ8uQRAEQRAEQZgMKqtAWYUKbr311k09/9LSEsvLy9kFhUykLSeHtOXkkLacHNKWk0PacjJIO04OacvJsVltuXfv3lzlxP1JEARBEARBEISxEKFCEARBEARBEISxEKFCEARBEARBEISxEKFCEARBEARBEISxEKFCEARBEARBEISxEKFCEARBEARBEISxEKFCEARBEARBEISx2JB1KjzPOw94GrDf9/0HhNueA7weuC9wtu/7Fyf89knAW4Eq8E7f9/92I+osCIIgCIIgCEI+NspS8W7gSc62K4FnAV9L+pHneVXgH4EnA/cDnu953v2mVEdBEARBEARBEEZgQ4QK3/e/Bhxytl3t+/4PMn56NnCd7/s/8n1/HfgA8IwpVXNi6AO3s/r5j6FPHN/sqgiCIAiCIAjC1NkQ96cxuCNwk/X9ZuAhcQU9zzsHOAfA932WlpamX7sEWtdcwZF/fhO73/Je6ptYj7JQq9U29X6WCWnLySFtOTmkLSeHtOVkkHacHNKWk+Nkb8uTXahQMdt0XEHf988FzjVllpeXp1apLHSzBcDhA/tR207ZtHqUhaWlJTbzfpYJacvJIW05OaQtJ4e05WSQdpwc0paTY7Pacu/evbnKnezZn24G7mx9vxNw6ybVJT+1evC/097cegiCIAiCIAjCBnCyWyq+DdzL87y7AbcAzwNesLlVyoERKtoiVAiCIAiCIAjlZ6NSyr4feAyw5HnezcDrCAK3/wE4Dfi053mX+b7/RM/z9hKkjn2K7/sdz/N+G/gcQUrZ83zfv2oj6jwWtbBZO53NrYcgCIIgCIIgbAAbIlT4vv/8hF0fjSl7
K/AU6/tngM9MqWrTQdyfBEEQBEEQhBniZI+p2JqEQoUWS4UgCIIgCIIwA4hQMQ3qxv1JLBWCIAiCIAhC+RGhYhqI+5MgCIIgCIIwQ4hQMQ1EqBAEQRAEQRBmCBEqpoGklBUEQRAEQRBmCBEqpkFdLBWCIAiCIAjC7CBCxTSoSqC2IAiCIAiCMDuIUDEFVKUC1aosficIgiAIgiDMBCJUTAlVnxNLhSAIgiAIgjATiFAxLWp1ESoEQRAEQRCEmUCEiikRWCrE/UkQBEEQBEEoPyJUTIt6XVLKCoIgCIIgCDOBCBVTQtXF/UkQBEEQBEGYDUSomBKqPocWoUIQBEEQBEGYAUSomBa1mlgqBEEQBEEQhJlAhIopoWoSqC0IgiAIgiDMBiJUTAmJqRAEQRAEQRBmBREqpkV9TrI/CYIgCIIgCDOBCBVTQiwVgiAIgiAIwqwgQsW0qNclpkIQBEEQBEGYCUSomBJBoLZYKgRBEARBEITyU9uIk3iedx7wNGC/7/sPCLftBj4InAXcAHi+76/E/LYLfDf8eqPv+0/fiDqPi5KUsoIgCIIgCMKMsFGWincDT3K2/THwJd/37wV8KfweR9P3/Z8K/7aEQAEEgdri/iQIgiAIgiDMABsiVPi+/zXgkLP5GcB7ws/vAX55I+qyUUigtiAIgiAIgjArbGZMxem+798GEP6/Q0K5Bc/zLvY870LP87aO4FGXmApBEARBEARhNtiQmIoxuYvv+7d6nnd34Mue533X9/0fuoU8zzsHOAfA932WlpY2up4DrM7PQ7fLnt27URWJhx+HWq226fezLEhbTg5py8khbTk5pC0ng7Tj5JC2nBwne1tuplCxz/O8M33fv83zvDOB/XGFfN+/Nfz/I8/zvgr8NDAkVPi+fy5wbvhVLy8vT6fWOWlUqgAs334bam5+U+uy1VlaWmKz72dZkLacHNKWk0PacnJIW04GacfJIW05OTarLffu3Zur3Gaq0D8BvDj8/GLg424Bz/NO9TxvPvy8BDwC+N6G1XAc6nPBf3GBEgRBEARBEErORqWUfT/wGGDJ87ybgdcBfwv4nue9DLgReE5Y9ueAV/i+/3LgvsC/ep7XIxCA/tb3/S0hVKh6PfggQoUgCIIgCIJQcjZEqPB9//kJux4XU/Zi4OXh5wuAB06xatOjFgoVbUkrKwiCIAiCIJQbiSCeEpGloiuWCkEQBEEQBKHciFAxJZSJqRBLhSAIgiAIglByRKiYFjWJqRAEQRAEQRBmAxEqpoSS7E+CIAiCIAjCjCBCxZSQ7E+CIAiCIAjCrCBCxbSIYipEqBAEQRAEQRDKjQgVU0IsFYIgCIIgCMKsIELFtJBAbUEQBEEQBGFGEKFiSqhQqNAdSSkrCIIgCIIglBsRKqaEuD8JgiAIgiAIs4IIFdNCUsoKgiAIgiAIM4IIFVNCLBWCIAiCIAjCrCBCxZSIFr9rS0yFIAiCIAiCUG5EqJgWkv1JEARBEARBmBFEqJgSqloFVRGhQhAEQRAEQSg9IlRMk3oNJKWsIAiCIAiCUHJEqJgmtbpYKgRBEARBEITSI0LFNBGhQhAEQRAEQZgBRKiYJrU6tEWoEARBEARBEMqNCBXTRCwVgiAIgiAIwgwgQsU0qdfRIlQIgiAIgiAIJUeEimlSq0v2J0EQBEEQBKH01DbiJJ7nnQc8Ddjv+/4Dwm27gQ8CZwE3AJ7v+ysxv30x8Gfh17/yff89G1HniVCrifuTIAiCIAiCUHo2ylLxbuBJzrY/Br7k+/69gC+F3wcIBY/XAQ8BzgZe53neqdOt6gSRmApBEARBEARhBtgQocL3/a8Bh5zNzwCM1eE9wC/H/PSJwBd83z8UWjG+wLBwcvJSk8XvBEEQBEEQhPKzmTEVp/u+fxtA+P8OMWXuCNxkfb853LY1kJSywgyitab3mQ+hjx3Z7KoIwoag2216n3gfen1ts6uyZeh9+VPo/bdudjU2BH3sKL1P+2itN7sqE0Of
OEbvUx9E93qbXZUNQf/gu+jLLtrsapz0bEhMxRiomG2xT6XneecA5wD4vs/S0tI065VJrVZjftt2Osv7Nr0uW51arSZtOCE2oi07t9/CwY++lx13vDONxz1tqufaTKRfTo6t3pbrV17Kyic/wCk/8xDmf+ohm1qXrdCWur3O/vefy+Jzf43tz3v5Zlcnlkm24+qlF3DsY//B7ic8ndqZd5rIMTeb5pUXc/Tj/8nuxz6J2l3vkVp2K/TJLFb+5dP0lvex5xefuqn1ONnbcjOFin2e553p+/5tnuedCeyPKXMz8Bjr+52Ar8YdzPf9c4Fzw696eXl5glUtztLSEuu9HnqtxWbXZauztLQkbTghNqIt9fIBAI4dOcyJEt836ZeTY6u3penzR5eXUSfBu+dkb0u9egKA1YMHaZ2kdZ1kO/YOBNObleVlVH1hIsfcbHorgUf7ym23oradklp2K/TJLLrNJhw/tunXsVltuXfv3lzlNlOo+ATwYuBvw/8fjynzOeANVnD2E4A/2ZjqTQAJ1BZmkW538L8glJ1uZ/C/kI5pp9bq5tZjozDXWab+0Zmxe9jtQHNGrnUMNiSmwvO89wPfBO7ted7Nnue9jECYeLznedcCjw+/43nez3me904A3/cPAX8JfDv8+4tw29ZAUsoKs4gIFcKsEY7zWmLo8mHaaVYmac1m8L9XojHR9HlzbWWn14XWaqniYqbBhlgqfN9/fsKux8WUvRh4ufX9POC8KVVtusjid8IsIkKFMGNEwoQokfJhJqRrMzIhNddZpjHR9PWZsVR0odeD9jrMzW92bU5aZEXtaSLuT8IsYrRxZdLKCUIaHREqCtGZNUuFcX8q0ZhoBOnWjAiG5t7NihA1IiJUTJPQUiHmMmGmEP9yYdYQoaIYndmakOpWCYWKWRMMzb2bFXevERGhYprUQu8ycYESZglxfxJmDTPBastYn4tZ03Kb6yyTosX0+VlxYYuSC8zI9Y6ICBXTpF4P/ov2SpglRKgQZg0jTMhYnw+jaJsVLbe5zjK5hM6apcIs8ifuT6mIUDFNaiJUCDOICBXCrNE17k8l0kRPk24/yHcm3INbZQzUnjHNfXfGBOEREaFimhihQtIMCrOEBGoLs4ZYKoph2qvXg/X1za3LRlDGQO0opeyMTLLD95kWS0UqIlRMkyimQl40wgwhgdrCrCGB2sWw22mt3JM0rXUUd6BLJVTMmqXCZH+akesdEREqpomxVMjkSpghtLg/CbOGCBWF0HY7lT2bTnu9PxaWyHobrc0yK5r7yP2p5P11TESomCJK3J+EWUSECmHWiLI/yVifC1uoKPuk1L6+MikYZy1QuyuB2nkQoWKaSPYnYRaJ3J9EqBBmBFlRuxi28FX2Samt2S7TmDhja41IoHY+RKiYJpL9SZhFTOq9Epn6BSEVE7QqY30+BiwVJZ+UtsovVMxEBq+exFTkQYSKaSJChTCLhBodXSZTvyCkEAkTklI2H9Y7sfTZdAbcn8okVFgJOWZhjtOV7E95qOUp5HneHPAS4KeA7fY+3/dfNPlqlYQopkJeNMIMYTQ6xgdVEMpOR1LKFsIWvsoe+Gq7y5TJettxXNjqc5tXlymjez0w1hixVKSSS6gA3gM8CPgksG961SkZklJWmEWiQG0RpoUZQbI/FWOGArV12QO1IbiHO3dtXl2mjW1hkpiKVPIKFU8C7ub7/uFpVqZ0hJYK3WmjNrkqgrBhSPYnYdYQ96didNqBZrvXLb/mt6wxFe02LDSC6yv7PbSFwbJf65jkjam4EZifZkVKSWSpkBeNMEOUMCe7IKRixnhJKZuPTidQui0sll/zW9rsTx3YcUrwuewubPa7rOSWtXFJtFR4nvcL1td/Bz7ued5bcdyffN//8pTqtvWRlLLCLCIraguzhqSULUa7HSjd5ubLP0lrrUKlApqSCRVt2L0EB24v/z00961WK78ANSZp7k/vitn2Bue7Bu4+ueqUDMn+JMwiRqvT
k0BtYUaQmIpidNqB0m2hgS77JK3VDNyE1tfLZb3ttGH7TgB0c7XcLt5GqNi2A46soHs9VEWSp8aRKFT4vn+3jaxIKRGhQphFzAAsbn/CrCAxFcXotKFag8YirJVcqGiuBm5e3W65rLedNmr7TjSUP87AvNMWt8ORFVhrBX1XGCKXqOV53scTtv+/yVanZEQpZUWoEGYIiakQZg2xVBRCd9ozE1OhW6vBBLRaLY31Vvd6wThvYipK7/4UCoPbdwT/S95nxyGv/eaxCdsfM6F6lJNqFZSSF40wW0j2J2HWkHUqitHpQL2OWmiUf0Jq3J8q1fJYKsx1LG4DVSl/nEHPcn+C8vfZMUhNKet53l+EH+esz4a7Az+eSq1KglIqMPHKi0aYJaJAbREqhBlBLBXFMJaKxmL5J6StZjD5rtbKMyYa74v6XJhWtuST7PC+qW3bZ8Pdawyy1qm4c/i/Yn2GIED7JuD141bA87zfAX4dUMA7fN9/i7P/McDHgevDTf/P931XwDl5qdfFz1aYLXri/iTMGFZMhdY6UCgJyUTuT43yT9Caq6jdp6GrlfIIFaa/1+vQmIF72BVLRV5ShQrf918K4HneBb7vv2PSJ/c87wEEAsXZwDrwWc/zPu37/rVO0a/7vv+0SZ9/Q6jVRXslzBayorYwQ2itA8VRtRr0/dC1R0ih3YbGtiCmYq2J7nVRlepm12o6RDEVtfKMiWZOE8bF6LJPsl2houzWtTHIu6L2lzzPi0sduwbc5vv+qNFH9wUu9H1/FcDzvP8Gngm8acTjnXyIUCHMGpFQUY6gREFIxViiG4tw/Fg/XaqQjJVSFoBWK3ARKiPNZiA8lShQe0CoaJQ/2D6yuoeB2rpV8hS6Y5BXqLiOwOUJAjclbe3reZ73CeC3fN/fN/TLdK4E/trzvD1AE3gKcHFMuYd5nnc5cCvwB77vX1XwPJtHrQbtkmgnBCEHWiwVwixhJljzjb5QIaTT6QTvRpOWs7VaSqFC93pBytwwUFuXZUw0c5paLej3zRObW59pE943tW2HxFRkkFeo+HXg0cCfE8RS3AV4DXAB8N/AG4F/BP5HkZP7vn+153lvBL4AHAcuB9yn7hLgrr7vH/c87ynAx4B7ucfyPO8c4JzwuCwtLRWpysSp1WosLS2xvNCgVlXs2uT6bGVMWwrjsxFtebhWZQ1QvV6p75v0y8mxlduyd2SFA0Bt+w46B/eze+dOqns271q2Qlsu97rUt21n/g6ncwQ4tbFA7SSr8yTasbd6ggPAtqUlWvPzVKvVUswF2kcPcgjYuXs3rVN20Tm6ktpWW6FPprF263YOAzvPvCOHgcUKbN+k6znZ2zKvUPHnwD1932+F36/zPO83gWt83/9Xz/NeArhxELnwff9dhKt3e573BuBmZ/9R6/NnPM/7J8/zlnzfX3bKnQucG37Vy8sDuzecpaUllpeX6SpF98QJNrs+WxnTlsL4bERbdkN/U93tlPq+Sb+cHFu5LfWhoN6d+hwAh/bvQ+nNc47YCm3ZXVuj1+2yvh7oEFduvQXV2LHJtRpkEu1o+saJrkb3NJ1m86S/N3nQBw4AcGy1ha5U0cePpV7XVuiTaehDhwA42mxBfY7Vg8u0Nul6Nqst9+7dm6tc3nUqKsBZzra7ACay6jj5BZQBPM+7Q/j/LsCzgPc7+8/wPE+Fn88O63JwlHNtCpJSVpg1opSyJTH1C0Iapp8vhK48Mt5n0+2E/vgmpqKk7iQmgHmhEbgKlWVM7FjuT43F8mdDMi691WpwLyVQO5G8gsBbgC97nvdvBO5PdwJeGm4HeCrwzRHr8JEwpqINvNL3/RXP814B4Pv+vxC4VP2m53kdgriL5/m+r5MPd5IhKWWFWUMCtYVZIhQiVGMx8Ldui1CRib2iNpRYqAiuSzUW0ZVK+QK1TbD9Wgvd66EqefXUWwwTqF2pzkYa5DHIJVT4vv8mz/OuAJ4D/AxwG/Ay3/c/G+7/GEGsQ2F8339k
zLZ/sT6/HXj7KMc+KajVpQMKs4URKnSv3C8aQYC+EGEyGYmlIpu2tfgdJc6mE1kqFsvlteCklEVrWG/1hcSyYSxM1cAyU/oUumOQ22UpFCA+O8W6lJNaHTpHs8sJQlmwF73rdUGECqHMmAlWQ9yf8hCs6+GklC1rSlLjJtNoBK4za6308luFgZSy5h42SytU6AH3pxlw9xqDXEKF53lzwEuAnwK22/t833/R5KtVImp1MYcLs4W9amy3GzwDglBW7JSy9nchHqP1rdX7bVbSSZo2XgrzQUrZsqyorduOpQLCe7hn0+o0VdyYipWtG3Q+bfJaKt4DPAj4JFB0LYqZRtXqaHnJCLPEgFAh8URCyem47k/S51OxtNyqVoO5ufK6CBthyayo3SuHUNEXDGuohUb5126I3J+qqIXFvrAoDJFXqHgScDff9w9PszKlpF6Tl4wwW9iChARrC2Wnba2oDWKpyMJeOA0CTXdZs+k0rexP1UppLBXEWSrK6sIG/QD7SjVw9xKhIpG8zs43AvPTrEhpqdXlJSPMFmKpEGaJyFIRBh2Lu2s6tj8+hNl0Sjohba1CfQ5Vq6OqZUopa2V/sldFLyt2oPbCYrkFqDHJa6n4d+Djnue9Fcf9yff9L0+8VmVChAph1nADtQWhxOgopWzoBiKW6XSGhIpFdFknaa1m3y2uRDEVg9mfguvTzWY5M3hB/z1WrQTX22mjO22UxAsOkVeo+O3w/xuc7Rq4++SqU0Jq4v4kzBjdMONTr1eel6ggJNGRxe8K0XHcnxqLsFZSd5KmJVRUyypUlHytEbACtWuWZaYJ20WocMm7TsXdpl2R0hJaKrTWKFVaOV4Q+vS6UJ8PJgpleYkKQhKSUrYYxrJTt9yfDh3YxApND91a7feLMgVqtzugKqhqFb1Q7gxeQF8QNillIXCB2r5z8+p0kpJ7nQrP8+rAQ4G9vu9/0PO8bQC+75+YVuVKQa0eLAzT7fY1M4JQZrqdIKOLCBXCLDCU/UmEilQc9ye10ChvNp3Wan8SWqZA7U47SEJDKBzW6uWOM7ACtSM3x7L22THJFajted4DgWuAdwDvCjc/GjhvSvUqD0YbIy8aYVbo9qA+F34W1z+h5DiB2uLumkHbialolDgcESBPAAAgAElEQVTw1Y6pKJOlotOGquX6U+ZgewjeY6qCqlT691OEiljyZn/6Z+C1vu/fBzCz4/8Gfn4qtSoTNREqhBnDWCqgPC9RQUjCTJLnwwSJMtanExOoXdoJWnMVZYTNSrU8SpZOZ9DzolHiewjBe6waTpcXZiDb1RjkFSruD/xH+FlD5PbUmEalSoUIFcKs0e3C3Hz/syCUGTO2V2vBeC8pZdOJhAqzTkWYTaeM7dZqBusaQPkCteuDlorSurBBcN+qVmIBKG/GsjHJK1TcAPysvcHzvLOB6yZdodJhBs4yDpiCEEeva7k/leQlKghJdNrB6tBKBRMtUSClE2epgHJqupurg+5P3S5a682t0yQI+3xEmV3YIBQqqsFnsVSkkleoeA3wac/z/hyY8zzvT4APAX82tZqVBfPglcXsKQgp6F43SEwQWSqk3wslp9Ppa21lXaJMtIk5MW3WKGf2IN1pB33BDtSGftDvFka7QsV82WMquoH7GkhMRQa5hArf9z8FPBk4jSCW4q7As3zf//wU61YKlLg/CbNEN3xhiqVCmBXsCZYIFdnEZH8CyjdJM9djp5SFcsSZtQeFClX2mIpup3//5heC/80SX+8Y5M5x6vv+JcBvme+e51U9z/sL3/dfO5WalQXz4LVFYyvMAKFlQtXnguCrMrxABSGNAaGiJmN9FknuT2VznzHXYwdqQzBGGqXLVmUopqLk7k9WoHaUAarMlpkxyOv+FEcN+NNJVaS0hLmcRXslzARGiJgTS4UwI7StTDi1euAaIiRjhC47pSyUb5IWrhKu7BW1oW/N3cp02n3NPQQubKW2VHQHr7fsQtQYjCNUAMgS0VmI+5Mw
Sxghoh7GVIilQig52rVUSBxROuZdaBRuCyXNpmPcYxquUFGC/tHpDMZULDSgvd6PlykbdkwFhJaKEgtRYzCuUFGCNAZTRoQKYZYwL8wwULu0LxlBMNiuIJJSNpsh96dw0r1Wsklay3F/ioSKEiha4tyfoHz3METb2Z8AGovoslnWJkRqTIXneb+QsnuLOwVuEFFMhbxohBnAmPajxe9KYOoXhDRsVxBJKZuNva4H9DX5JQt8jSwvZQzUjlv8DgKXoG07NqdO06TbGRQqxFKRSFag9rsy9t84qYqUlvDB0522+IoJ5cdYKqLsT2KpEEqOm1JWJhvphBNSpcI34twCKFW+mArTD+ZDoalSJvendj+zJaAWFgO3lbLdQ0OvN+z+dGRl8+pzEpMqVPi+f7eNqkhpidyfSjCQCEIWEqgtzBqddt+Fp1aHztHNrc/JjrPGgapUgjSdZYupaLmWihIFajspZaP+XzJrU0R30DKjFhbLvYL4GOROKTstPM/7HeDXCYK+3+H7/luc/Qp4K/AUYBV4SZjedmsgMRXCLCGB2sKs0WlDbWfwWWIqsnEXToPAJ79skzQzwQ7XNVDVaqDNL4mlYjCmoqRrjRi6XahYIciNxfJaZcZk3EDtsfA87wEEAsXZwIOAp3medy+n2JOBe4V/5wD/vKGVHJe6CBXCDOEEapfiBSoIaVhaWyWL32XjarmhnIGvrVVYaASWGChfoLZ9D0NrTOnuoSE2pWwTrSVXkcumChXAfYELfd9f9X2/Q7Ba9zOdMs8A/t33fe37/oXALs/zztzoio6MWCqEWcIN1C6DqV8Q0rD9yyWlbDaulhsCTXfZXGdazb4GH/o++WWw3nY6w5NsKJ8Lm6EXk1JW92B9bfPqdJKy2e5PVwJ/7XneHqBJ4OJ0sVPmjsBN1vebw223bUgNx2UC2Z/0j35A7/MfpXLOH6Lsjp32mxPH6b3ldbB6IrvwwgKV334N6tQ9uevU/ce/hltvyi6Yxql7qPzun6Nq+bqhbq7S+7+vhRPHo23qic+k8qgnDpY7ukLvbX/ZH+CUQv3S86g85NGD5Q7cTu+f/qb4wFCpUHnBb6Du+6DcP+n910dgbo7K434p92/0Fd+m96HzoBdqQ+bmqfzWn6BOO2Pw2N/+Ovrj7wOjNVloUHn1a1GnnJr7XAPH++In0F/5TPzOapXKi16Juuf94ve7K2o7Eyx99eXo87+Ietnv9wM1zXk//G9wp7OoPPSxI9XbpXfeW1APfTTqfj+dWEb3euhz/w71i09H3fO+g/t++H167/mHRM3icrVKN25fvR48q3vvknzeE8fonft3VF76O6hdg8+dvvgb6Gu/R+X55wxf07+8EX3T9dF39cCfpfK8Xx+6pt7b/wr23Zp4/lwsnU7ld16bOubo712K/uZXqbzs93IfVh9doffOv6fy63+A2nHKwL7ehV+Bm26g8pyXjlztgeN9/qOgofLEQV2Vbq/T++e/pfLsF6PueNfxTtLp9NdciHF/6n3ra3D9NVSe+/Lh+v3bW1FnPwp1//Q+2nvbn8OBfdE29ZBHUXn6CwbLtdv03vI6lo8difqlevQTqTzB1dM5dfjof8AdzqDyiF8cPJ7poy9+NWr3UuoxiqDj3J8ai3DtVXT/9BXDP6hWqbz4Vah73Cf5mGtr9N7yWjh6JL7A3rtQfeX/Htrc+9C/wV3uPvRuGCr3zv+Dvv7a+J2L26j87utRbtaj5mp/sg39SXiK0Kmvv5beZz5E5RV/hKoOPne9T30Atu2k8tinpNf1I++BM+9E5eGPSy2XWIdDy/Te8zYq5/wv1Lbtw/u1HhYMjaXio++l+7mPAqDOfiSVZ/zK4G87bXr/+iYqv/Q81F3ukbtOvQ++C33Ft/sb7nQW1d/84+Fy//529A+ujL6re9yHyq/9bmw59aCHoB704HwVGEopa7l7he5tcegf/YDe5z5K5TeG5269D78bfemF8T+cm6Pyij9Gnb43X/1OInIL
FeHE/ynAmb7vv8nzvL1Axff9m0c9ue/7V3ue90bgC8Bx4HLAfeLikiYN2Zw8zzuHwD0K3/dZWprcIDgKtVqNpaUltNbsBxbn5tg+Yp1OfPXTHP/OBexpNKjs2JnrN+vLt7Fyw7XUH/AzVFOEhd7xo6xfehE7j60wf6975zq2bq+z/7KLqJ11L2p3PivXb1w6t91M5wffZfd8PbV+0G/L9nVXc+j6a6jf/6ep7l5i7ZILmbv2KnY9a3DgWrv1Bg7/+DrmHvRgKjt3sfbtbzD/o+9zylOfPVCude2VHLn5euZ+9mFUFocHzyRaX/8CjVuuZ/sj8w/aB6+4CLWwyO7n5p8sHfvxtawu72PhYY+lt3qc9e98kx2HD7Bw3wcMlDvyo+/TOnyQhbMfSe/ICutXXMwpq0eZu4frSdhvyzRWvn857dYq886Aq3WPtW98icVbf8y2hz4q9rfr+7azAuxcOo3DwOLCwkC/P37jdZy46L857fdej5qfH/jtgW99jfrxo+x62nNS65cH3W6z/5tfpnHGXnY86vGJ5XonjnHgO+ezeO/7sf2hjxzYd+L8Gzl+203MP+JxfRcGC6XUkPm711xl/eLz2b5ygMZP/kziedf338zK9y5jx8F9LNxz8Lk7cs2VrF30NZZeNTgJ0r0e+79zPrW73oPaXe5O+5qr0Fd8i6Xf/hPnmo5z4LsXU7v7vandMVmwSaNz2010vncpexbmqezclVju2I++z+qFX2HP778e5WqfE1j78TUcvvpydh5ZZv5uwcTC9MvDV19O++rLWfrNPxyp3i6HLv8WWmv2/Mqg4NW55ccc/O7FbHv4Y1h80M+OdY793Q4L23eyc2mJozt20Op2B56xIz+4grVLL2LplYOTIN3psP+CL9E4/Qx2PDqljx49woGrLqV2j/tQ23tn1r93GZWrLmHPr7168Jpuv4WD11xJ9T4PpH7aGaxd/m3q11zJqS/49YQjByxfcj61u9ydXc943sD29dtvDProoX0s/ETyhL4oK5UKvYUF9lhttPbLL6D19S8MldW9Hmvnf4nF225k20N+PvGYnVt+zMHrrqZ+nwdSdZQunRt/ROeyC9mze/fQc3zg219nbm116N0Ag2Plvu9cQPXMO1E/654DZborB2lfeQmnNI8zd9fBvDYrvQ69HTuj61zfvZsV4JTtO5hLGINPfP2zHL/sQnbXKlT3DJZZvuSbVHcvcepzXpTYDsE1fY36T9yfXU9/bmq5JFrXXsmR713GKcdXmLvrWUP7dbsdzGl2nhKN7VprTjznJXRvvwWA9e9dTuXKS9jzst8B+m3Z3X8by5ddxLafeziLP/OQ3HU6cMW3qChF/SfuT+em6+lccgF7Tj11SPDaf+k3qe48lfo97k37R9fQu/TCofed1pr953+Rhe072Pm4J+c6/0EF1cVFdoXHap52OkeBUxfmqaW8T098/bMcv+QCds/Vqe7aPbBv+bsXo3WPufs8cGC7eYfsOLSPhfv/5NAx87zDN5NcQoXneY8GPkJgRXgE8CaCGIc/APKrXmPwff9dhKlrPc97A4ElwuZm4M7W9zsBQyo43/fPBc4Nv+rl5eVxqjU2S0tLRHWo1lg9eoTWiHXqHQx+d3Df7ai19Vy/0bcHhpzuU59Lz9HADpS79Ua49CKO7LudSs766WOBNqj7sMeif+FpuX7j0vvGF+C6qzm0fx+qm+6XaNpS3xbc9u6Tn0Pv3g9A33oTa0dWcO+1ufbO038FdZe7o3/4A1qHV2g75Xr7A81f59kvHdL+p/Ltb7B68GCh+9ltNkEzVNc0eiuHYPtO2i96FfrgfvjONzm6fx/H3etYOQS7TwvK/fD7cMXFHDm4jIo510C/TKrr0SNw57vRftGrBrZrreGCr3Di4DLNhGPoQ4cAOLoaWIlWjx0daKfe0SAzzvItN6J2DlpSeieOx97PUdDHg/M0jx1lLeV4+miQFnD14PLQ/ewtL4NStF/0qlihIq4t9cH9
cPH5HFs5xIm084b74u5n9/AhaJ7gwIEDA9Yck/e+e/aj0E94Jr33n4u+8CvDdTh0ICj3849HP/IJiXVIo3fBl+C673PwlptR68ma1d6hgwAs33ITans+hUdv3+0AHN13e9RHTVt2j6zAieMT6QMA3WNHQevhNro1GEuOHzjA6pjn0u11Wp0O68vL9DoddHt94HzdI4dhNeZ+njgGQPNoRh89ELRX95FPRD/iceh3/B861/8g5pqCCV3jGS/g+D3vj37zn7K+eiL7eV9bo3vkcOI4enT/7UN9dBy6J04AavB8d7tP8OcQjDlf5sTBA4ljDoA+sD849mOeSu9nHz6wr/dfH4Yf/5Dl229DzQ0qMnrra6wdj+9v0Xun3YZOm97PPoL2U73B8153NVx5CUduvw21NOiV3T16BObmo2PrY4GF/cihg7FjM4RjDnDo1ptRelCf2j1+jG6tnnk/g3F0+H7mxbwXj9x+O+r04WOYuInV9fbgmPmEZ/XLvOvv6Vx3dVSHqC1vDaZ3xw8fLvTc9U4cR539SNoveAW9L3wcbrguGHMsZaDWGr16gt6jnkT7mS+k94n3oT/5AQ7s3zdgJdBrLej1aK0cYj1nHbpra3Q6nf697ASWwJXbbkHNLyb+rnfkMACHbrkZ1Rl0Be6eOIb6yQfTftFvD2zXKwfh4vM5emD43QD53uHTYO/efFaTvDEVbwGe6/v+k+hbEi4iCLAeC8/z7hD+vwvwLOD9TpFPAC/yPE95nvdQ4Ijv+1vD9ckwbvCeCX4qcgw3nV0SxjxbJMCq6awUOgqjuIWZzBLG9NiIzxii3WtPytSQt41cFkbI/NDpFE8rbJvPU3xWdRgQCEwmhqfVRMXcW6VUGKCWcu3GHahaC/5cU7+pl+M/rbvdwA1tUj65zZzPTLsTWx9gONAyD3nb3+yP7ZfNwJVtrTW43X3ukoIFzbWM8XyqvD7S5vkrct/Ca9Zxv2k1YX0t6A+ToLma/uxPIrDUXggsHOsH7klzNXgO3D6Rt4+GbayicS9h4a3wWtTituB7vZ5vzOm008fRScc6OOk508g15kD/OXbdquxtce3caQfuWGmkvScaKe9PN6aimiOmImrzhD6b0Q661wvOO844ap7PpGcjra0NSe9I05cKvJ+01uFYbMa9hPS16+vBehLuO7MVP44WCirvdlEDMSQ5s12Ztopti9WEPrW10/PmdX86y/f9L4WfzWi5XuD3aXwkdK1qA6/0fX/F87xXAPi+/y/AZwjcrq4jSCk7GWfbjaReG2+dirwvHwudd+LfyDl5sDEvr6KTcQtVrwcdaYxrUvMN9PL+4YLRpCp8OOcb0IyJLYmO1xjel8ZCo/ig3WlDp9jjMiAsRINYxssrHOh1uzP6YovN1eQ2yUqlZyaDlWrwEnUDtZMm02ZwnlRKQnOcLKE1bXLfXO0vXJWXokJFXD8y21rOfRgSlq1gQduvN3o+C9bdJq2/WUTPZJH7Zp7PpDY3x4vx5y5MazXGWdY6z5hCbORfHgVq1wOBsNvtT5ztiaJZFNLentVXYoXJ5AlbxZTLm96200493sRTZ3ba/exweWg0sutg2rAeM8ZmCBWF298mfE50c3V4vG2uDipn8mR/sp99i2Bi3YT5jHYwiohxxtGsZyOtrQ2NhMD7URSk7fWgzcJxTzUSFtqLxkdL6Wi2G0HbLlfk2R8K1M6pjE0Y5yMlWlyf2uKLQead5XzP87wn+r7/OWvbLwLfHbcCvu8/Mmbbv1ifNfDKcc+zqYxpqYgWWSlyjDVnYp2EmYwUGYRaOY+dxigadfeaEiwV0cO4YE2+VmLMha0m1OoDK4PmorGIXis4aHfa0C4og7ea/YG0Wg0yKsVebxNMULYZ6MexVKw1k603C430RX+MZaJqhApHmDaTHPcY0xIqck7uY6+pldIOSUQWuHQlgk5qB3tbQhupIUHTCRacxPMZvZAz7sco9y3tN/a+MYUK3esFkyyt0b3egMVJT6q/
dbuBEGHuu51CPBIqrHPZ8SmR4JuhcHLHvYVGpGG3xy6jeVWL4ZhRq2dr4SF4JuPGs0k/k/b5CsSwMZ8x5kD/OY+1VIT3wWln3esF9y9L4ec+dzbmHRPbfo4mOk+g9lpCm4f9eCrPY9Ixkt5xaW1tmG9At4NutwdiraL7OIqHQty4N065Im2UEKitm8105V0nYZxPmZ9Fi0Fu0TU/8tr1/yfwn57nvQdoeJ73r8C7gclE05Wdsd2fcr58bFxtfQJBB86hCYo99hjuT9URJr9G2m9YGrskzX2tFg1mKmlhJXfQz8solop2Do2Yi2sxCN1d4spFGrHqeO5Pkfk86d4uJLheGHq2+1N12NSfpKEfRXuUhjHhj+GGpF1LQR5qOft1J8UsnuR24grLSS5KbrlRCH8b66IUd64i40eaW80k3ZLWW/2MaK4rWZaLR14irW343MVNHBO0z+b6s/qodse9JIHPTH6NVrZWy+yHkaUlyd0mrt7jEpdSNo1GwrjnHhMG05wakhRY5vs47k8JrjiRZSEmpaxOS7NtXHOSnv1WMxijs+o6zj3LGovT2tqQ5BY2gtdF0XHPvAsTXThHsa52O4NCRUFLxdA4kzU/y+Pyd5KSS6gI14d4EHAVcB5wPXC27/vfTv2hEDDuKqumc3ULPohz80PZEWJJ8tFNoB+zMIYmNNLoFRCUWqvBQGZeEmG9hwZZ2/8Skl12ms3RtLlJFpI0up3i+evd2IZEIcoSjiJLxYjudmYilnRvG+mDXeQLX60EL1HH1K/DNhgeZPsvw0ksKBRNxLLawexP8u8vKHSqahVUJfu85lmOFRIT3E7M9vDeqIQX99AkdBQaCVo+l2gSVEQpkeLiYa5xEi9Uu20nMbGIw9XaxsWKRXEnzsTTXH/WuFB0UtUwQkWOmArzfMbFsUzIRWwIx8KSSZ4YNnOdccJKLeFdY75ntVGKEi2wIM8P18/17wfL/SnlfK2MZx+GBeSBcjmFjzSi8SfJUmHShmfEVNj1MeTt8zH1UXFuTQPlYtxDIcVSUdD9qeqsUxF3bJfo/RIvJCa6kG/hxSBz+2P4vn8LQdYnoSh5zdBJmM5VRDBJCgKKo6jmfdRYBJtR3J9CISDKoGLOv9YavFZXQxRq17XWg9lX1kYTKtR8o9gkCkZzfxqyVAyfN5qIGd//cQO1MzQoQRzLvth9QHagdqL7U3hdvV7wQp4v4HMdR17f3STzNATtP8paH/VsDbGxOrovjsBlJ35iHbkOZE4uJ2BJzPvSTHLXSKEfABwzEdO9wsdLxD6G68oxSoB5HK5QUR98/nSnE/iEw/AkJq8riPNMqkYj9CmPuSalAjedE6v5LOQdR/ixXc7C42e6HhUlbp2KNBYS3FcttHsfLFS9Fh+/l9NSEfXXRK1yjFIuTvGWJ1A7Sdi1+07au92U0zpQEI0wBiQ+n4Yc7k/9uAfnOpo5+7xNwrg3FMeSND66Y+zI7k/W+7s+F9zPrPEj6f2SFfO6hReDTJzleJ73XuJD3AbwfT89abIQZuGYgPtTIa1+M3+Q6cJisRfH2gQmLaNMfoeEBcsNwBpktbvo0MJiP5uO/fsigpdNQUtF33e3YMYLN7ahsTg8OTITscaEhIqsCWnWtRsholKFSiU5UHtIc2NPAFcnIFTkfGYyYhvismBlUmQyF+c7HaJbjr+uO7mJhOokN6kxhP4cwYIDloVC7k8Jk1X7GJPQ0rkTsZg6jC28mP5jZ3+C/v217s3Q/SyY/SmKm0kS+EIlRKQ4GVOoiCaX04ipyJn9CUIhKiuGrZ0y0c1yf8qa4JrrT7LexrmqxE0a8wRqJwm7aQJyUrk0N9Y0soTJtLY2JCV6yBvrNvCbfOPekPBnguiTxtG1FrrXzbegcHcwUDvKSpaVyCIrMUmaoFo0bvMkIc396Trgh+HfEeCXgSrBuhEV4BnA4WlXsBRUx8z+NELGBF3EdSMro49LczWYcKSsJJmJlaUoL9qNgUgyg7aagy+A
RtIAtzraoBsOJrnddPL67tqYjBcDwlGMRcl1jRh3Bfcss2xSXIfBmNyr1fSUskkuOzAZDU3e1IVpWZhGjbkpMplLmoyY88ftixIV9IMFB8s1YW4un+tjAqpSybZgdtr9+1vkniW6ePS/F7YExpHWlpOKFzDjevjcKXcCO9CvE+qQx6ffTm2c5M/tjmd5LGYDQkXCxGfi7k+dgpaKAjEVowgVeWMqUhQtQxPwcEI4ENwdxdvkSCmb8mykup8OuPyNOI5mPRt5ArUjK2q84qCI58ZQJsuklLKOe2hiZsuBcSFnG7mB2qYeWW3cThjn0+J0zPYtGlORqC7wff/PzWfP8z4HPNX3/a9b234eeM10q1cS6vUgfdgI6E4n0EYTPIi504SmpQV1WWjA0QLyYagBsV2JCjNKliLHAqEWQhNr3KBhu63YA9yuwXLqjDtTmEajmJuOucZeL79mJMZ8ruJeXo4fqapUgsFvZPenrBdooEFxs+lEONmf9FCgdkIMw4CGbYJa6rwpZTvtgUwlkRZ+FG1/nhiqJEtFqnZ9Fepz/Ylr3snlqGRp4tIm7WlEvvpplopJuD8lC6o6qQ5FCWNj1FBMRUwgfpKffJ6Upm6MGMPuH9rtr2FMhev2OYDdTxPjTiY8uekWdH8KraOp1+EGzNskKVrMPcrT/mlKtDjhOy6uqZJuqYiSZEBy/A2k349JWPtyp5RNuYdG4dFaTbDOFYylBCcTYkwciyv8zacoE6P6NPNlInNjKiA7EyJkK9ESxmm1sHVjKvJmf3oocKGz7SLgYZOtTknJEzCXhG0CK+SHmF+oUFkZfcY4diITcX9KNrHaGiKVZood5Tpy5vCPsK8xr2UmLrYhxVKhhiYT41kqUs2ykBwsGMVUmJSyzgs0yd0opyYuNzldSwY0Znad1tcCt7JRhYrMmIoEs3iaFi2p/8cJaOM+n+HxU1+arqtFXpI0oaMeLwG77sOuVuH3tTECWmF4ITBXWZKmOTZjex73G/t+Jk6WYoQKSH/32PuS7sfE3Z+KWiqs9ViSSNOe18e1VDQH3cri6pekHCgSqL1uuT66ri95LRCTGEez7nsuS0W8i140UR4npaz5HDfuVSrRWjCqVgs+p7na5ujbuhemjXaFijweHkkpy/O4P23RmIq8QsWlwBs8z2sAhP//GrhsWhUrFTlS+yVid8aCE/Dci9MlpVxNYEgjNgp5U2/auNeUmN3BcVuxYy+c442UwSrpeEnYgkTe620Z87mb/SlpcHLyoY+aUjZzsEswKRt6bqB2fErZIS3MWrGBPpO8vrsDWXpiJvQjxVTUCqSyTZhoxu1znzsTLOi+uF3N9qhkvdjSrCppJE1aJh1TYdc9TYBJy6aThbsQmJvSOc0XPq/gm+j2mTHu5Rljk4Rqczz7/wTQ3W4gIKQtnOaSZ3X3cWIqcrmfJT9PKmZyqV1XHMgO1E7LVmYdfyqCftwxEn6v3TiiOBKtqCPEVDSbQUY9e8HEuHdhOO4NCH9xAl/auBCHvaCrTZ60r1Hq8KJCRd86t9XIK1S8BHgEcMTzvH0EMRY/D0iQdg7UOCll7U5bNLgp78SiUTBGYFRfc5s8WrS488YEasfm9I5xFxjw2e60g7iFESZfSak8E7FTAedNCxznc2kWvYqbCNsvr/oYlrHoZZji62mf16XjBmo7L9BuwiBr35sJTGKiY+RNKevWaZy0rHksk1Egb2vQRSzlhedOLhODBSfxfEK2Js6qa957lhbcrdMmVaOQJvRMyjI2lFJ2MKWzTqlD/pSyrqUidMMZ8il3xr081mBr31Acizn++nrghjsJ8mi5XfJYhjuW26VLGMswJOjnTCmbqUSLfQZjJo1ZgdppQnVe99DWeONo9F5MO0/Xsc7FMR8mekhycy2SUra1Cg3HUhTnChw37sUJfEWVIXZGQ4tcHh5p6zKlpfxv5LDOnaTkUhf4vn8D8HDP8+4M7AVu833/xmlWrFRMwh0Fck8Uoxd3Xi38
wmLw4LTXB7UBSTRXobEtu1waI6WUdS0Qwy8b3e0GsQ4xbiID/p1ZmoI0ktxOkrAtFbndn2LckGwLic4t7zsAACAASURBVPH9j3OTmqL7U2IciyErUDsxcK0Ji9tg9cRkzL55NWK2kBcTIxC7im4WebK9ucJM6NcbvfAWtw1P8uIUBXHWhGYT9pxWvN4uCw1YOZi836prbq2ocSsL7/XAirujHC+N5mrwLMRlsWqt9vvbOEJsRkrZqE8tbktemyUzpezqQIxYFESf4fZZ3P3JEhJNamPTRmtNqO1Ir2ceRhAqovSkaeNCtw3VWnycV9KaSHlj3bKE9LhnMM79KSOmwu4rsZPQxjZonkh/7zTHHEdNHwjrEBs7lyelbKTwSFAeFU2PHzvuxSgl3PF6YTFGWC747NsuvTZ5skAmLXKalXnSts6NkxBnE8hrqcDzvFOBxwK/ADwm/C7kYZyUsqO4P5msLLlTyubMSW/XaVIxFTkHF93rBhOS+WFhIdYPvREzGR/QAmdo5NPIu5qmwb5vOe+hjnspxWWxiis3TgxPOBFLXJwqKY2podsJcuWbgPGkFbXjXHZ27Qn3TcL1JZ9ryYCQF6fBmnZKWYh3e9i1J95kPvTSbMS6ko0kDDmohZgUxhaRpjCurknY1wfxLme79kxmbQSzDk2Sz7upwziWCtftxmS1cwM0d+1J1tpmPautJsody5N8yhujWyoGjmdcwqL7NCH/7jzpSF3yWCrS4jSy3J/M75PIet8tNKDbGbQgN1eD8S/074dQGFQx1lv7PBD/PDVXg3S/9bnUdtCtVThld3i8Efq1/XyatS5c3DiiJGIF35zjskXselJxx85brtWM+nWucaaXIFTEHdslMSFHM31+VjRu8yQil1Dhed7DCFLLvgL4SeA3gB+G24UsxtAcD7gEFPTHz59StmAHbjX7q1uOSOEsRTHXpGr1cJCNMQ27qVjtfdbnkdYhiDJb5HzRjiBUxAlHyrZUDJUb9KXWo7rbuWtjuJj6JGnB7CwZcYHaaXm7d5wSGyMwEnktFbb7R5z/+yj9vJadQjoxQDyaWOyOnVgM9dc4bdmo66+4NDJSeTZTJsxJ2BMn+7s5XrUa9INJBes3Foc0yZE109RhjHzw/UXXnHUq3IQEp5ya7Fedx6ffvZ+OFlhrPewemhSgbGPvW3PuBUxG8Io7X4F1KnLFsHXayXEaeYSKtDbKilGKcwltrcJ8THB3nKLF/g3EChW61QzqkOVy02rCjp3BNY8yjq6lPJ8GN44oiYXGwPwl6KOjxFQM93+Vd9yLK2cJFfksFQmudQuLw+6rLpFlPuZ+pozRfRfrCQnzG0jeJ/stwG/5vv8Bs8HzvOcCbwMePI2KlYqxArVt96ecxyioZe27tOS1VGQMsnkpImwlrfTsagvCa7cDulW9PjzIxgXS5SUpB3cSowgV0fWmx4YEE7HaoNZoHPenrFTEJo7FTRVosBcJSgnUjnUXWDo9X/BbHqIsIzljG+zfYLuVjWipyEwpm2Ahaa0Ga0xs24Fe3j/4m7jEAguLcOyIU25Cz2fCSvRuvdWpe9A3XJPvmPZvYNjlbGEx6Odpq7bnRBsNs1JOSs6mU4cxXtxDMRWu+1NYh8VtcPjQ4G8ta1pSG/dTGw9Plgauaa0VaJYdpYuG9L6YlFLWvU+T0piOElORkEJ36LhZlgpnLBhQvKSNl1lKNNtVZccp0W9iJ43VamI8gRlz1Kl70O0gjkUZ4cs8++sZgr6xwBVde8rQjHk2djll8t5Dtw7r63332KJZHxcdd+uEWDK1dPrAJhWXwa61ijp1d/5nPzFQ2/LwSEpLG8XOOWnYc75nt+JaFXndn34C8J1tHwbuOdnqlJRx3VEg8AvO64/fGp5Yp1Ig8DjSNkxCE1pk8pt0Ta4/qxlA4twF4lwt8rqIucey6pSJ/fLKa0ForQ6kx0s8b6idHJiQjOFuF03EkkhaUMjQ7fYD2pxAba118BwoNZzK02jh
85iUs67BTMSUAt0LNNNJdNpBOYgPkh7BjUjlTSkbndcWEkOzeNykoDU8uXQ1drrdDtp4QillU4MFW83AnWPnrkj4yMRc66kJ7k8LjaAfTMpa1Vgc9u1uDdZhrMQAzuJ3Q9YB83w6wrLWOtAKKxUIA0l91MSgDAmTjsY6bjzL4f4UWVqUij+euU+TSm8ZtpdKW+PAJY+7aZpQkbQmUl5lT1b2pxj34cTg7jjrrX0e6Le5azkKx8fU/mqPo6NMSN06xJ3LtFU1y1KR8NwpVTil7LCFNnjvD4w5cfOSuEXkWk3YtiNc66KAUOFebx5rgv1+sbPMZcXpJGW23ALkFSquBZ7nbHsOgUuUkEWtHgWDFSZyb9lW3HWmyOJ39u/SMBqxSUxacriJRCRdk6uxi8uaZL7HZRgaRThKSOWZyKjuT0Pp8foau4i4l9cYKWUzBcb5jL5irzxarQ2a+s29NlqdgUG27/8+tj/9+noYDByeJ60t2u3get1g3rGyP+VcydjUz+2/jWHhqp+tLKewPKnF7yB5cmLuWWNbfzHILFz3imbMRGwCgmVw7MAFJcmaOZF4AXchsLgVteOE5fZ68Kxk9dGkcc+dNMaNZ0VSyi5uHxxXWoNtNJEYF/t8I8VUpNShnSxUKJOJLjWmIr6NIiVaHkVL3HPs4o6JNq5roHM/1EIjPvjZPcYY46h26xB3jHYbarXsxW/dPhoFom8vpmSN0+rPB3EsQ7FprpJw3hlHe93gvRM3LiSRFKidx2PBHuedtkiNe4uSy2w9oSKv+9PvAp/yPO/VwI+Bs4B7AU+bUr3KhW1+nc+xmrJN6JtJfW4815k0rMlq5hrZcTELo1LI/SnhvK5vcdK1u4PsGIG4UWaLvJog29yddzCN9Q8dDpKO9c2s10fPvd9cDfz5E4gWFEpLNxgOvqpaHbQSmHu9fSecOBZdY9/6ZTT0Yw6kpm7mPJ0OJCU1MxrOOM2v61aWl7wpZcP66VYzeu76vtOLQSrPbjdIO5gUJzVUbzO5nIDQPzBZiukTzdXwnlkWtIwV5rXjXjGYkS3s85YWMnPikkZrFXXGHQGFbt5kbQ/rsCvGBasoWSllTYxSw/hfhy4Qbh9NSrGZME4FK+4Ou3MOaHSLBGpv3xnrHnoyuD8F7qu11DroTjs9TiNO0Lef0aT2j3ErGyIpEcj2mGxZlRRLhUmSsX3ncJubcX59DVaWk+sy7jhq3Ifj3BMN3XyLF6pGIz5ObftOWDmQv05xApptJajPhcJCjHtoYxE6nX6WOXsczfsOD4VAN/2rajRSn43IMr8nfMYH3BVjXBptiiaDOYnIZanwff8C4B7A24HvAP8A3DPcLmSRZH7NgxkkCsRl6KITiyKB2kmxDaNQxC0szQIRa6mIGVxiJ18jCkcFtKm5fXft37hBl5Dw8ooxzY+ZUjYzeD1tUbShQO0YgWrHzuhcwfYwW9lCgYE+DfN7c56sSVWtPnze1upwbvS85Eop2x5uh+i8i8Pm7zSh2g4WNJOCCQj9KkMTp03fK+L/G7lXLIXf41w8Foe1kKNgjtdIELy274C59Gw6mbjZjKrOWG+0rAuLg9l0TJuaPpDkDpLk9pln3EuIJRisv/VM2hpd9z5Nyrd7lOxPED6fGRrhtGPGxTnlsSDnUaJFiTtcy0JSTEWK+5P97MeM8yrFrWlg7aVRx1Hnvse6WmW1tcGNe7DH5Zyu3LrXi3X7HBpzWq3B7W65aBy15i9xa13EkRionWW1d953YV1zpfzPcjM+icmdgsH3/RXgPwA8z7s7sAM4lPojIWCUNRkMtumviJYb8mvh5wsEHo+TNcmlQJaiyDTvTLTVfMMx2ycIPfMNOLIyXG7UHNBOZotURnZ/cq9hId5NxwQHhqhaPXtF5ySyAsggXaAaCNR2Mp1EWlErmNGcE6ARvDT18u2j1d0QTRpPGTxvHJ1Q61atOosixZjS85IrpWwHdp/WP1d03tVgu62p2rY90vINBYzaGrvF
7ckWjVHIih0KtadqIdTY5cmilJZSdq2JusOZQ1rIkYk0l4PxAgPWzPkUATkPUUxF8CoNstpZCqBWM4g5sSeKtnU1q48mjHtmkh1Zc+LGPaPMSltws2s9k/aaJHYWMpigpWKwvXKTpcTpZGjPa/Vha0QO96dcSrS4yWUzwY00JVA7cmV1XEyjbGWNBqylZH+ynn2VFXuRhLlesy5K3LOR4mo2QKjIi+Ie7HE5jHVLXPzNYIRwZ9yL1i4xx0wa92yF6Y6+NU41FtG53Z/C2L+4FbXtOri47zszPtpKtCRq9WAcKaulwvO893ue9/Dw80uBq4DveZ73smlWrjSMIVRE7hBFJoprOQZCm7m5wOc0j9QePbyTslQUvaaYQSPWrWl4EBoa9Ocb8Ysl5aGxmD8VpZ22tIhQ4abRU2rY3WWtOazFHGcF9zgTskuahmcgUNvRyoXXrlwNvR1kOhH3p+bgeVLaQnfagWXBadeslH+p5FynQs3NDwcLtoI1JoaCP9Oyn9nlxggwHyIrWNDELBTRqhm3su07hoOD3UnVGFo63en0F8FcaEB7fXjtiIUJ9LdOjH95rd7XxJogUzcWyShnsqxpSeNeFEQfxLHoOEWSWTMjR/YntWNnfErZxmKgzJh0TEWRQG0A190r7rhpx4wbE21teUZMS6oSLW5yGWdphlDR0hvejrUeg8l25Y6PWTEVeculYcdJmetwyWprw0IjsM6FrrjaHZfzvAvHHPeGxlG7XJrF3SayVMQHag8trmcI+1d0veZcOWJeY9/1W4S8M6rHAReHn38f+EXgbOCPp1Gp0mG0MqNM9IxJtGj8gVK5tfD91S/zuD9NMKaiSJaixMElxq1pfmF4dVT3+kL3lpEpYl7OoxFziVuTIO68cYHa9dGyjfUnYlnuT8l9RXe7gYAKwyllbf9tU3cYdPHIO9CnYfur2+eNw2g449zjRu3joVtfajYk40Iw5MbS7Pv7gmXeT3F/gqjNYieXoxKXGMDGuGYW8f+13crigo0XGlaO9jG0dPZk3M3SYitGFhbHzP7Uhqozwapbloowbmjompr5+mhkVRmyUDlWpNhA7QIram/bMZhNp7UK9bkgk9mksnFhKVXcNsuikez2A+SzVIyS/SmPK7GxIBv3FhMMHDdprFTRafEzWc9+Y3FQQI6pq4pc/kZ0f1pYtOJYhu+7LuL+ZNff9OU847JdH/tY7rHNMZPcPt22tMZHlTftbpRS1pku57ZUBNer3Wd/HDfjk5i8QsWc7/vrnufdEdjt+/75vu9fBZw+xbqVhmh14qTBJA3jjlJE+xyXOSiLuNRrMYyVNcmlaErZuflhc+mQFjJmkm3KuZO3MbS5qkiGmnZMXEEWSZou16wdV27UdVHyWrjS+kq309fouP7Dpv+G7lo6Tgu/0AjTzY6QKS0kmojtyOP+FAZ4DglrOdzAknCCdWMxLgRx57W1/+G9jtpqaBEoZ3I5zqJ9LlnuT3b6SsjnDmi7lVnCqe6aiVj/eGO9UG1N+5BftaOtHFeocBcBC8e1gQXpnMlNdD9NH03yMc+cVMVos+16mDqm1d8It3Yciz0+Zk3oi2DGgKyF01yyBJusQO16jKU/j1CRYwI4pFVO8u+H7JgKu/87/TVSutj7BupqC8uNINFDUeWSPe4lKY86nXzua0lWgqjP57FUWNdu48axJI17rjC/Zo2jeRWpvYSUslkxFZ3B992QlTJrDuVmttwi5BUqLvM870+A1wCfBggFjKPTqlipGCemInSHKKbVH0ELn9cHc9KB2gU097HnjNNCxg3mjUaUTQesINNRyVpt2GaUmIq4Ba9gQHvR14jFuD+NGr8DmQKjmk8xy/Z6KYHaZpB13Z+sF3c0ARwxe5V9vDwasXByPyQktmLcyvKSayXjTuR2ZSaYutMOftOImainBWrb+8dZtM8lKw975K6R8XK10HYmFysuSduTYlcIGIXIbcV2JbP629x8YM3MqUxJJE5Dbp4/s9jX
wETR6fNZriAJrhJDlo/W6nC2srwrahv3PxjsR2bb/Ij++XGY8aBgoLbKcgVpt/vKuzjikoLYbqkJQp1OaP8h7AlqmuItbkFQg3nXLywMHsf8nx8WTgd/H1Ou4Grx7vMZe56cMRVRH7XHsEqlf/xclook97+c455jbdVDCoUi7k9O9qdqNXAdT5oHDFnm3bpm9akJCvMbSF51wcuAvwTawB+G2x4G/Oe4FfA87/eAlwMa+C7wUt/3W9b+lwB/B9wSbnq77/vvHPe8G0qUhWPE7E8Li1A7Bqsncv1Et5rFg0zzPmB5smHkpYhGPUlYsCcg23cGg0asht8qt21739Q8KkX8HTvFLBW610uObbBjOczEeyimosD6HzZ5/IdhOI7FxkopmxSorbbtHEhX2NfCNwYnsu4qqnkx12FSM6Y9d5128BIfcn8aw5JVzashrg1qyu0XqGtaT4qVMN+jPlHM9TGVlGDBKCtLI6auadjPsfUM6WY4tlnZbxJXbc+DPU6Zg9g+zWE/i11xtwhxriDm+VuzXGciP/kwfXDT6aNpmvJqdThgPS7mxs1W5maiSqp/tTY4Pu7cNTi5nESck30+KJ79KctVJTP7U1ZK2Sz3p2xXlb4CIEWwTw3UDl2PKtXBBAJ2APZ6KzEpwoA10x5Ht8Wktk3CHveS4liyrEKGIUHazGVyuOVF9ck37uki4yP0BX073WwSJlA7Lqg8zdphFnqcX0DbadiTBKW4Yx87kl7mJCSXUOH7/g+BFzjbPkywqvbIhNaOVwP3832/6XmeT7DI3rudoh/0ff+3xznXppJHYxRDZD5vLBZb0Kw1woS5sQgnjuc4djMITCwaaBeDqtWT/UsdkgJnh3JFJ127rdnbtj16eY7MwmLkpjMUv+FiBuFOzjSZaynm84VFOBwmXUsaSMNMJ1FO/Lzk8R82dUhdUTtsj6FA7fBezy8MrnVha+GLTFCTMBMxI5Sk9bFoUtVwsumMIXTW092fgvzloS9+YxGW9/frDQyu/eBMVFxhwQoWVOYYCyOmwnVQSmHWjBhi3SyCudhfDDKPVq252n/urImiNgqToilqE89jaQNNW7iTm6gOY0yY47S2YVINZU8uXR/wltHahn00aYJlfNzd+xkndMZZLE0dM+o/nE3HOt7CIhy4LfkYRRgrpWyWUJG2TkVCSlljVUrM/pRTiWYLPVFsQ7FA7ShGCQZiIgayla05KYkT6qoWFkdbg6XV7Gd+SlIedTtBgoks4mJDGkG8RqogbaGTtPpOHEuild1Nz+uklI3qVR/MoDhAUqA2pPfLSIDuv18gf8p/1VhEHxgzE+ImkPgUep73Qt/33xt+/rWkcr7vnzeBOjQ8z2sDi8CtYx7v5GNU9yez6moYOJU3/WqghS+m5VULi2gzuUljXLchm0m4P7n+160mLO0cKhalvbR8kFNXtMwi0sK0sjXqnXYwCPd6OTNeJAgLMJgqMEkjZsfwVAqk5IwmrjliKjrtYOLkTg7cFbW17gs3duaXAR/k/gth6D6NgumjuXzKO2EwaiPMprOGNosHjpNSNu28vV4wIa/XBu+n5bIz5I4SahCHhMShyWWCRW9UklzdLOuSUiq5XMzv1B3ODD4vLEZpTHuR77T1si/oumEzoLUNJ+TGSjBgzZwfz8UgNmjVTFTtyWWS1jZL4RSXiAH61pxmeE1xlrVqNbj2NKG623Hcn6xxNEz7O7SI2TiMmv2p0QiezaQ0pHksFc6CoNq4Gh47kiLU5VSiDYxnCf79EChaYu617nT6a0wMHc+ahK6lxDnZVpWseKgkmquBeysEdbHTsBva7f4q0WlY1jkIBQQTHwrF3J9iMyEuDre5O2bPOWnYW80oNlPb1rkdyUJFtIBrnPIwzdJpC9D2/SwSqL0FYyrSLBXPB94bfn5hQhkNjCxU+L5/i+d5bwZuBJrA533f/3xM0Wd7nvco4Brg93zfvymmzMnLqEKF7c9Zq+UP9G41YfdSsXPldn8a
wy3EpUiWolYT9pw2vH0ow0RK1iSn3NgxFeZ4eYSKWh1qOS0VaSZ3O5ajZU3EbGwNZYE8/7mD8G3Xi+2uUGFpscwE2GSE6jiDrK09UpXgdwX88xNxXl663Ul2o4l8yi2tWjdss3FSyppjJ53TlLPN57amsVIN082maKJhaDIYu2jiOCQFC7oCaN5MKlb9bNejyP1poRFct6qML1ia45m7b2tMF6x6JwnIeYhLr2mUJfb6Kyabjq1Ztftopx3bR2OFBcg1nimlshU3dsIA53jqjDuH5yrg6plFlP1phHUqIBA04ya0JkYpibiYinY7OO6xI+kxLXmeJ0tATnURrlaHhBtgOEmGnZXMVjK5lkkb25o5arIDy0KrFhrofbcMl8mdUjbOmlZUqEhWsA0kEGitwtwcyrFWqUqFgdgEd/yB7HEmCtSOESrSxj17nLfH0SLv2TLFVPi+/xTr82OncXLP804FngHcDTgMfMjzvF/1ff8/rGKfBN7v+/6a53mvAN4D/ELMsc4Bzgnry9JSwUn1hKnValEdOqtHOQjsaDRYKFCvTrsV/O60O9A+cCtrvW6u6zqwvsbcKadySoFzHTt1N821ZubxD3c7dLfvZM8E2vfo9h20up3Mc9ZqNartNeq7hq+p0zwetFG9xsLSEvvXWiycupudTrn1M85kBdg5N8fcnj3sbzVZ3L2H7SNeR+sOp3MEOLWxQC3jGEeqVdbn59G9Lgu12lDdXNqH9nEIOOUOZzDvlD2++/+z9+7Rkmxnfdivuquf55yZM3N65t47c690rx4IBSVIIAQYI5sIsSLAkkNMIVZIEMaRwdgkxlkGwYog2I4NMsbEImYJQ0ArDqhsRKxlE4xxWHFYK9KyJGMeBoIEQtKde2dOn/c5Xf2snT927epdu/betau6qrp7zv6tNWtOd1dX7a7ar+/7ft/vO8DVJMDBwQGmn23hFMDNJ++hzR032r+FCwAHN/bQYMWrIvD9UsTIbeICwO17T6OpaWNw5y7OAdzudVPHHTUaaPS6uDUY4OrGTVwCGNzah9PtIeh1cQ7g1p07ONu7gUY4x63BAOcOwbjXx507dzAbneMYwF7bzTVWeJySBRa7e7h592407jroKc51GC7Q3t1D++4TtG29LpxOB0PQcaf6HqC+l+PbBzgDsL+7g5bk8/DiHIcAdm/uY0FCjMZjDAYDTP6YPs/9J++hNRjgcGcXHRDcGAxwGi4w39mVXu9hu4OeA+wNBjhZzEH2buB2SfPf8d5NYD5LnW92/Ij20SdoHz3a3UMzXGA/47qPJmP0bh9gbzDA+a3bGE/ob5998ncAAPtP3Ufrzh086vXRjX57EVw1HFwCOLj/DOA4OASw03SwMxjgaD5D8/YA+4MBRoM7dKz0+2jc0FAgFDiOKGL8/Tnp90GmE/TbLdoPouf5qL+LrrN8novdPdy8E/XRblfa104WM5C9m6n7T/b28AjATmP5mxr7t3BrMEj0y0etNnotF3uK+3jacLDodnHz3j0cAdhtuegJ8+jF7QFG4xEODg5WptVdtFoYuS7u3L2b63vB4AnlnAMAD+dz9PduKOfz051dzIcPE+PnpOEg3LuB+eGL2Gm3sCN813VddMgCs529zDXqbP8Wpn/8CQwGAwTNJm3rvfuptp50ewiDUWr9XISzxJxzcuMmyHSC24MBLpsOrgAM7j+DcGcHQwC7bhN94RwXDkHQ7ePO3buYT65ov8oxjxJC8Gg8jtfF81u3MfmDSeq3D0mI1s5u5v6C3LwR91HXdeHOZ2jc2MfOwQAnAG72+4l1S4YLh2DUauPOk0+lPhvu7MEldM45JwST3o70OR32d9AGwc14HqXPc/LkU3T97LS07Qh6Pbo2DAaptf70xk0sDh9K90OTfo/O54M7uODm0cuGg6tGA4N797Xj6fJggKvpBAe3biWic7o1fBNg7C7wPG8fwNcAuAdKUfoXvu+frnj9rwTwR77vH0bX+CCAP4GocjcA+L7P
lfnETwL4IdmJfN9/H4D3RS/JcDhcsWmrYTAYgLWBXNBchfPjY1zmaBd5QL0El/MFyHwBMpnA5HeFo0tMnIbRsfF34IAEIxw+eqTl4S/Oz4BWK9e5ldecz0Fm08xzDQYDLK4uEDrN1LEkoF6f80cPcXF4CBKMMAYwFY8b0yJRZw9fhPPCAyBcYESAccHfQabU63Xy4Hk4PX0iXHh5AeI0gaaL8cV5qm2pc79IGYDn0xkc4dgwJEAYYvjgeeAh5VuejSeJ48LJBABw9OghnHmSv8v3y1Q7Dyn97XgUpK6baN+cem6OHzwPp5mMhCwmY2ARYjgcIowSyYePHsLp7yI8pqH0k4tLhG4bOD+jx50cg3S7GA6HIGPa9vNHD3ONlUQbzk6BVhsnFxcAgIuTE1ypfvN0gsl8gemM/qaTF54HWjTScjFbKL8HqO8l4wGfHh7C2bud/jzKibmcTGmsdz7D4QsvgETP83QyhTMcImx3MT49wXQ4xOL8FGh35M+u20NwcoTJcIjFxTnQ6ZYyPgFg0XSBi7P0uIv76BzOcIhFq4352an2uiRcgIwDBMTBZDiM5pwrHB4eYveSPqvTqC+Tbhfjk+PMsaJCeHQIADi6ugKLVFwdDRFE92jx5DO070VJmEfPfwbONL+QxiIYpZ7LggAIApw/Ys8z+k2d5W9anLM+SteGi5NjaV9bnJ8BezfT958QoNlc/qbLCzi372A4HCbXnmYTwcU5Jor7uBjR+3MS0HF38eghrobDaB51MB0OERIAiwWGLzygBRtXQHh+DjTd3P2TUVCOHzwPx0luW0i4oPP5bK6cz8MwBJmMk89pdBV7za/OzhAI3x0MBhifngAtxbjjz+80QEZXtE9FNOLjYJyaRxeLEBDaAQDkwWcBAJfzEFfDYTTuDun5joZAp4ujk5M4snc5fISRuDbw82iQfx4lk0liXQzRABldptq6mIwRRnN8JlwXV8dD7MznmF+ew9k/wNkVjUqeHR1p1xkACI+PgG5Peq1FNy70KAAAIABJREFUq41FNOeEpycginmPzqPHmA2HdG2IxiuZ0PF+9uKLcJ5QtyM8o9vck/NzOG6y/4eNJsjluXwdOKZb19OrK4RuCzgZ0rYeHwGdHo6OjlLfSZw7WrqHz38GDhed063hVeLevXtGx5lW1P5PAXwKNKn6iwD8FQCf8jzvTQXbx/BpAF/ieV7f8zwHtMje7wrX5k3Ut4qfbwUK05+4MJmhpGxClSUPYgm6DCnPVWlDPHIV9NOoIQH0Xs1nlH6jkpRlx5WhYJVHRpOFi01/r6rgFZCg6ZBAEUZdtb91M5SDdIm0iZyK5vI9vj1xTkWUiJhIChVyCYqA9VGT+zCbL+lY7LsqWpkpTOlPPJc90S978f9EpMvIwCcLljk+IalEzyBynU0oMqI8Z68fV9xNJGpH/6/E448U8JxGk6NA8OpPSwpEom15IZWUjUQ1xCRTnn/N5uisnAqFtHGqYKmK9pa1bjDaEDePkvks4vdz3Hp2jVWxMJMjTUE35zA52NzF7+ZLkQEd/clkHojms1hcBZDmZDmqRG1hzk9I6PJ5Uu1u8vjEOVacR8X5h8mwy2hjps+wy9F1A8M+n2iThn7GzXtaifjE2B8lfx/7rg4LRZ0KQCv0QLicCofP3TKV/C9KYVszTCMV7wXwTt/3ffaG53lfD+DHAXxu0Yv7vv8Rz/P+KYCPA5gD+HcA3ud53g8C+Kjv+x8C8J2e5701+vwYwDuKXm9tyFCDUSKRU2G4IY1VWXJuiPjFVWeQjAM4T5hZrJlwqWZ3lkoRmU2psSBLnO1wk6yuzgK/KOk27abIs9CyjYc7N5OU1Rk9vBGlOi6u4J638NFyI6aFKNPHY8GpYbFJWDQq3BYVBgii1CgxcRZYbQMzDuDcedKskj0rXsYbiay9RTfnWVKePNfWifq9rF+KcrMqtbLEZnW0mgCB5NxSSVlhA0KFHh7qzyVusrlnHQZpg2rVPpCYA8VN
n7hhLmrESlSHHLdFN2Li+OwmNxbO7TvZBqh2U8XnJY3k817T1c8DTEQizmMJ5AYja8uNW+pzmSCr8rUKOuMvNtIz1J/Ee8w2x7q11VQlkDOQEQRSfj8AtaSsmGicchRE44wZyKoxGfdrodaFCcQEYn6ed7lovGnxO3YO3vDt9GLJbVUeEQ9lThE798lweW7dcby4CcvNNHUohLpEbV1OBTN23YSSFmH3IQtlGvM1wtSouAfgF4T3fhGUjrQSfN//fgDfL7z9bu7zdwF416rXWSsKeo4JP8ibLhCG2RKmphrIIhLFlA4051csXkVgqFIUezFlHrt4kg30EYh4kl0el1mPQQdRd14HtvFw3XRVVxkUihcAllKBY27xFyZTx40k+1Ta6yqoamOI0CULhovlpppP1AaSEnu8XOE4iNXKHNelBYVW9VL3+sn+JQEJw6iuxjJRlYwDOGwRWbn4nWIzx+p1uC7Q7iSfp+MsvZG9PsA26ioBAnYcv2iWNT7ZuWUbblF5zKQYJFO34usfRO+T0RXdiLHoVrcPBGZ1eaQQvYFskzafxYp6iTYUVZqSFV1jkYpxkKwxwavpsHk0q/p6xmaJjANKDZpO5POeK6kkLbQf/d1lVehJkNpcxrLdZXhMZ4Y1DkToNlgmtS/cVtq4WkSb46amrk8wWqqVmbYvipJJoaionRLJiMY0ISS9seZVj3jw9VdYrYs88+hEGJ+J2k6cUZEn2tTtgwQj2kcnYzom8zhZNfWkEqpkwQi4LRFyAej3uXGX/n2mkQq5+pOy1sWC65e8kWjIJFnK5W9XpMJUxP79AL5DeO/bo/ctsrAy/am33KhkeZ8LVrx2TD12ZUvKApmysqFIjRDBPBG8JKcAOsl2E8etpJKTJ7w85z1ihhMpIF+Y+Osyj5g42RUttqij2CTaoJmMFwugGU0rzLhgm/Q5R1MQw/v8BpAv/FQEosqI6j4sOE8SR0Miotc8L7LG+0y4D6zN4xHQ6cZRO8d0IYo81tQLX6I6G0CfxXRCees8RKqEyQZGEt0AQNsu0rZ0BRYNQERp3V4fhN8wC154slKkQiEpG1CPJEvGdBK1DIKls4idR/wNYbQRyzImReUgHpn0J07Jh6npTIR5tEyPqalykIie5jmZKEpJi99FBqHuHpk60Xivt+47Yu0eBlmUIAwpDU08n0oVSBqdyzGGhHlvWRFbuNYsR7QpGsfLyuSGtFQGnUR2NzmeVBFapysZd8Cy1kXWPVJU1I7bAMjHhjjPRypzxhTVMur1rAGmLoMvAPDtnuf9ddDK1vcB3AXwEc/z/g07yPf9N5bfxMcAhTd5LCTKe11nADTJcjEfPG9ORTbthHrEpuVtWgy5lUxuUknrYJuvLIOKeXh0NClT5KHpzOdAvyNf2GQYj4CWInzeExYvqXeyGN1Oy0vloeN6LubLxT3OqYjawS/+3T4wm8aTrCNsAItuYPiNmNNo6PnS7P7wnPJgtGxvUeM5lrJVhPdFDxawpO9JNtZLyo7KYxfJFU4ntNZG2ZEKgPY3XspzHFAaG78hzSoGKW6cuDwnElwlfp+zck6FYiOWoiStuGHWScqm2kDnqWXeW1R3RFXYNMv50e1TOVTdvJflyGB1WqLzkfFoWbSPp+EBpWxupHU9TKClP3HjWAVWEJQVtwQM6U9mjpZlsTmJgcyjqZCGT+VTcf0yGAF3uGiJUuZZnEcVxocKBmODLBZ0jtFRzXh0+8D56VIymo8gG66FTvdp+WdRPkNmHqkow87yVlheUtY90kUq+PVYrHXB0/L4edRU8r+34ty0JpgaFT+JEqhO1xWZmxsVxgGlkLTb5gOxqJfVJPFYV9SnCAx/kzIhmSGaZJ0s/edoktXmLBgipumYeNTjxWtuZliqCl4BS5pOMFJ7PApWcNeFmhPocFQyEeEyUdtpNiMaVpSYyKr3Og5IYpIVFu4oZF4I4kZMt2HgaRNuiy744xGXU1E0UpFh1PFFkbhkQSLbCDNj
IQyzOcO6CFdR8AZkwqhIPzP6vqYYpDg+uc1qOLqSeFlXWEyDEbDLLfLdHnB+Go/X2EEhVtzNC9kmOfJ8E3EcM88xE8Pg+6gsAq2hQbLfQA5f1DuSsnLx+PZnGF5GVM8sVGJUMDphRvE7IFnPIiGgkb7/ZDGPnGgGc6IoBKKaR5sNeaJ2PG8x6iPXL0UvvCoCIZtHc4whojQmuWuZUM040FoXDzghBoMIMg+tQEWP5rFMx3FUUIooikpmU/qcOznnGU3xO23lcrEeETsuMMx746KoK4+7GmFkVPi+/7NVN+Sxh8obpUPknXQcB8Q0+bZoTkW8WdUsHGWoJvEwNSpGGdeNJlkiekJTx1HvZ8oTVxQqbquIKMxOWvNsdS0g2+sCUK+w6rhVii0aJCXqkgVpoTsxUZuLVLSWXlEAEd1inPwdvX5xjru4EdNtqnh1jqjeAE3UDuOqq4WQZdTxCXxdwYMlbtQXC+qNBtT9lfVD06JKOeD0WA6P8KxFA9SgGCQR5yZus0qCq/T5oihNodoI4yCh3uUwQ1U0OtuGFAgVdPSniTA+ez1KZ7m6iNoQfdZSrA1i3ooIpjyjm/fcln7TlDAq+kBwlXbilOkxzaMcxCFBXxVhstEVDQn2t0uLEhLJBnd5H3JQQlkfU/H7VZEKQSRjmTs3StNDe9T7n2groz5KxpAxNOMzHoE5jQoWdWZCDE63n1/9STfvAcDlBT2XzplIwmVeheC4IVkOhXABOA25mIxubHCR+UR+xERD6eLBjim6Fq4J2pwKz/P+Z+H1twqvxeRtCxVM+fQ8+IXblCpUdGNhsnCwiaHsRO0MQymMQ6c6T606cTl5HLex6Kz4O0y9qTklZbVVkXkqguq4FYwKY+UgVdh4wSdqSyRlo7bF1zk7jtTKDDjDJhA3YrpxJy6QLJfDkPKgRGFJWcGoYOPsNNIz1xnV8zlweQ5gRQEC2bmB1OYknTxqEukUoqi8jKnokez2l2o6RSDJqZAZXk6jod6smmAmUcJxW9QbfXUpj+aw55ll+MYRCMP5rIycCn4e5ZWIgHIStSVqWcZQzTkzg42ubEwmct0kRkVWLl+ibSziKIks8Gg2lzlmPDhaDn++5TzPUQM7kvlxOo2imVxbZcfpkBqfEoqvyb3mETk8CL+GZ6njRSDzeSRtrJn3AIP5UTOPmhhevEy6qg2yc3CR+eXzvNI7DWXn3jJJ2axE7XcIr98jvH5zeU15zGFYZ4IHv7l0TDeKWd4tFTrcJKYCm1xKolfEOQOZkQoudCo7D1tssgwq/jhGK1sFpjSdaPFyTGWBdbkNPE1HRZOK7qvM+6ZFnhoHqhB8IlE7mojDtFER035OjhKvAUGjPS/EjZjrqsPs/OYeWHKVV631kNGvCW/MsGRBRmcTOfhY3iOlMd9LHlcaPZFrQ2peEBZGI6GHVN2G5XfI6Cr5+2QbGkPEOSjivWS5G/y12d8FjFgSRvxyWaQCAC7P0rlC4J5nwvAtmFMxGYOMLhPnT6Cp6f9AYpOvnEfbbSo3W0qidkFJWUA954jjWAaBckMIWbZFsTaT2LtuMJ54p5yORqpK1FY4FMjFGT0+FYEQx2M6quLkFTsIRmm1MvY+A6/gZ4Jejwo9XJzH59TmEfFgHnql+pPhvCeOO8m8oEWoMyo4OrIISWSenJ2knWgKOM1mpIS4XTkVWUaFGHfeJmrXZiFPoTcGfqIxNSpEb4MhHNelk4lRTkXN9KcsWhObZIOALn6Kqq/xJBssaWUrwTShmOl6uxrpQh46GT1G04k86koeNaCUUpVBuhHTQZUsyEvKionavFeXPUu2IHSEBbVwpELo/1GCphQiF5sl0pp6klSI+3V2TsWyiFn0PBPJytFvYPcoK3qVdVwRqCKYIofZKCdrlKCVOS1mIAeUHsG3O3ZyFDAuZ9NINlbw/IYhcH62fB23PR/3fHkdRdE11scvzwXDS/GcVNG0LLpp7Kk9Vh6X6ciYzeO6AcvcHBapoPz+
BDVwVRTNqYjaJ03eN5WU5Y/lld8UkrJh1rqTaBtPf9Lw+5tRbSZCEm+nRDLE+VGMeDG1NwZZLmVeY3ksrIuyWhdzRZ9XIWrP4niYbJ/JWpiVHyqMJ7X6k2Z+NImK6yIVunlvwRnQPc3z1MGUYr1ByDIqSMZrC1MUoT/xmxuTQl7sO66b1kw2QQadhxSNgqhgqFJEgivqze0oKj3H9Ce6MVEaCzxNqoyNl6lHPSPMnsI4oCFuFToZv6OI2thsqk8GFiH57dRzS7iciiT9ifD65kLoOmEc5dVX5zER+qiCLw2A29wzQyfZjwrDuPgdf10J/ckwvB8bIlk0gCLgkgUTEA1ansalgqy/MoUrITrkqIwZE8g24z3hXqYS4gv0N1XRNdbHp1MhKVSkPy03WLJaEiYCFYnzSelP6s1bSsmn14+Kt11F/H5ue1AwmpPCfF5sbWLty0qIVUGkD/PfUUUqNPWRRMQG8uUFvd+6RG0gnaydilJqxn43yhGYTpfvxTLAwnGTIGXAKCGMz2UeSzpR2/gZRu0Jjw/pa5Nct7g9XB6G5tyr0J+MojmLubzwHX8uqaTsbLkWdMTnuSLNeIORFcNyPc/7CiwjFOLrgpmM1xC6zY0KwQjOwd3o+zkiFUU3FVkFrCQh1pVgmCdClWE0kYVuP/JCnurbFlMgcnjkNXBUnjMRsVExXz05DVh6VlXPuoj6U94kfL6gEIMovZfKqdBEKkSayHwmLyiUgdRGTKv+lPS6Ob0+VdNZLJZVVwvAcZylrKj0usImqNuL1LwkeQDA8h5lhPeXx5VpVETnEpMFxZwFE4UgGa2s26eJ6DNBqppPYM8LmUod39/EaOaqRoUqUpFqg+gxzaI/ZUUqDLyfpupn7HyE0HEt9rWi0RzZNVfJqRi+mHqbzLiogwJO06UeUfab+SiTKqdCRpXTodfL3uDy4hW891sUyRCebSKvhqcGdqJ+LDNAe306l82myug9j5RaGWuHxKgwVn+KhB4WR5FRwTbXJk7WLLVJ03mvJ4w7cV4wyqlQ9C1drYtEvpL4PPMwArYrUpE1uh8B+Gnu9ZHw+lHpLXpcURr9ySBkWHRT0e3RAlG69kTHlYI89CfdNWN+/lC/ALBJ9uK0nN9gQNNJcHfdebZ6F5DN6WcbsflcH6nI099yKmI5HYlBFRsVbvJ/9j6v/BLzXIeJ1wCSG8q8Xk1xI6bLZVqkN/dUUna+uhiB7roihYAZaCSUboTje5Sxucw8rghUyYJiYim7X5rxIKWVdfsgsuiBwfmUCNJeW7a5ISdDoCdEM2UGsglUGyy+z0pzKoQ+rzJAM+ZbpihDToZqtTKtUIHYDzXz6Cp5TolrFqc/KZ044jiWQaxyz0WZHFdedTx3Ecxun3u2iu80hDwzBjG5u90GGg35mOapVjdvRd+XGKC8F93AqJDSX8VaF3nVn1ik4ugRLdTKDD9drhtDFjOijPkxojBrVeb4PEEBcRK2ZJ5K1GSJWBa55+hVREvWBK1R4fv+szW14/FHgUTthCfaWP0pKJ5InRVqW7XSsAhjo+JKf804vHkM7N82O+4pRUGdPDDxcvDyoSrpSA5kPtPL4wH0swefXv4toohRMZaEz3WQ5ZPERoUmUbuVnGSXfHCFJ27vhuEPiJCnToXIiWdG4mK++sbcxEPMJ/E9fH75N0OKM5/BLT49pvKFReklEtBkwU7iWUuLYJoUg5TRyro94ETgW3N/k3EBjXZZXhl/j4Q2FC60p1DCcdzWkicspbREz5Pds1aL1iIREQR0I5alPHN6rJ4v8kQqOtz5RGnpbg9gdKBVUFBSFkBElStJ/UmsUSPZ4IajDNVBEd1+/Gy16k9AOllbpP+xXCvJ2I9lnjkHIJEZoDzf/8at7PaPA2BXmG9F51Fu9acop+LoMLkvMdgPEdk4lpw71/woHtftLVXmVN8PNZEKIGZApMBF5mMZ9qy2ytrOojxbgqycCouykLNOBV91FUAO+lMGdUaHrMTj
8QjodNUVc/Mi+k1ZtDAyutJush1+0tAZVNxx2pwFU3R7MU1HCX4DGSUNE1nxIwaDaJCTMTk5zSaleJhEReLr5vfKpZIFQzFSIamozahGjQZ9VpIk0/h5FtnoiRsxExpSi+O9Tif6gkumMNnMNbmcCqlxxRnBvCqLiB7X/8tUfmIQvdSSWjhGQg8yekWP34gpolV5IROU4B0KUi98AW+gil/Ob7hklK7TY6DFe201idpZEUt2PtV8xgrxyXj1gnHr8P1NYniVk1Mxyx99ZJDNOcDy3umqPOuMipb8/i/pTzk2gJqkeQDp6C00Ihmq88XRQ+55SCLN8TxqKnYwlhRlE/cE82yqWQIsUnEyTP4+RXQo2R59pMJptWg72D3KokmdHqdzM03mGV2iNkAZHrJ7LBrQ/PM0jIQ7pmIwGwRrVNQFxcSlRFx1lQuRA9kDcQU5TCcr1CbyqFeFafQl6zexAbqYazdVMS91oUmkywOTCUn0iLHrq2CiONLtxedQRhYMoiLS6xpzPVmyIOdhZb9LSNQmkjoV8TnYd2SbryKbGHEjptncE9FTG/ejhflGQgXXVRt1c5rAxxJhHe4+JGhX7Q41DqPIiTanCCgnwqI6P/8sVNLN3V5mTpZIK3O4viz16heRepWNId3YV21WsyAahwxcH08aSt1lG/j+5aqK3+WZ9zSRCkLkMqai1znRjzI2l0Uxn+m9vjr0+rRt4r3Ko/7ExuSMU35T3H8yusrnRGPtY3/LwKK4/POQ1ZjQnU+WHJxFfzJBIKcnSulPpoYhG3fzufG8vGyPwZrUje6R49BCljIwGXbZPGogMEF0idrsHFk5FXxb2d8mUEVBNhjWqKgLeXMqZDQOwED9SSEzaoIs3mxZqkkMhkVwUnKTIhJebgPPnvh3UZho6fOeHRO1K0ZDykjUlv7No6mRUpWAyBYlHWTRhEUUgVElaqc8N0uDOeHtXUX5R6QR6KQLJQnTqTYUhc4TN5vL74PwdyzlKbZNBEsWFM9VFsRkQb46rnBcNv1J8h0G/rezyF6hSIVEUELm6Y3bIDGQTaCS11QkalM1neg1LzWriFSkCgyK0P0msS2yvhhLKnPqT6xNFeRUkHBBN8+r1KkA0u0wUn8S7kNCUlYVqcgXsXRkjhERbM3jcyomiiix6nws9yUQxqQYzcyblyShJ4rqSPGc1jR8hjLhCcBMUjaui6UwFvhzdnvyitcQ5tGe+PvYPVohUqGa9xbCPC9zmmQhy1GzgbBGRU0wLnzGIHoDWwYbUmC1jX9GjkDeSTYTYvKc6rqjK/0mW8adXuU4QxjRdGSRCl0/MEmG7xj8DpNEuLzX5SGLJqQiFcICOp8ta0IAiQUheW7Gp88/maY2YtqciuRmRCqRWhSuq6+PwVM1dNdlr3V0OL5ia5njk2+DLFIhaau2GKRsbtKNyaK0pLjOguK+qtqQt7+pvLYq+hP/2qSPZtWNMZnPdHOOzqhOtZuuDVrqZhZimtIK9CcgvUnOkVMRb4r57yiiumFwlc9IN3keDYESCqhFMtg5HKFQq6y/RrmUCS98x3weJeFCnlcgGpMmVLPE93mqkeG8zBAEaWljEWwtzKIzq44ziFRgsciIVCiogTL6E4Bckv9RJDe3cugaYY2KuqDjdssgegObBhtSYHVJ2dlU7WHNUxzNBLkStXU0AEMPs4wOsQpMaDoyo0LXD0wiBgrvbgJ5hQFy1iCR1hEQJWXFUP9invTisklWFnIXz20KsY/qaIeyehERjBPWVcjK5WgqNp6pjUXUjqxxZ3pcEShzKiRtVajHkdlMrlamG5M9vZNDiXEANISNWKsd98u0F74g3S6v+hPARZ5Er62M/qR3EMUVd6GRqMxlVGgioKzdjJZbBHmVgwQ4qsjwfAY0m/rNZ1aitiTXjYxyOtF6BuuLLFFbUY8hfi2qlanoT6lnlmMeHQt0a/5akToSgPySsqzWBYRIjsl+aGKQH9pTrCGp4xTzowmFOVzo5YpV
tS5E+eSuZOxnYZWo/ZpgjYq6kDenQgzhG2zAyXxO+ZlFN8xZHruyIxUmv4mQ7AiJKa3JZDOeByYDPsHdNfi9RjxSzSaUoUhkTNyI6SDjvEcRiZhOwWuyA2rPjYyOAhQL+4obMdWGDci3qcoLraSsuNho+mUczckyKuhvVqrOrAAxWTCmXcg2MaqNuSoPQ0VRArIjHyqIlYHBqelI2lC40J6yToWC8gDI26CUlJUkzoroZvQPnSMjlVOhfxYAVkvWXtGoUFZZN8nTEIvHigIaQCqySEYZqoMi2D3T8PsdSaK2UlVRNfaZgRwIY1JlsJvMo8rx2V/WugCKPUNVny8jPzTu/wXHCZN51vXrzERthfNDiMw7ivlHi1XyC9cEa1TUBd3mRgbRG2hCfxKrCedFltU+Dlb34HIwUimajGmioW4gcl7IzOJ3DCUmamsnJH7xMklMN4hUOCbGkYm6Bo8gvRHTgl2X90wrErWX9Kd5wovrqBaENisoVGxDmfKI5akXwVBGpEJ33ZbEuBLbAMQbqcw8KZUnrgyIyYIK+pOj492rBAjYRlFUZWHnL6QApnBCqIzYggs3ESWJGfjXCrqFYyKvKavrIYLzZkvhCoY9Dz6vAJyajux8svGeFwZF6rToKdan+Tx7kysaDgYRZJI3Ms/lPymjJrJEbZlaGaDcMMcGsjgmU/MorXVhNI+qItUiPSivpCywvC9cn3cMnKwm998xnfcyqLar0Z96yWgOQ0qYxNAA4uCYtG/DYI2KutDM5zkmwiB3Gk06QazKx9dAGV7mz1+2ZGWWSpHJJttxzLiV7TY1YlCSR5edQ7fQcouXYxCpMHqGXc1GjCEv3S5vLo4sWTArUdswHEzlZrvFqS+mHrH5jN5D1t4yc26yVKdkHqxmM71YmyRq859XllORTtSWbkCUkQpWB0XYIEXPyulJDFoVVzkDys2Iin5gMo5lECWJGdgz5GVj42tJjABdToVhhEo17zk6R4YsJ0S1kV2lGKHuennAnDgy+lPWOYW5lyRyKuT3KBxd5XOimaxBkkRtpUiGih7KPhMpiWIEjuVamcyjMQVLYUyycxR5hhJxAiMnqwkzwnDeY78rlZtp4lAQq5+L6PWXtS54mEbmdVAZ0hsMa1TUhZaB2gEPWTgyK2TI8jAKqz+pw6WxlnbZ6jKZv8nQUOI2Jyok1XTKoD8Z0HTy5lTESaYaxQtu4VdGFnLmVOT2ysl4vapE7cUiqiwu99xIn1nBDaVUUnaxkCeYRu1xZMpJqxrPWTkVorRudH3xeSqjOSKyaDCrQEwWjA1fMbKgoAEAGhla1gd2Ul9xVsmpkN2v2Kspp9tJteZ1UOZUpHN0GBxpTkULCEOaLBtBmYMiIstTK0qpcpBGWtiYVOWdrOIxjdWmChoVKsNGHE8yiHNvQpVPblTQ+kg5vMomXvM8ido6h0JXqI0wHskNINN5dCxvgyOucSoZZR1kNC4T+pOJo6ur6K+p4xQOLBad0zkUsiIVqsKfqYh0gTk6D4VtQ2CNirpgUviMB5sIeK9Hlvc5ryyoCF2obTYtR79fRFZRQEUSWwqmSVBZdIE8MKHp8Au3kaTsSB8+B8x49jmLLeaOVMjyb0JNovZiQb05MilV2e/o9dMeyQxIN2JaSU1R2lXDKc8JR+eJU0nr6jbCGe2JF/8qit+JOQfjUaTK0kwfN1cUg1TRK9gmtp82KlIUD1OoaEOq/lZ0wywm+jOwJHxdG0RnEZDc+GcU/lqeL2PeM5CUNRNPWCHPSXe9PFBFlEzoT80mnatjSVlJTgV3jwghdP4pklOh+06cqM3tA9iz7sj7pXTtE2VMZTUmouOM5lFltEQYGyZJ8SKk0TkDdUITuvWqidqsfbp+nVVRW0nLEyLzPUMqa6JtkcPD0p8sUjApfMZjHKTD51neZ1OvvgosaUnmHVTxPldFFrdS4UFJITe3cvXiKY1aAAAgAElEQVTf4TQaesoHsFy8eI/YQvcMS0pOU2jf
a6+b59lKkgVF9SenES3k4UIeNs/wxOXewMgWxnjDILkXs+SkT9V0OnpamSkiJ4IUIl1Du+CZ0p+i75ZRKV51bvasVQaoJicrXhQVspkNJcWjWK0SR3IfYgqEKiE2t/pTRk6F1HMseZ4sssHPC6r7JWBJ68iIVJioPyXaJzcqisg8L6+nuF+mYMUghedEhHEsg+M4Se84M+CaCmfPnDkocsyJJpx5WaRCJZKhO58YgYgM/fRxZvQnpUCImKBvYsAJkI473fzIkIf+ZCopq1prsnIqmuqtcmz4pAQEFPWICtCMtylRu6DLoDx4nvdXAfwFAATAbwH4Ft/3x9znHQDvB/CFAI4AfIPv+59aQ1NXAx9+bRko7Mi8JBkhQ2Lq3VJBxy9cNQqigiGly9xjVzP3vKOftBPcXZepIGUYUWX8hlYLuMgXqXAO7hofLk0WZIsE79VpNun7C8mGIvaKKib6vBx3mQGaxSkXucHdHvXEmSasq6ClP82B/m7ymvz/ifYYGstZm8sV4HR7IMDy/qpyq/hI596N5Gc6GVoo2t3rAdMpyGJBDT5TqCIVKoeC26L9tGidihz0JyUVhD8foC4wqDyfYi7IbVQozleGtOWqkrKMvir1CBuck3e0cPkwjtui/Vty//NRQg285jJJWYVIRjzuJOdzuj2Q4YsAuBoTqjE5uspue8b4JOMADmB+r3moKH8Zqo9UUrYk5oHOmdjt643lxWKp2iU9d5rhQRYLWlAzkTsXPc8i9KdVBBJqxlojFZ7n3QfwnQBe7/v+awA0AbxdOOxbAZz4vv8KAD8K4IfqbWVJMFH+4SELZzYz8jIMvVtK6BK1A4NKz0WQoVK09HBm0D9M+YplGxVZ4WWeuytWdZXAKLfBZPEyqVjKo0gNEjGaICZqA9Qzt1gsN9jc5Kx9Zjp5UhVkG7GsTZW4WHT75RjOWfUx+Otq74Mhra/ECJzy3FE/J4po2lLoQRbpVDgHGCVARX9SnU8HVe6XYtxIDWQTKJRwYlENDf0pMY+yvjATvNeStqrOp1OBS7SVh6T9y7wAeURpJY9pEeUgEbIcAZFmogJPucnKdSviRDPh97N2hkKdijx0PSCaH5mRr6gxwdpilFMhoVuz6/CfF4hUKCl/qlw3gKNbZ6z7OSW35YZ+FtsgI1FbRgWWUf1UY0uHTkSxtpGKXHAB9DzPmwHoA3ggfP42AD8Q/f1PAbzX8zzH931Bv2vDYVjojYFMJB7rVivFVyaLxdITcXZM/y+6sWDa2jr6U9n0isw8kXyJ2tkqEH2QVrt4sqCIbg+4vAC5OKevOx047c7yc572E3nryXwG3idFJhNgOqEvri6zDShG09E8Z0dirBFCEJ6fLtvKw4R2JaLbS3p4xERtgG6YFir6k9oL73T7IKMreVtVOBmmz6cZd6IKU/zdPF5xFTJyKviKqixZUJmwDgNj3jSiUQTMW3lyBFycA1cX+k3Q8RBk/yD52fmpWjYW8kTt+BpHD3NUciaaSIqOTtIDLs7z9bfgSs0vd1tyhTmZkShzOJnOe7poH3ducc5JXM8gmTSus5PTwCOEAJcX9O/L8/T18kKcc4CckQrBqGi6cvqTQq1MCxN+v4T+RJTCAhmR3CCaH8+O1Nft9ZfH6XB+CrQ1amVnJ/QcwSh/Toxs48/3eW69JFeXQBgCl2fRd8uZ95xeHwQaUZCTo+U9arfh8POUSUVtUGdLPMa0UUDzOTqXgteGYK1Ghe/7z3ue93cBfBpAAOBXfN//FeGw+wA+Ex0/9zzvDMABgGGtjV0VOY0K6SZPwkMM3/s3gd/+2PKNZhMpVRZDLHMENJ7GSnIqSqA/7ezR5NGsBWt3jx5bFnb2gN/+GMLv+ib6uttD4z3/69JblfCICaF3AOTiDOH3fCstWhjB+cIvy77u7h6cXc3vkBhr5IPvx+Ev/4Lmt0g2djqIyYJxojZPf2okjQp+ko2egyN7Hju7wOnR8r7mAU8t0qjfpNQ5WJtMCwDq
EG1gCCFpKpVYWRwAdm5I+6Wzu0dD5js3Up/Jjyuxb8dto/eT/KMfQezJ+cI/ITmOXjt8799QnGcvTfFoNIH+Dhp7N1OHOzu7IADCv/FX87eZ7wMMu3vUsJF9trMH8tFfB/nor+e8jmLM9PrAbvqZOTs36D3kxq6MfkNM5z12DdlvAvQ5RXMJXXE36v8yiq443llbH3wa4d/6a2j8wD+Ac+fJ5Ge/8DMg//IXk19YJV9J1gYTlSwgGb2dLZXfiMyoK5Kf2O3Re6kbg5HDgizC5QZUZVSwMS8b+zt7wHSSmB8dab/eBc5OzObR/dvp99ptyib45x8A+ecfoO/df2n2ucS2Asn1xU0bFeGHfw3kp340+V1Vv2bY1awh0jZI5tidPZDf+ujyHrktNP72++Awx4hxojbXL2VROTbmdeu2DKbRpg3BWo0Kz/NugUYingNwCuCfeJ73Tb7v/2/cYTJycypK4XneOwG8EwB838dgMKigxeZwXTfRhvGtWzgDcGt3F65B247mUzQO7uIWd+xxrwfHQeK94eELcF7xavS+4i0AgOaTT6Nz94nC7T7c2UUbIW4KbQzcJs4B3HrqvlH7TXHc6wPhArcV57xwgJHbwp2nntKeJ/yGb8H8T30V2hltW/xX347wa78erZJ+w/yd34Xpb1GjbvaJ38P4134JtxpOfI+u2m1cAjh44kmQYIQhgN1OB/3o89nZEY6nU/S+6m1wX/pyAED7tV+ceY9n7/ohNPYP0FQcd763h3G4SPTB05NDzA7uYufrJAtMo4Hul34FGjdvGf/2kxv7CM9PcBBdI+j3aR8ZHMTtP2y10Wm30NvdxTGAG7dvoxt9Rg4OMP2+96D9ui9OcVYX3/DnMXn2FZAMdS2c/g66r/ui2Hs8vn0bZwD2d3dSz/zEAUi3l+h787/03UAYGvVxcYzzuLx5E1eEYHDrVsr7dxgu0Nndww3uu7Pv+2E0Du6geTt5PvIn34Rpr4/2679Em+dB3vQ1mA7uoPOaz89sd16QgwNMvut/RHhxFr/X/vw3pO4RuX0b4//2f6BSnBK4zzwnHZ/Td/8oOveeAREMC/LGN2MMAsKieKZoNNH9k29CQ9jUh3/m6zH/3Neg/dJnU1+Z/ZXvxez/+5181wHQfPpZdCS/afa970FjcDf9PN/4lZju7qL9ujfEz3N8+yDVR0duExcAbt+/nzpH4nxf819g+opXofOyV8Tv8f1y4RA653SXcw7DRbuFkevizt1lLlXovQPzL/9KtO/cSV1ruLOLFkmvDeM/+G2cTSe4MblK3YuT40PMuTnH2b2B7ud9fuGcpZMbN0GuLhNj9ggEzf4O9jPG7LDbg9t0sD8Y4KLlImi3MRgMMDu9Q+emfi9u/+SPXJwC2H/qXq61YvqD/wDu08+icSNtJAPAfDLCEYC9fg+96LxHsykaN/YT6zoAYDDA5Pt/FO1XvxZOp5P4KPyz34jxE09Srz4Ap91B98u/KnXcwnsHJi99GVXey4D70pdLx+fk+34YiwefiV+3XvHqXPeEfPXXYfE5r4b7slfG743292n/vnEDzciYuTw/wZXjYO8vRE6EVhu9N35VMmognvvgANPvfQ/aX/Al2rwr8mVfgWn3h9F+/Zel+t78m/8Spv/x6+jfn/kUgl/+IG7Op/G9eBSG6O7sJObrxLlv3sAjADsNBzts3IUzDAHs3boVP2cMBpi8+++h/XmvS7IZMhB807ehMbgb903durMJWDf96SsB/JHv+4cA4HneBwH8CQC8UfFZAM8A+KzneS6AmwCOxRP5vv8+AO+LXpLhcL2BjMFgAL4NJKC8x5PhIZy+3usIAIuLczh3nkqcY0EAjEbJ964u4bz68zF6w5+O37tY4beH7S4mJ8cQ7194+JC2fzyBU+K9XYQECILU9eLrnhzB6e8oP09g8BSQeZwD7N8xOM4QvT0guvek3QV+7Zdw8sIDOG3qeQrPTgEAR2fncbLV5ekJRtH1yYsvAAAm
r3k9pq+mG8IRkN2+/WjRV9232RxkOkn2lfMztO48megrPEazRa77EjZdkIuL+Brh6QkA4OT8Ak4neg8OxleXmBweAgAuRmNc8td49lXAyank7A7whj9l3BYeV8fL6YFpuZ8OD+HcTFJyFsEIgJPsW0zW0eA+iGOcRzihkafhiy+kKBThZILxfIEp/91bd4FQcd3nXgUcHWW2By/9nJXGvhavfl3ipbKPvuaL9OeRfefgSQz2bsrv5Wu/1LiJPEbjKTCWnO/es/I27N+Jx3FeSO/5bfPnSUZRHz08hHOTbhbC4SMAwPFoDCfMeKb3n0u0ge+XjNLBzzkM4fkZ0Gyl77tiHl20O1icnmCmWBvOHz5MrQ2L8zPg9iAx51yZ9GUFwoYLcnGenNfGARYhyVwjFo6DxdUVhsMhwotzkKaL4XAIckmN4PPjo7j94UOaBH06nuZb7+7eB6Yz5fxBzunzuDg9xVV0zOLyHM7+gbz9T78cuLig/0S8/o2Jl1fS4xzgi3LMo6o2PP3y7OM0GLzmCxK/LxxTR8Hxw4dw5tQwCo+PgG4/0VdGF5fAxaX+5M+9Cjg5yW7Ec58rn0fdznINv/0fgF/+IM4evghnQB2ZZDHHeDZLztcimi6ujocI2Lh7RMfExXgSP2cAwDOvAM4vAEiepwr/yRvouaLz6NadKnHv3j2j49ZtVHwawJd4ntcHpT+9CcBHhWM+BOCbAfy/AP4cgP9r6/IpAH3CnAyykGirla7aWHZBum6P5nPI2hN9XiqyVIrGgVxuchMh07uPFZGaXDG4AgmZeSGTlA1GcDRez9wQuZ5M0SSRU9EUcipqnnKaEr40w2wGyLj8ZUAnIW1SAdji+qAl6SvjoDxpY0AtVJCHH6/idkeGOxmP0rSCYATkiH4WaoNY90WFRE7FfDk3RPeAzOdJShK7Xplg3vREonbOGkGPA2Ty6uNRNXV28kC1hutyKoAo2ZtP1KZjubTczS3CWtWffN//CGjy9cdB5WQbAN7ned4Pep731uiwnwJw4HneJwB8F4DvWUtjV4VJ4TMe40CeU8Hzbk2rruaBqu5CEFCN8BxhOyNkqBSRYCRP4txEyPTuOe6uzLA05k7nRWvJ6Y8xHsHpl2uAJidfofgdkE7UrnuSzSspWxZ0ToQisowWjy9kfSXS6C9F2lg8N0NeJR+VQhZ7TyFFXqrMsSqnwmQci4na7Dsyw6sqZ4/MsRRI1vrHHZJcN7IJ94HV6krUXwqzxTvEsbGu9W4DsO5IBXzf/34A3y+8/W7u8zGAr6+1UVUgh6Qsmc+opJoofSgq+qxal0KGXp8qQYiIvAgrL3ICZCpFyesGcPr9nMz6NaHDCkQJKhDs2YtVXQFOurACVS0gSgqO/g4CNEqNavWByRgkXNCEW1midqNB6wysWviqKLSeWknCdFlQjHcSLigP+houNhYKqDa1ZYxVnTPLoGgcD6fbBwk+k/4gLoooMypK9sJ3+8A4AAnDpeqWsaRsiyp2QVB+kxp1QVSQrmQnmlBRm9aYUKiVPcZwWm66NkjeCuZVQJCHJYREkrIZ/avbTxoi64rMbwBsRe26kEf9SWUsiKXtV61LIYHTUUUqKhrwWZKyQcmerioRq0AIetXRs09VdeWPrYL+xK7PXUtaD6AoRH3uOFLBTStNN1lRu6pNvAoZ9A+nWXGkQrwu88zVfR8sNhfxxp+PYJazGadzjkLeOG+kTkV/YnRZVX2Skum5IASYcjTg+YxWxs4Cfx94h4JU0ncER1KQbmWIkrJxjYnrZVSo1qe1RyqYZD7bWzEpa01FbQDpsRHXZbp+ziNrVNSFQkaFrKI2HzY1rLqaB72+dHGgdTMqGPBZkrLjYHvoT3FFcrlRAUDyDKuilSXDy8wjVqpRIRpRUklZmlNB1hUOjq4n1ncBEFW3r2Zz76jG+zUOi1sowPpoaoNV0kZTUb1YWqdFB1k1a2DphBKkyMl8TqWyy/TCywoszg3HMT/38nkYdThg
GNjcyObKKtgG2wBVFfk1R2wc16UyuoFmTZNB3DfNr6/zyBoVdYElg5kkakdWcqrgldtKJzYB5Xq5I24gESXoggIVl02gWPBijEdoVDG5V4F2m4bME3rVAsVGFm2qgFaWqt4decRKNUBFI4pNpLJE7TKq6RaBjna4yMkpzwNVfYyFNSosBKgqOpc138pEG4BiORWzabqoJttMifSnSQUbZmHOIWFI5xeD3+HkyKkgZeeCMDCPN4vqVpW7semQ9vkgX7HBqtDhcgVlBV0lcMRcVFlhyWsCa1TUBZ0ajIhARX9qJTcpVShU9Ho05McVY4uvVRX9KcOo2JZIhaz6ZcobKEZmqryvAGdURBViSzTQYqM3noBlidpN2ufXHKlQq99U1B7RqGOw9CcLES1J3kORCvcqqObY3JEKCb2Te52qdF3B+uSIhcYWOXK1WpwoCE9LbTZptHiWvP+VOLNE+lMVbINtgKqK/CbcBz7qsGD0pyz1p2Si9toi8xsAa1TUhTySsqrk3VaSG1uJclBX2CgyVJZTQSf6VGQEAFksgOl0e3IqgHT1S7F6cjOpdkXK5hzz1wGW14qeZ6kLpZhTES4Ap7FMoAToIpqQlK3bqFBs7gFzKcoiaCqMmWvswbJQQCavOSnRa6vLqcgrKQukc+5Uidpsw1zm/N0R5pw8EVCVpCxA78NC8JpXsO44jQY1YNhmdVKBY3AbIKhgEULKjc6tAj7pmpeE135HoAauS5hkA2CNirqQR/1JlbzrtoDFnIZ8AS7Jt2R1Df7cDOOgGm+KLoJTgXe9cvT61FBgEDeuotpVRbQyR+xv8QJfplEheg0X6YS2VKL2uiIVyf5FCKlYUlahuhPdh+uoX26hgIwqFwTl0p9UOUU5+mE8/4t1jFSSsuMKnF6ibHceI11Ff4o/S7IAKouQs+gtsGQlXDP1p1Su22RME/A3JlKhib7L0O0D0wl1hALXOnfOGhV1IU+itioCIW7AK0jySoWXGaryIujuC/Oubwn9CYDEYyEsXuumP5WtGQ8uYhYu0gltcfG7DZOUzUObKAKVE+EaLzYWCgh9NPbalpioLZXtLpKoDaQSspeRiurpT+xcseMmz3jijav5LGnYi7luwag6ZxZztID7HZuwma4T4vxYlbR6EfBruHGithBBu8YRaWtU1IVc6k8KqViRQjUelVN1lUdX8AQhSoariu+oSmjl2lBqwbaqITMqUupPNRoVM8GoKJX+JIlUiAltYkXtmnMJpHxpoPrNvUzRB1hfwrrFxsJpNOg4mQte27IcACqFvfl8GdE0gazaMKCUlCWq3MBVEEcqhM2bUU5Fa1kQNDPXrUKBkGZj6QFn6+wm0H7qhMLptQlGhcOv4YaJ2ikp2ms8z1ujoiYoNzcysA7dFowFkR9eVtVVHiJPHlhqglcx4FUJrVwbtimnwhFzKrKMimBULa2MeT/jqE8V+TdcnQoxTNzgErWbbvkqVyZoSTjlVSdMq3Ko1lWvw2Kz0XTTG6xOmTkVMvWnYjkVfEJ2LBsLqOlPpdJzxc1bjnHM8/gltNR4rgxDYDKuLnmai1SU/qy3BaJREWzQWt/j1nDDRG1HFDG4xvO8NSrqhGxzI0NU+CiR8ApIrPsKknx7QniZtYf7rFQY0J+2Rf0JQEoFQrp4VSUdyUOVU1Gm+lOzmdT0XsxTk68TS8pWKN+aBZn6TdXSrso6Fdc3gc9CA35eKFtmVKn+VEBSFkgaDyxKsXeTRgEEClHieyXAcVu0zcLmzShHiZ8T5/NUTkUcVZzSSFFl9CcmXgHQZ91q0/oI1wkiQ2GTaGBdPqeCts8xyakA0rk+WbSpxxDWqKgTzQz5VAaVsSAknZIqqDOyAm5VDniNUcHC51tTpwKI1J+S9CdHEWavhVYmcFZL9wQlJmBJpIJP1K6o0FwmZJuqqsPTWTkV15Bra6EBT79hXttSJWVVkYoikrK8wyn6+9ZB9Bm/bjBKS4n0XNaOojkVAL0XYpSGN+rY
/a+KdttsLp9HSZXTtw4iQyE2QDfgXnR7VJFyNuNyKgwkZYFkv2y6acfwNcD1+8XrhCoMLUKlCJTyPlcwIWkWjkok9tjELlMnGZfvXa8c3R4wCZYKXfN5cjPN94EqaWViscVxALTbcMr2nPB0L2midmOZU7G2SIUroSFVTH9qKmh9NlHbQgbe8C2bNlSWpCzL3QskhsM+Myq4dWM8AjpdOFl89LzgC43lGceuEKlQ5VRULRDSbHL0pw2RUa0ZTiOig8f0XIXi5TrAK2CyiFJWHxapwOuMzK8Z1qioE6qEOQFkIjcWHJn3uexB6LbopCfzOFXB+zSRlN2EicYUbCMwiQyG+WxZswCA00x7JCullfHShVVERLq95YIgTdTmIhXrpD+J/YvRJqqKGLQ4ryiH61wUyUIDPppQctKqo5KUzTkmnUYjqjacjmI7qkhF1XNOXklZAJhOABKm6wcJqoqVrTvNJf2pErbBtoAvRrhJ9Ce+qGtcpyLDaO0KSoiLNUbm1wxrVNSJrOrRDKpCczJFn5IHIa0KLSQbl83x5aErCriN1UZT3EqBu1vXRCrNv6lg8eKoCESXqF1lobksyCQ1q97cqyIV11gVxEID1437aOlFTSXrDglDurHN2w97PSGKHW3ubw2i10KEuwqHCV/xOBf9KRqTrM2qXLcK8s8SaLogfKRim9a3MsH3yw2iPzl81MG4orYoKWsjFRZ1QKUXLmKsKHwkFtQaj+BUMWl3kwvHUhpwDYnanW52ktQmQcatFLm7qYJ01Uv1kqCixavbSyZqSyVlQ5DFmhO1RaO14s09lQmV0E7Y2L2mXiwLBRKRipKdOLIIedHcHsHhFAt6SOhPpLJIRT+dEGswjuOoJGtjBv2pMoGQhiApu02R+DKRoPwFgOtWFznOA94xaCopK1abX6cTbc2wRkWdUIWhRahkRqU5FRWFl+tK1NZVGt/C0DDv5ZDqoYsTKVANrUxMhJuUWKGXg8MboGEoT9RmkrLrWjB0m6oqJ363VX99DIvtRKtCr60sQl60H3Z7SWVARhW6dRtAUm62qugoP+eQPM4BdkwgMSq4XDdStbxp0+XUnwI4101OliGheFbRXqYIeMegYaK202gC7c5SDW2ddN81wxoVdcIwp0K5mW4uN+C06mpFm+5efzk4WHuANUQqNmiiMQVfIGqxoEWsVGH2OmhlQk2T0sFTESSSsnGi9my2Pnk97aaqwjbJJKRjqcHrueBYKNDkxAQmAX1d1qakTKOCH+8Ap/4U0Z9EgY+qIhVF6gEw8QrWRs7J4UicPdUVv2sm8zeqYBtsA0Rxgk2J2PD5EQtD9ScgUd+C5BVBeIxgjYo6IaNDCKDGgmIybnH0J5ZsVsVAFHMqghHgtqoJTbLKw5IIDqlqI1wl2AIRBHKKAVfVtVJamcjpHwfLKEqZyCUpu0b1J1F1rY56EdLNnKU/WUggSsr2SixqKpOULap+1pUnauMmjVSIylCV0HP5vI484ziOVGTkVFQlv83AJWpXZnhtA1qtZB7RpkRs2Do5CcwTtYG0sWsjFRaVQ6UXzmM6pTQS2WTMNuDz2XLTX8FAdMQCblV6EcQ8ER6b5L0wRYcrHijzBvKqTBXSyhzHSUqpVsXd5TW9FxJJ2UaTRmtm043KqSAyg69sSHMqZkCjUb7MpsV2Q/TaljknuC4QhsvkYKBwToWTcjgFdM4TK10D1UXSu31gOqXCEEXUnxhlS5VTETvR2iU2mkPkaCHzGb3mtq1xZUFUPNuUiA3nGCSmidpARBu3ORXWqKgTJvSneKOpUX+az6qlzvDJt0C1BXriTbZC/WnrIhWcxvVc4uXgjagqaWVAUkq1yqRJdn5VojYAjMfrCwe7rlJSttKJvyXzEF/fxcZCDYfbYJUuMypWL+b/zkvDE0Q8aK5WjwoTcFEMbcR9VfDqPEWK37F5VyUpq5B0Lw0sUbvq+X/TwddP2SSqc7sLOE5SUtbECcRH8Rbz9UXm1wxrVNQJ
k+J38USjqag9my29LVVN2lyIW1U3oxTIFjyGcbBdcrJAUgVC5kXjDcMqaWXsWvMZjSLMZxXlVHAeSmmidvR6Ok56BmtEgi/NUFOidkrt7Rp7sCw0EFXhypz3RNEG7m8nLw0vyqkghNDXfFv5KMZsSjfOVTi9+ETaPONYp/4kSspWGT1gidplSwdvG4Q+vyn1qBIGsmlFbSBZ6f0az/PWqKgR0s2NCB2fsyVsSIFqQobdflQVugbZO9mCx7BJIVFDOG4LaLXVC54YbapyImULpc5QXRGOKL+nMiomk42iP8VGbO05FWvMLbHYXCQkTUtWapOJYRRWf+pT58F0CiCieTJnRa+/TIKua86ZzQGnYSY7rlN/4nPdqlYdZLV74iJ727XGlQbeyVpVHaWi6ERV23Mkaieogdc4UXutv9rzvFcB+AD31ssAvNv3/b/PHfOnAfwzAH8UvfVB3/d/sLZGlgkTSVmd9yKxIa0wdBp7gsZAf4cOeKZDXjYU6k+Vhs+rBqOPMW+gzKiYzaqllQHLjcq4QgOU9xqqErUBSinYREnZKhOmZePd0p8sZOCpIMEIzt2nSjx3iUYFH5nsdJIOJ542q6Pxrgq+DfOZ+RhmOYlxpMJNfYbFvPJIhdNs0uKDNlKRTI7fpPsQGchObFQY9LEex/CYz9cWmV831mpU+L7/+wBeCwCe5zUBPA/gFyWH/j++739tnW2rBEY5FRpjgVscSq+6yoPnrPZ3qlMOAtSVh+dR4u8meS9MwUKnMm84l1NRKa0sui6Zz+BUTZUDopyKRToBmb0Ow/VGKtZBf2q1qEpb4rrXt9KqhQZ8TZOy5wXekcFQtPgjP95v3qL/39hffsakyIMKvfAdrg15jHQxp0JFS2W/rSowSdlJhelR0v0AACAASURBVI7BLYATrU9ksaCRr02K2DADmRkVOXIqpPWprhE2if70JgCf9H3/j9fdkMpgQH8iOq9yo0ETiGbchFSVpCyw9DZVKHtHVYokHt2Y3rVB3gtTMBqAxBvuiBS2SulPUXi57GJaPBKa3vO0R4ePXKwtUVtjVFRZO0Oi9kZm1zcsbqGBEKkoc15IzDkMq6g/Acu1gc9743T6q6Q/sXtDWDTYdAyzcSctfsfl9o0r5vdHkrJkm9e4MhBH0jfQuGIGciwpa2JU9JeFXi39aSPwdgA/p/jsSz3P+/cAHgD4733f/536mlUiDOpUxOFjyWQcb8AXs0pDp06vBwIkF4gqvQiyCE6VVbyrBqvdYJJTURWtjF1rVrFSWI/zGsoStXkPz1ojFXMQQpba//M54Lrl1QKQXlcy3hc2UmEhQaTURlgNolIlZTlqD8OiYE4Rm0MC3uFE5wCny+dUVKlOyCdq51DZiSMVspwKLmJede0Ilqi9zWtcGWDOnk28D70+cHbCJWqb0J/4XJ/rG6nYCKPC87w2gLcCeJfk448DeKnv+5ee5301gP8DwCsl53gngHcCgO/7GAwGFbY4G67rptpweeMmruZzHBwcKDczVw3gEsDg6WfgtDupzx+12ui6LhwHGLku7jxVIvc2wvTJezgBcKPTQnt/H49mU/QPBtit6J6y33SDO//s7AjHAG7cfUJ6LzcZJzduIjx6hN1+H6cAbh4M0I7aP7l9QN/b2cH5dILWzX3crOi3Hfd6QMNBr9XEOYBbT90r/V6GO30cAthpOBiREJ2dncRzDG7t4zz6u3/jZmV9SIfLGzdwBWCwfzPWnr9ouQha7ZXuRda9PN3ZwTwME8ecOADp9XB7i/pzHdi2MV42rm7u4xLA7U4LQwA7gzvYKXg/xHs5uX07nnPYPBR0u3ROGNyBm+M6s7N7OAaw13LROTjAo3GA/u0D7A4GOL91C+PxGIPBAIEbzTlP3st1fhOE/V4858ybDcw6XaO+QwjBIwCN6QQhgNt376LJ7sf+LZwDuH1jD8NJgN7tg8r65Hl/BxMSot9o4BLAwdNPo9GrqHr3hkB2L893dzEOQ+x3O/Fa
392QOeBs/xamn/5D9DodunbcvZup0hgM7tI+3+viOFygd+MG9ir4PZs+V26EUQHgLQA+7vv+Q/ED3/fPub9/yfO8/8XzvIHv+0PhuPcBeF/0kgyHiY9rx2AwgNiGMKL4DB++qEziCY+PgGYTw7NzqeFBmk2MLy6A0RXQ7aWuUQbImPLAzx8+BG59BgAwWhCMK7qnpNHE+PICU+785MUXAAAX0xm683klv7MqhA0X5PIC58dHAICzqys4UfvJiHplzo6GCK8uETrNyn7bggAIRpgfPgIAnIwnuFPyvSSEAI6Dq+MhyHyG8WyWfI6jpTTxaDqrrA/pEE65cRd5w8KLc5Cmu9K9kI3xxHUXIch0kjhmMRrR8b1F/bkOZN3Lxx3hhKopHX/6UwCAq/kCQcH7Id5LNgbPhofxPBSeHAMATi4v4/dMEK8Nhw/hvPAACBcYEQfj4RAhaYAEIxweHoIc0qX8ZDzJdX6jNoS0INnV0RDk6hJwGuZ9x3URjq4AAMfnF3BAI6lhMKbvvfgCMJ0iIAR7Fa074WwGMp/j6oie++jiCs5VkPGt7YZ0PzRfgEwnOH3hAQDgYrbA5YbMAaHTBBldYnRxAQAYnpxQqVkNyJxGNU5eeB5kNkUwm2NSwe9Z11x57949o+M2JafiG6GgPnme96TneU709xtA23xUY9vKg0LpKIEo9KqkZTCqUFBhsRjGWR2P6slt0NGftpFvGqlAkEz6U8W0MkZ/CqrjrDqOEyW1RepPqkRtoFqlJR2kxb9qCE9bSVkLU7A+cXEGQCEpXhRVqD8FQVrhqdcDSEjpW1VKyjYaNFl7XIBm4ra44nfL7znC/a+W/sQkZSl1LGuz+tgiRX/aoJyKDkvUjiSLTZ4Ra//oiq6Flv60Hnie1wfwZgB/kXvv2wDA9/2fAPDnAHy753lzAAGAt/u+T9bR1pXBb266imOy9JpZQbOyebc8eB1wVjejyklWUiSsUnWrqsH0qmUKK7Gs4ZgWiKry9/GSso5DK4VWAZZDkpmovS5JWUXxr6o391ZS1sIUbF64iALzVeRUlFWnAqDjnTkrmKHBrxtBQIVF2u1ibc4Ck+/MO47dFug2AlL1p/j+15CoXXnuxqaD5bpt4lrPDORgZJakDSydsawPXdN5fu1Ghe/7IwAHwns/wf39XgDvrbtdlUBX6C0CCTIKH7ENeJBhfKyCWDZQsnBUAVml8W2OVHR79BlPaEg9VWQJAC7Pl8dWBCbZh6iYU2UesW6P9ttFCDSFa2xKojaQGHekjs29oj7GddUvt9DAFeaFCiIVZDZHHP8uWvyx1abGQjBaFm+LK2pzog3MC1+VEAJz3OQdx/yxTUmdiuj+V+pEazSBcJEsHHgdwdSRLinFaLMkZaPnf3VpblSwZ8nG8Loi82vGNY27rQkm9KcsjXLXXVZJrmjD7TSb1MM0Dpah4k4NNB0emygzZwr2XFgoXaaHzj6ruqI2M0CrfH7dnmGkYs30p4SnVtLWKq4rjnUrKWshA+sTMf2mxPGqitTxnxmC0h37S8MBiOcwhy+EOa6QngtQR8Y4iFXcjMHm4qabdLKI83KV6w5Tf8pyID7uSFHONmitj9pCrs5zGBXCut+8ns4ja1TUCVWhNx5ZIdGYh1hhQTog8gQFXN2MmnMqggBwGoBEAWvjwZ7fZTS5yHIqos+qppVhNqPPsMrn1+tTHikgqai9fL0uD318XbH4Vx30pzAEYbKEgC1+ZyGFI8wLtdCfGo10sUoT9CK6o1j/hqM/1TLnFMmpYGuw+J2WeP+rNCqiez663KyNdN1I9fnNuRexgXx1ae586onr/vV0HlmjokbEyWCiV55HVuGdSM8cVU/aXXHhqFi3W5ao3aswfF4h4iqyLKzLTy51hnxdl/aVoOLK3d0+cBX9HnGTIqMY1A1XYswvcno4V7kunyC+sDkVFhLEG6wK5oW4/3P9MK+Hn0dEdyRifQG+Zk2V9NyoDWAFN/P8Dnafxe80xXm5
4pwKgM6Z1zlSwa+FnW4xA7cqxPSnC7Nq2gB1gDqNZR+6poIc1qioE7IiRCKyNoCxok/1kzbhErUrvZak8nDlv69KsNAp41ZKNtbxZ1XTyuYzSqmrcPFyuj3q0QE2M1E79tTym6qa1J8AakgwzGykwkKCaINVybygilQU7YeM7jgWE7WjeY/RZqvMF+suE7VzRUDZRk8RqSA15LotjYpLOFXO/5sOfi3cpCRtYNmnry6M6U9MCZH1oeuaO2eNijoho2GIyJqMWy0q2Vel+hNAN6ETLqeiW5FyECClP5FJxZzcKsFzK7O4u1XnVMzm1RtovT4QMPqTJlF7XZ4b2aaqLklZ8brz2bVN4LPQgOeXtzs0r60slG1U9KKcCibi0WGJ2smcikqpnSyvI7ekrJv8P36/RklZNicGV9c8UsHd801zILL25FF/AqgxciGhPV8jWKOiTmQkapNwESVq6+hPbj3UGRZeDgKg06s0NOnIElqDiuldVSJO1D5PTyxxQmYF0pEiWi6Vxbu6rHiB7wEkUnlORSo2gP7UUm3uqzYqkvQnEoYRXeN6LjYWGsQbrPPy5z1VTlHBfugwaux4lJSNZe0ORtXP3yynopCkLNLf4e8/UK0Tjc2JhGyv46wEOPw93zijInouhOQzKrr9ZR+6pvO8NSrqhGxzw2McSZBqJmPHbXFypBVSWpg3KsptqBQyo6Li8HmliBO1z1NeacdxIsOwBj10Xiax6gWeIZVTIYnS1A2Fp7by8LQ43hcFZTwtHn/wY7Xked1pNOjGqMxIRRAgVajVbdENMzM4qo5UsFoPRSRlU84e7v5X7ERLzImbJKNaN1o1rU9FoFvTsr4XS8pez3neGhV1IktS1kRGlSVqAxWrP/WWkrJVb+5VkrJba1RE7VZ5pblniE6FHjE+h6fKe8nzglPqTxsQqYh1+kvaVBnCEce7rBiihQVQ/VgVHDdklUhdh1sbuM0X45THNSyqXp+A/JE/ZU6Fuzxf1esOPydu6xpXBhJ9fsOMCmYgA/kiFZ3etXceWaOiTsg2NzxENQ3pOfgJqWJPEJMGrHrAt+SJ2pVSdqoEbyiojAog8ohVOAT5a1edU8GwkXUqJOo3ddSLEI2KuIqxzamwEOBWvNEUo8GrSBv3esAkAJFJonZ7IKcny+OqAn/uHOPJUUnK8q+rjh7wc+K2rnFlgLvnlTpICyA2kIF89YwK9svHCdaoqBOyzQ2PSL7V0U1qicmvYp78fFYNx1eEUlJ2Oydcp9FYTkg6o6JyWlk9BmjC+BONpA2tqF1LvYg4p0I0Kq6nB8tCg6rnddeVqJ8VlZSN2nd6nDYqen3gZJg8rgIk5pwi9CcxStOsyVkHJOZErXz84w6+/20iDSw2KswjFYX75WMEa1TUCZnEJI+Y/pRRp4KhDu+zbOEoG4KkLAnD6iuyVo3YqJAs3Oy9Ou4rQ6U5Fcvf4aj034H1qR7JJGXrqBchSkjPr3dY3EKDFu+1rcKoECMVK+ZUAHRtEOeVbo++z/6uCr2CmzeuojaPONdNPHcFcOo0YDYZib3MBt4H1g9yqT9Zo8IaFXUiVuFQRCpMcipaNQ1E1obzk+pDk5FKEVlElYen42QbthHs2egiFRVPpE6rpvAyf+5NTNRmxkxCUraOSIWgusOuf00T+Cw0qNpZJNYCWrH4HQC6Noh1Frp94JzSnyr1wvP3qIxEbf69qmtH8HPiNq9xq6IuB2lRsDblSdQu2i8fI1ijok7INjcc4gqlWRW1GSoMGTq8pFrVXgSRnhIIRZW2ET0Do6Lq0HddniD+3BucqM36F1ksqNRu1ZETUf0p+v+6FkWy0KDqed11k7l8q0rKAnRtEOYwp9dfykvXNefkGcfRb3Zkhj37rPKcig2n/dSFBP1pAyMV3QKRiqL98jGCNSrqRJb6U2CSqL2G5Nu6Nr/svpgkrG862LORLV7svVrpT2tK1N6EnIqmYMzXldtg1Z8sDOE0m4ATLcc10Z8KG7f8
XCK2lZ/TakvULhKpkNFS64kgJ+bEbV7jVkVdrIuCiCNtuSRlbaTCGhV1QtzciIg30waJ2u1OtVra/CCvK1LBNl1xwvrmTTTG0CZq035QOa1sHZEKMVGbeXmazWqVrjSgfOlW/QnTTO0tNmZYTsX19GBZZKBVIae/JcmpKErD48e7aDjwc1qVNKKiCbHsHsu+w+5H5cIk3Lq9zWvcquDVnzYxYlNE/aljjQprVNSI1OZGxDgA2m3qtVKhyoWHB784VJ5TIUYqIvpT1dzWChFTBNbpEatapjKC0+I1vYUEyEaDemDXPcHym6q6IgY2p8IiD6rk9JeZqK1bG7o1Rbhb7diBkSviYpJTUVedimZz/fPiOsH/9k1c66N+oN2PCUg4Qq+p88gaFXVD9BjxCAwUj5o1bUh7NYWxATX9aZu9OFHbpQteXTkV/Oa16muxPiKbgJuNfN6eKsBvquqqFxHnUDH1p+i6zWu8kbBQIxojlURoRWdWGepPQHodSnxWoSPDcTgxjDw5FdGx0pwKpspXNf1pSXOLq5FfR9SlTlgUq+RUNBrVMkk2GNaoqBuymgwMJoXm6pIj5dpRdRG6WIaUJdIGBipYmw5tpKKmxYttXptu9R4x3QTcdNfvnefHXV3Srk3BWGbXvaYJfBYZqDLXynWX0sbAaupPvFdZJikLAK129YIEOjEMFVxuTlR9Vlf9oG1e38rAphcBXEVS9hpHoKxRUTdEaT8OJBhlTjROXUm+nS7AvCh1cf/ZfXmcErUlk4tTV5g9psr1qveI6eT3GhsQ5uertjMVpqoNHaFfk7pyOSy2ExXSIp0S6U+O61L6ESQOJ/a6jg2zLm9NBS39qaZcNzZHXnOjIqaDAxsaqVhBUvYaz/HWqKgbOvrTJMgeXDUNwkSZ+trVnx6DSEVPM7nU5hGrSXcd4CIVEg/gJnCH3RaXMF3T5l6UkLZGhYUOVc4LHP2JELJ6RXk2NwtzdJxwW6dRkcc5oKqozX9WeQQ52qRu4ka6btTlJC0C3ZqW9Z11R+bXCGtU1A1+cyPCIFIRa2nX4cWPvU5rUH9yW9V7kquEbnJp1fQM6/QC6ULFzeb6k9ZcN53bUPHm3mk0KX/aSspamKDKTW0iUleCChkb7yn1p37y8ypRgGoSrynrzHVr1kR/3Qa4LSrk0e6suyUpOLo8QRVspMIaFbWD39yIGAfZSXp1ebkBLlJRE8eUpz9tuRfH0S14dRe/q2HxiikDSqNi/ZGKeFNf5+ZeliC+zcayRXWoUtLUdcvthyqHU68mRxR4hb2S6E+tmuZLplq15WtcKXBb9dBzi6BAorbTatHftO71bo1Yq/vQ87xXAfgA99bLALzb9/2/zx3jAPgxAF8NYATgHb7vf7zWhpaJLEnZrEhFneFCdo1OTSpFPP1pE8OheaCtU1FXTsUa+sqmJmrztMM660Wsoz6GxXaiSq+tVP2sfPqT8v0qEM+x+Stqy77juC0Q/rxVoWkTtWO4LuBs6H3QrWlZ31t3ZH6NWOsv933/9wG8FgA8z2sCeB7ALwqHvQXAK6N/XwzgH0b/bye0krIm6k/1eZ/R61OvSrtd7XW4ImEOAPJYGBUGkYrKubsVSlSK0FUf3YREbbe1zNWpM2LQKnkzZ/H4wnWBbkVeW96oKCNSF0ck5PSnWum525pTse1rXBlwW3HS/8ahSEVt9r1rPMdvkjn1JgCf9H3/j4X33wbg/b7vEwAf9jxv3/O8p3zff6H+JpaApgucDEF+4yPJ98OQTvqZORU1Fb8D6ORah5Y2+01/+PsgrTYwfAjs7FZ7zarR03jRXDd5TEVwGg26gNXiNcxK1F7zVNN0gYszkN/4CMgnfzd6r4aJv+mCHL5Ix/vzn6aKamuqLG6x4YioINWc2wUWC5Df+DDI+dnyvYJwuj0QmWxsXZRZ/hp5xjH7zbLvuG49TrTYqLD0J7juRuZTACiWqA3YSMW6G8Dh7QB+TvL+fQCf4V5/NnovYVR4nvdO
AO8EAN/3MRgMKmqmGVzXlbbh7M4TGP/uv0f4439L+r29Z55FT9P2sO3i0G3h5steiU7Fv/HimWcxPR3ioOLrhJ02DhtNkF/9EMivfggA0P3yN+NmdF3VvdxkhP0eDjtd3HjJc+gKbQ+eeRYX/V0MnnkpnIq9NIcHd9F/6cuwU/G9DJ57OS56fQzu3YfTSS4SJ3eeQPPOk7ixxmd4dvdJjH/ro8tx5zi4/ZKXonnroPA5Te7l0e0B5r/3mwh/7zcBAI3927hz507haz6u2MYxXjbOn7qP+WyK2yveB9m9HN17GhcAwh//n+L3bt5/SeE15PIlz2H82U9Jn9nh4An0X/JcPOdUheDZV+Civ4PB/ftwDDemi/DlGDYa2H/u5WgL7bu8/xKMn7iHQTQ+q+qT5MYeDvs72HvuFdq1/nGC6l6ePHEPjf5OvNZvEsitfRzu3cTu/WfQz9G+03svAZpN7Ff0mzZ9rnQIIetuAzzPawN4AODzfN9/KHz2LwD8bd/3fz16/a8B/HXf9z+mOSV58OBBZe01wWAwwHA4TL1PJhPgxc/Kv9RsAvdeQj3MGpCLc2B3r/IIApnNgPmsFvoMOXoEXF4s33jy6XhzqrqXmw5yeQ70d1PPk4QLIBjB2dmrvg2jS6DdiT2KVd1L3W8i44BWGF2jR4pMJ8AL3Ljb2YUzeGKlc5rcS3J1AQwfLd+4dRvOjVsrXfdxxLaO8TJBZlNgMV+ZOiS7lyRcAA8+DSxC+karBTz1TOE1hMxmwGwKp7+T/mx0CbS7y6KmFaHoPEouzuHs3Ui/P58B0+VvqrJPkqsLoNe/NlWX1fuhMQAn5YjaFJCrS0pJzJFXUfVvWtdcee/ePQDInDA2JVLxFgAfFw2KCJ8F8Az3+mlQA2Qr4XQ6wEtfvto5JBNiFXBardoSbJ2Du8DB3VquVRecXflzchpNoAaDAgCcfj00Mt1vqryYlAGc9urjrtB1d/Zqe9YW2w2n1a6MX+40msDTz5V3Ps3asAlzjvZ7ivXTqVG1pw6H0jbA6XTX3QQtnAI07E3/TVVjU4yKb4Sc+gQAHwLwlz3P+3nQBO2zrc2nsLCwsLCwsLCwsHgMsXajwvO8PoA3A/iL3HvfBgC+7/8EgF8ClZP9BKik7LesoZkWFhYWFhYWFhYWFgqs3ajwfX8E4EB47ye4vwmA76i7XRYWFhYWFhYWFhYWZrDahhYWFhYWFhYWFhYWK8EaFRYWFhYWFhYWFhYWK8EaFRYWFhYWFhYWFhYWK8EaFRYWFhYWFhYWFhYWK8EaFRYWFhYWFhYWFhYWK8EaFRYWFhYWFhYWFhYWK8EaFRYWFhYWFhYWFhYWK8EhhKy7DVXgsfxRFhYWFhYWFhYWFmuAk3XA4xqpcNb9z/O8j627DY/LP3sv7b3cxH/2Xtp7uYn/7L2093HT/tl7+djcy0w8rkaFhYWFhYWFhYWFhUVNsEaFhYWFhYWFhYWFhcVKsEZFdXjfuhvwGMHey/Jg72V5sPeyPNh7WR7svSwH9j6WB3svy8NG38vHNVHbwsLCwsLCwsLCwqIm2EiFhYWFhYWFhYWFhcVKcNfdgMcRnuf9ZwB+DEATwD/yff/vrLlJWwPP854B8H4ATwIIAbzP9/0f8zzvBwD8NwAOo0O/1/f9X1pPK7cHnud9CsAFgAWAue/7r/c87zaADwB4FsCnAHi+75+sq42bDs/zXgV6vxheBuDdAPZh+2QmPM/7aQBfC+CR7/uvid6T9kHP8xzQufOrAYwAvMP3/Y+vo92bCMW9fA+APwNgCuCTAL7F9/1Tz/OeBfC7AH4/+vqHfd//tvpbvZlQ3MsfgGJMe573LgDfCjqXfqfv+/+y9kZvKBT38gMAXhUdsg/g1Pf919p+qYdmD7QVc6Y1KkqG53lNAD8O4M0APgvg33qe9yHf9//Delu2NZgD+Gu+73/c87w9AB/zPO9fRZ/9qO/7f3eNbdtWfIXv+0Pu9fcA+Ne+7/8dz/O+J3r9
3etp2ubD9/3fB/BaIB7fzwP4RQDfAtsnTfAzAN4LulAyqPrgWwC8Mvr3xQD+YfS/BcXPIH0v/xWAd/m+P/c874cAvAvL8fxJ3/dfW28TtwY/g/S9BCRj2vO8/wjA2wF8HoB7AH7V87zP8X1/UUdDtwA/A+Fe+r7/Dexvz/N+BMAZd7ztl2qo9kDvwBbMmZb+VD7eAOATvu//oe/7UwA/D+Bta27T1sD3/ReYle37/gWoR+P+elv12OFtAH42+vtnAfzZNbZl2/Am0AXxj9fdkG2B7/v/BsCx8LaqD74NwPt93ye+738YwL7neU/V09LNh+xe+r7/K77vz6OXHwbwdO0N20Io+qUKbwPw877vT3zf/yMAnwBd6y2gv5eRJ90D8HO1NmpLodkDbcWcaY2K8nEfwGe415+F3RQXQhQmfR2Aj0Rv/WXP837T87yf9jzv1vpatlUgAH7F87yPeZ73zui9J3zffwGgExiAu2tr3fbh7UgujrZPFoOqD9r5czX8eQD/J/f6Oc/z/p3nef+353lfvq5GbRlkY9r2y+L4cgAPfd//A+492y8NIOyBtmLOtEZF+ZBVHbQSWznhed4ugF8A8N/5vn8OGtJ7OSgN5QUAP7LG5m0Tvsz3/S8ADZF+h+d5b1x3g7YVnue1AbwVwD+J3rJ9snzY+bMgPM/7PlDqxD+O3noBwEt8338dgO8C8L97nndjXe3bEqjGtO2XxfGNSDpibL80gGQPpMJG9U1rVJSPzwJ4hnv9NIAHa2rLVsLzvBboYPrHvu9/EAB833/o+/7C9/0QwE/Chp6N4Pv+g+j/R6B5AG8A8JCFR6P/H62vhVuFtwD4uO/7DwHbJ1eEqg/a+bMAPM/7ZtBE2f/S930CABFV5yj6+2OgSdyfs75Wbj40Y9r2ywLwPM8F8HXghC5sv8yGbA+ELZkzrVFRPv4tgFd6nvdc5Nl8O4APrblNW4OIf/lTAH7X9/2/x73PcwT/cwC/XXfbtg2e5+1EiV7wPG8HwFeB3rcPAfjm6LBvBvDP1tPCrUPC42b75EpQ9cEPAfivPc9zPM/7EgBnLORvIUekNvjdAN7q+/6Ie///b+9eYu4Y4ziOfy1EwsKtJVHaECSsROISRLqQCO0bEvpDgxAEISxcEjZ1W0gTgiZsELRp45+4tVqlLY2IS10iEeLS4KWuKV1ZiMprMfNycry3mrd9+8r3szrnzPPMeWYyc3J+mXn+M7MtLECSI2gmcn45NaOcHsY4p1cCFybZK8nhNPty064e3zR0BvBpVW0Z/sDjcmyj/QdimvxmWv1pkrUVOK4HXqYpKft4VX08xcOaTk4FLgE+SvJh+9ntwEVJjqO5rPc1cPXUDG9aORh4Lgk05/ryqlqb5F2gklwBfAMsmMIxTgtJ9qap6NZ73C32mBxfkhXAXGBGki3AIuBeRj4G19CURtxMUx7x8l0+4N3YKPvyNmAvYF17rg+X6DwduCvJdpoyqNdU1UQnJv/vjbIv5450TlfVx0kK+ITmFrPrrPz0j5H2ZVU9xr/noIHH5XhG+w80LX4zfaK2JEmSpE68/UmSJElSJ4YKSZIkSZ0YKiRJkiR1YqiQJEmS1ImhQpIkSVInhgpJ0k6R5KX2wWyTuc47kiybzHVKkrrzORWSpDEl+ZrmuSe9tfmfqKrrx+pXVWftzHFJknYfhgpJ0kQMVNX6qR6EJGn3ZKiQJP0nSS4DrgI+AC4FfqB52vCGdvlGYFlVPZrkSOAx4DjgD2BDVV3QtjsFeBA4GvgcuLGq3myXHQ48ARwPvA181jeGk4H7gWOBwbbvxp21zZKkkTmnQpLUxUnAl8AMbGUZrQAAAi1JREFUYBHwbJIDRmh3N/AKsD9wKLAEoG27GngIOJAmIKxOcmDbbznwfrv+u4G/52gkmdX2vQc4ALgZeCbJzMndREnSeLxSIUmaiOeTbO95fwvNFYefgQeqagh4OslNwDxgaV//P4A5wCFVtQV4o/18HvBFVQ23
X5HkBmAgyavACcAZVfU78HqSVT3rvBhYU1Vr2vfrkrwHnA08OQnbLEmaIEOFJGkizu2fU9He/vRdGyiGDQKHjND/VporDZuSbAPuq6rH27aDfW0HgVntsm1V9VvfssPa13OABUkGepbvCby2IxsmSerOUCFJ6mJWkj16gsVsYGV/o6r6kWb+BUlOA9YneR34niYc9JoNrKWZo7F/kn16gsVsYPi7vgWWVtVVk7lBkqQdZ6iQJHVxEHBDkoeBc4FjgDX9jZIsAN5qb33aRhMM/mzbLkmyECjgPJpJ1y9W1db2dqY7k9wOnAgM8E9oWQa8m+RMYD3NVYqTgc3t90iSdhFDhSRpIlYl6X1OxTrgBeAd4ChgK/ATcH5V/TJC/xOAB5Ls27a7saq+Akgyn6b60yPAZmB+VW1t+y2kmR/xK/AW8BSwH0BVfZvkHGAxsIImpGwCrp2sjZYkTcweQ0ND47eSJKlPO6fiyqo6barHIkmaWpaUlSRJktSJoUKSJElSJ97+JEmSJKkTr1RIkiRJ6sRQIUmSJKkTQ4UkSZKkTgwVkiRJkjoxVEiSJEnqxFAhSZIkqZO/AI/VBSoiEF3yAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 936x360 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAw8AAAFRCAYAAAAyx5F4AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzsvXmcJGV9+P/+zMzO7k7P7DXTvdsLLizXcgkCohBQOdQYggdGy9uvJl7RKCYeSdSf8tLggRow8QqKYhIVS02MZ6KIoiKHICDHcskusLu923PsMdNzTz+/P56qnuq7+u6e+bxfr37NdNXTVU9VPVX1fG4xxqAoiqIoiqIoilKOrlZ3QFEURVEURVGUzkCFB0VRFEVRFEVRQqHCg6IoiqIoiqIooVDhQVEURVEURVGUUKjwoCiKoiiKoihKKFR4UBRFURRFURQlFCo8KEodEZFrReT6JuzHiMirG72fTkBEjvTOx7mt7ku9afV1FpEuEblTRF7Sqj7UCxHZKSIfaNC2m3Lft5J6HaOInOeN68Pr0a96ICKXicgjre5HGERkQET2isipre6LsnxR4UFRyLwYTYHPRIWbuhR4aSP6WG+8F6Z/nGkRSYjIf4vICa3u21ImMHkq9bnWax4HvtPC7r4eEOC7/gIROUJEviYiT4jIjDeRuV5EntO6bi4iIh8QkZ2t7scSpGOebVXwKeCsRu5AROIi8nURuU9E5osJYl47V0QOeZ/rRCTmrzfGjAP/DHy6kf1VlFL0tLoDitJG/BpwcpalK9mAMeZg/brTFHYCZ2MniE8CrgB+LCLbjDGzrexYLiLS2259KoeIdAFijFkILP4tVijweSfwKuDMwLIpAGPM3oZ3sjR/C3zeeNVERWQFcD3wBPBK4HFgI3AeMNiiPiolqNd904HPttAYYyaAShVFlbISGMNO/F9KgfmX97z4Ifa98xzsc/nzwPdE5ByzWNX3WuByETnZGHNvg/utKHmo5UFRFpk1xuzN+ST9lSLySxH5ioh8XERGPK3Ql0VkdaBNlmlfRE4Skf8TkQMikhKR7SLymsD6uKdZOiAiU94+nhrslIicLyJ/EJFp7+/5uR0XkY3evodFZFxEbhKRZ4Y45gXvOBPGmNuw2qwjgW0523+5iNzl9WGniPyziES8dc/2NNB93vdVXrvf5BzDvIis8b5f6m1vwtNcXyci8UB7Xzv/5yLyGxGZBt7krXNE5BFvH78FTil3kGJ5t4g8KiKzIvJHEXlnYP3lIvJggd99QURuCXw/Q0R+6vV7WET+S0SOCKy/zOvby0TkAWAWyLLkGGOyxhl20rKQM+4OetvLclvyvr9dRL7ljafHReQlIrLW02qOe8f4FznHUfH4EJGnACcB3wssPgk4BniHMebXxpjHjDG3GWOuMMZcF/jtThH5iHf+DopIUkT+RkRWisi/ish+EdktIn+Ts88w98NZIvIrb/1+EfmGeJpZEXkd8BHgCFm04lwW+HmviHxGRMZEZJ+IfEpEunO2/3YRecAbXw+LyPtFpCewfn3g/O8TkX/CTvJKIiLbRORH3tiZEJEfiMgx3ro1IjIpIq8scD4WROR53vceb4zt8Pp3n4i8Oec3RkTe4Z2Xg8DXC/SlT+w9++zAshul8H18kfc999l2rViL05tE5DGxz8P/EZFogfO5yzu+/wO2FOjPRSJyh7f/pIh8XhafL8d4x3RMoP1jIrIr8N13XTyxyLlfIfaZtcvbR0JEguM1y21JilsEj/TW93vjaLd3XHeKyIsL7dvHGLPTGPN2Y8w1QDGlwLOB04FXG2NuNcbcArwGq+B5VmBbSawSQl1XlZagwoOiVMZLsBrWZ2C1xS8APlGi/TeBUeBPgCcDfwfsBzuhxU7MjgcuBp4G7AN+JiJDXpvNWE3UHdiXyruAzwR3IFZ4+QUwAPwZcBrwY287oV2QRGQDiy+j2cDy1wFf
wAoWJwKvxb7kvug1uQkw3jkBOAcYB54mIv3esguA240xhwK7fLd3Ti7BTiiuI59PY60hJ2C1b6d57b4NnIp1N/hMgd/l8lbspPLj2AnwJ4GPi8hfeeu/BhwnImcHjrsXa4n6mvf9ROBG4Gbgqd4xLWDP86rAvjZ7+3sd9nw9FqJ/lfB+7PU9FTs2/h17Tn6GvfY/Av5dRAa9flc7Pp4F7DbG7AksS2KP+SXe+SnF24GHgTOAf/E+/w3swFpZPgv8iz/hC3k/bAJ+Cuzy1j8fOJlFt6pvYe/HXVjrThw7RoJ9SgBPB96Btfq81l/pCRrvBv4RO+YuBd4MfCiwja94x/R87Bg4EjuGi+Jdg58Cq7Dn9VlAP/C/Yi0Dh4D/Af5fzk9f5Z8D7/uXgRd7fToB+DDwicA49vkQdpyejh0vWRhjJoFbgQsD/TsLOET2fdyNtcgW40zgfODPgecBTyFwvkXkhcCVWG37UwAXe+8Fz80pwPeBX3lt/h/2+n/R6+sjWAuX39ejgRiwVkR8JceFwF5jzP1F+vl27L38auBY7HP7liJtYXHsxLH38/XAdmCfN05/gL3/XoYdf18ArhORC0tsMwznADuMMRlFhjHmPux4zo3puhV77hWl+Rhj9KOfZf/BmoHnsVrg4OcHgTa/xLr5dAeWvQmYASKB7VwfWH8QeF2RfV6InXSfGFi2Eju5+aD3/Z+wk8+eQJuLvd+92vv+OuzLpSdn+zcAV5U45suw5vEJIOVt0wDfyWm3E3hLzrJnem3XB87NFd7/lwPXAPcDF3nLbgI+WqIvp3nbO8z7fp73/TU57f4T+G3Osr/x2p5bYvtP+P0LLLsSeDTw/RbgC4HvL/au7YbAtb0uZxsrgUngRTnndEsFY+8DwM4i6zLXOfD9qsD3qLfsXwPL1nvLLq5xfFwF3Fpg+Vu8MTPlXddPAE8tMGa+F/jehZ2Y/iBn2X7gbyq4Hz7iHUtvoM2p3u+eWep8en36fs6y/wW+6f3f513L5+W0eS1wwPv/GG9fzwms7wV2E7jvC+z7r7xtDwWWbfTO4Wu978/DPoM2B9rcDXzS+3+rN7aOz9n2B4G7csbINSHG3WXAbd7/zwH+iHWRCd7Hvw20v5bsZ9u1wDCwMrDsH4BE4PtvgK/n7PdTXh8P977/h9+PQJsXesd6RGBfrvf/G4GfYwXgt3rLvg58o8SxfgY73qXEuXikyLrLsQLcVu/7ecA0sDan3VcIjPky5z7rXAaWX03O881b/jvgcznL3gEMh9mffvRT749aHhRlkVuxmq/g5805bW4z2f7rN2EnD0cX2eangC+Ldb+4TEROD6w7CRg1AW2ZMWbG68dJ3qITvX3OB373G7I5E9gEHAi4RExgNYjHljxiO6l+ClaT/g7gAeCv/ZWeC8IRwD/nbPsnXhPfleAGrBYW7+/PsdruCzzrw5leG3+754l153pCRMYDx5RxAfK4Lef7idhzHiT3fGQh1lXqcKxmM8iNwJG+mwZWg/+ygEb9NdjJ7pj3/UzgkpzzMIrVJgfP8z5jzOOl+lQjd/v/GGOGsZaAPwSW7cdajvwgy2rHx2rsJCkLY8wXve39BVYj/izgNhH5+xL9TGMnmn/IWZYM9DPM/XAScIsJ+PAbY+7GCul+m1LclfN9N3YS7297NfDdnPP0b1gtdxQ7/sC6jPj7n8VO7kpxEnC/MWYk8Lt9wIOBfv8Mez5eBSA2m84p2HEJ9h4V4Pac/r2P/OuYe98U4gbgdBFZS849662/gMA9W4Tt3jXyCZ5PsOfrt9k/ybtfT6LwvSksnu8bgPM9rX+hvp5fpq9fxVo5HxGRL4rIX4SwnCHWxfTvsMqBHd7iM/EExpzr4Fs1GoXJ+T6NHa+K0nQ0YFpRFpky1kReCSV9nY0xHxGRr2O1ihcA7xORK4wxfsrI3BeCv01T4P/MZnO+d2FN6oVcJyZL9Q+YCxzzdhE5DOv64b+UfQXDpdiXdS6+3/ENwIdEZAvW
peMGrNb+A9gXfRpv0u+1+TFW4/hhYAQ7ub8e+1IOksr5Xuh8hCX3d7nX7jqsNeL5IvIL4CKys8t0eX3+eIFtjwb+z+1zvZkLscyweO2qHR/DWHe7PIwNMP2x97lMRL4MfFhErgxM7Av1qVQ//e+55F7zYtc/zLjIDRzOPU9gr/lDBX47RojYhhKUPDZjzIL3rHgt1rXntcCdxph7cvr3J+Rft9xthxmDt2Dv0fOw9/uV2Hv8m949+lQKuDzlUOh85p6jMNel3DX9OTCEFabOx1oS5oB/EJGTsO5FRYUHY8xdIrIVa2Hxf/8RETnLZLtSZhCb+vlqrOX45sCqLqywemaBn9UamJ7AuoTmspH8OIkN2HtUUZqOWh4UpTLOlOwAy7OxL4w/FvuBMeZRY8znjTEvwboY+Jr9+4ChYJCfiKzE+nLfF2jz9Jx95vq+3g4cBRwyxjyS89lDZVyBjVX4C6/v+7DWiW0Ftv2IMcbXTN+KdcH4IPCwsYHAv8Bq+16K1RZPeW3PxGrM3mmMuclY/96gtrIU92H9goPkfs/CmxzsIhBw6PFMrH/xpNduDBtD8Frg5dgJwk8C7W/HTl7+WOA87A/Z/1ZQ7fj4PXBsGA0tVjjpBdbW0M+w98PZwT55Gvq1gTazWF/9avY/DRxVZKwvBPaREaq8vhSaSOZu+yQ/dsP73UbguMA2wcbXnCw2SPwV3nefO7y/Wwr0rejzpxiekHcTVqg8HbjBs4zch72PF8i3GlTK/ZS/X+8j/958FlZwuN/r625s/Mzbsc+O24E7sYLK3wKPGWMeLdURY8yEMea/jTHvwApGJxTYLwAichQ2PuefjDHfyll9O7AOWFXgOtRqcbwJ2CoiGQuGF5f0JPItNk/2+qIoTUeFB0VZpFdENhX4BDVpg8DnROQEEflzrA/2l4wxeZo+LyPH50TkAhHZ6gX7Pg/vhYjVlN0GfENEzhGRk7EuCquwAXh4f6PA1d4+L8T64Ab5OjYI9Uci8lyxmUeeLiL/KCIvquQEeBPoa4B/Cggs7wfeITZ//slis8a8SET+LfC7OezL7f95x+Vv6x6s+09QK/gwdmLwLu+8vAg7WQnDldjJ4+UicpyIXIINIi/Hx4C3i8gbReRYsRlq/hr4aE67r2GDit+K9YUPaso/ip1w/KeIPM3r+/lis64cFbL/raDa8fEL7HV6ur9ARE4TmyXI8cbCUSLyMuC9wE2eG1W1hLkfPgusAa719n8u1hr0G2OMH9i7A9gkImeLyFDALa0knjXlo8BHxWaG2iY2W9rLReQTXptHsMG9n/Ou/YnYIOaBMpv/BlZL/C0ROV1EzsBaunZjLX1+H+7FToq/hL3vvxlY9wjWr/5LIvIasVmIThWRv5R8l7Gw3IB1k3rALGaWuwF7H/82oByolk9jXQEv9e6712OfB0E+iXWf+mcROV5sZql/xcZKBCfjfr9+ZYyZ99zebiTwzCmGiLxHRF7lXc+twF9ihaM8C5PY4PEfYgPOr8l5F3R7+7oe+C8RucS7B84Qm1XqjWX68RSxWcw2AP2B7z7XY4V2/xnzdOz4vsU7Vn87glV+/KjU/hSlYTQ6qEI/+umEDzaAzRT5DHltfol9eX8S66Yy7n3vy9nO9d7/q7CThh1YjWYSO1F4UqB9HDuJOIDV3N9IfvDphdhJ+AxwL9bFIDeQdhA7wdqN1bzuxmrOTitxzJdRIEgQG3cwB/xVYNmLsC/TSWzg6114QayBNu/x+nVJYNmnKRDMDLwNa9GYwgodz/PaneetP49AUGXOb1+OtfT4/vAvLLSPnN+I178d3rE9irV85LZb4V0nA5xRYP2TsVlx9nt9fwTr2uAHVRc8p2XGXqUB06/OaTNPTlC+N97eUMv48H73VeDqwPchrAB3F9Yyk8JOwK7wz4HXbifwgZxtPQJclrPsAax2t5L74Sysj/yU1+4bQCznGn4D62Zk/H0W6dOXgV/mLPsr7/imvet8K/DXOefS9Y59
GCuYfo0SAdPe77Zh3bz8ZAw/BI4p0O5Sr98/KLCuGyuo+WmAR7xz9NJSY6REn8702n8msOz53rLcc3Ut+QHT1+e0eTVgChzPbu96XY+d7Gfd21gXwTuw9/SwN1YjOdt5qfe7vw0se3uY48XGrt2BfXZNYGNUXhhYfxnefYvNnlXsXXCk12Y11n1xh3cd9mKD7y8o04+C281pE8dmkxv3+vstAuPba3O+Nzb7Su1PP/pp1EeMCeOOqCiKiPwS+4J5Q6v7oijNQGxazNuBk0zlLnCKojQAEfkxcKMxplSacEVpGOq2pCiKohTEWF/6N2PThCqK0mJEZABrBb6q1X1Rli9qeVCUkKjlQVEURVGU5Y4KD4qiKIqiKIqihELdlhRFURRFURRFCYUKD4qiKIqiKIqihKLTK0yrz5WiKIqiKIqi1IfcKvF5dLrwwJ49exgaGmJkZKTVXVmS6LltDHpeG4ee28ah57Zx6LltHHpuG4Oe18bRqnO7efPmUO3UbUlRFEVRFEVRlFCo8KAoiqIoiqIoSihUeFAURVEURVEUJRQqPCiKoiiKoiiKEgoVHhRFURRFURRFCYUKD4qiKIqiKIqihEKFB0VRFEVRFEVRQtG0Og+O41wKvBFbfOJLrutelbN+LfCfwBavX59yXferzeqfoiiKoiiKoiilaYrlwXGck7GCw9OAU4GLHcc5NqfZ24D7Xdc9FTgP+LTjOL3N6J+iKIqiKIqiKOVpluXhBOAW13UnARzHuRG4BLgi0MYAA47jCNAPjAHzTeqf0saYuVnM7TfB3GxmmZx0GjIYa2GvFKX5mAfvwezbU3hlVxdy2tlIpL+5nVIURVGWFc0SHu4FLnccZxCYAi4Cbs9p81ng+8AeYAB4meu66dwNOY7zJuBNAK7rMjQ0RE9PD0NDQ43s/7KlHc7t1K9+yqGvXJm1rPfcZ7PuXR9uUY9qpx3O61JlqZ5bMz9P8qrLYH6uaJu+2Wn6ndc3rA9L9dy2A3puG4ee28ag57VxtPu5bYrw4LrudsdxPgH8DJgA7ibfqvCnwF3ABcDRwM8cx/m167qHcrZ1NXC199WMjIwwNDTEyMhIQ49hudIO5zb98AMgXXR97Gro6ib9lSuZeXxHy/tVC+1wXpcqS/XcmuQemJ9DXvYG5Ixz8tanP/4eJnc8zHQDj32pntt2QM9t49Bz2xj0vDaOVp3bzZs3h2rXtIBp13WvAa4BcBzno8CunCavBz7uuq4BHnEcZwdwPHBbs/qotCnJBAxGM25KsnkL5uYbMMYgIi3unKI0iWQCADnyGGT9YP76jYdhvDaKoiiK0iialqrVcZyY93cL8GLgmzlNHgcu9NpsBLYBjzarf0r7YoYTEIsvLojFYWoSJg4V/5GiLDEygkHwXggg0XhGwFAURVGURtE0ywPwXS/mYQ54m+u6+x3HeQuA67pfBD4CXOs4zj3YdK5/77qu2sMUSCaQM8/NfJVYHOMtZ2Bty7qlKE0lmYCVq2FgXeH1sThMHMJMTiB9GjStKIqiNIZmui09o8CyLwb+3wM8t1n9UToDkxqH1DhEA9rW6Ca7LplAjj6+RT1TlOZikgmIbirqqifRTVaoHt4LRxzT1L4piqIoywetMK20N8m9gLU2ZBjcCNKlLhrK8iLXfS8Xb53GPSiKoiiNRIUHpa0xSS+nfWDSJCtWwIYhFR6UZYNJL8DwvmwhOhfPIqf3haIoitJIVHhQ2pvhBIgsTox8YnEbSK0oy4GxEViYL2l5kJWrYN0GFR4URVGUhqLCg9LeJBOwfhBZ0Zu1WGKaWUZZRvhpWktZHsAK1XpfKIqiKA1EhQelrbFBogUmTLE4pMZtQLWiLHEyAkGheyGAROPWWqcoiqIoDUKFB6W9SSYKalszy7yAakVZ0gwnYEWvdUsqRSwOB/djpqea0y9FURRl2aHCg9K2mKlJGD9Y2M87akuoZwKqFWUJk0nT2lX6kZ0RqodVqFYURVEagwoPSvsyXMLP
O7oxq42iLGmSZdK0+mQscnpfKIqiKI1BhQelffEnQIXclnpXwnpN16osfUw6DcN7ywdLQyYmQoOmFUVRlEahwoPStpQNEtXMMspy4MAYzM2WDZYGkNV9MLBWLXKKoihKw1DhQWlfkglYu8Hmry+ApmtVlgWl3PcKoUK1oiiK0kBUeFDaFjOcgNim4g2icRg/aAOrFWWJYkq47xVChWpFURSlkajwoLQvRdK0+ixmltGJkrKESSaguwc2DIVrH4vD/hHM7Exj+6UoiqIsS1R4UNoSMzNtfb1L+XlrZhllGWDTtG5EurrD/cC/Z4b3Na5TiqIoyrJFhQelPfHz1Jdy1YhalyajOe2VpcxwkSrrRZDY5sXfKYqiKEqdUeFBaU+S5YNEZdVqWLteLQ/KksUYA8mQaVp9YpquVVEURWkcKjwobYnxtabREgHTAFHNLKMsYcYPwMxU6GBpAIn0Q2RALQ+KoihKQ1DhQWlPkgnoX4P09ZdspplllCVNCAtcQTRdq6IoitIgelrdAUUphEkmwmlbo5vgwCgLn/0nEAGErue8ADnu5Ib3UVEaQfon38E8+qD9cmDM/q0g5gFAopswf/gdC5+7vPD6Y06k608vqaWbitI0TDqN+d5/IOc+ZzGmR1GUlqHCg9KeHBhFDjuybDM55amYu2+D0WG7IPEEpn9AhQelYzE//jas6IW1G+yCU58GQxsr2oaceS4m8QSMJPNXHhzDbP8D5rkvQkTq0GNFaTDJBOYn34UVK5Hnv7zVvVGUZY8KD0p7kpqA/oGyzWTL0XS//9OZ7wuXvR0zOdHInilKwzDz8zA9hfzpi+m6+GVVb0eechbdTzmr4Lr0z3+Iue5qG0+xZn3V+1CUpuHH76grnqK0BRrzoLQdxhhIjdugz0qJ9FvBQ1E6EV/wrWbsh0S0PorSYfjxO0aTAChKW6DCg9J+TE9BOm0FgUrpG7CCh6J0Ir7gW83YD4umclU6jaRaHhSlnVDhQWk//Ml/FdpXUcuD0sl4Y18aaHlgMApdXToRUzqGjKA7fhAzmWptZxRFUeFBaUO8yb9Uo32NqOVB6WCaYHmQnhUwGFPhQekckgno7bX/D+9tbV8URVHhQWlD/Ml/X5UxD7MzmLnZ+vZJUZqAqcHqVhFaXFHpEMzCAozug22n2O86bhWl5ajwoLQdJlVD0Kj/G3VdUjqRSV94aGDMA4vFFY0xDd2PotTM2DAsLCAnnWa/J/e0tj+KoqjwoLQhqeonUBlXJxUelE4kNQHSBav6GrufWBymUurip7Q/XoYlOXyrrX2iGZcUpeWo8KC0HzUID4uWB50UKR1IahwiEaSrsY9mTdeqdAoZN6VYHGKb1G1JUdoAFR6U9mNyAnpXIit6K/+tL3BMqvCgdCCpiepifSpF07UqnYIfLL12vedupwHTitJqVHhQ2o9qC8RB5ndG3ZaUDsSkxhse7wDA0EYQUcuD0vaYZAKicWuNi8bh4BhmZrrV3VKUZY0KD0rbYVIT1U+gMjEPanlQOpDUROMzLYG16q0f0rSXSvvjCQ8AxDbbvzpuFaWlqPCgtB+1WB5Wrobubg2YVjqT1Hh19U2qIRbHaPCp0saYdBqG92ZidDRWR1HaAxUelPajBsuDiEBfv1oelM6kSZYHWEzXqihty4FRmJ/LxOgQ3QSgQq+itBgVHpT2Y3ICqWUCFRlQy4PScZj0gk2f2tc8ywPjBzGTqebsT1EqxRNuM5aHvggMrFWhV1FaTE+plY7j9AAvAP4cOBVYBxwA7gZ+AnzPdd35RndSWWakJmqbQEX6Fyv1KkqnMDUJxjQnYBqQaBwD1n/8iKObsk9FqYSsNK0+Ma2OriitpqjlwXGcNwOPAm8G/ghcDrzF+/tH4I3Ao47jvKUJ/VSWCWZ2BuZma5tAqeVB6UQy9U2a47ak6VqVtieZgJ4eWD+YWSRRdbdTlFZTyvJwHPA013ULpTX4b+CjjuPEgXc1pGfK8sSf9NcgPEhfP2b3
Y3XqkKI0CW/sNy1g2vMfJ7mnOftTlAoxwwkY2oR0dS8ujMXh1l9i5marqwWkKErNFBUeXNctKxS4rpsA3l3XHinLG0/7WlvMQ78tNKconUSTLQ+ychWs2wAafKq0K8lEtssS2O/GwMg+iD+pNf1SlGVOUeHBcZyjwmzAdd1H69cdZdnjWx5qinkYgKlJzPw80lMyrEdR2gZTB6tbxUQ3qduS0pYYYyCZQI4/JWu5RDfZWJ1kQoUHRWkRpWZWjwAGEO+vT+73bkLgOM6l2DgJAb7kuu5VBdqcB1wFrABGXNd9VphtK0uIemhf/cnXVMpm5lCUTqDZMQ/YLDbm3jubtj9FCc3B/TA7U9jygI3VkRZ0S1GUEgHTrut2ua7b7bpuF/AG4DrgeGCV9/cbwF+F2YnjOCdjBYenYbM2Xew4zrE5bdYBnwde4LruScBLKz8cpdMxdREevN9qxiWlk6iH1a1SonE4OIaZmW7ePhUlDH6a1miO8BAZgL6IBk0rSgsJ69PxEeBY13WnvO8Pe9mYHgKuDfH7E4BbXNedBHAc50bgEuCKQJtXAv/luu7jAK7rJkP2TVlKTNYhYDrSb01jmnFJ6SRS47C6D+kOZcytD7HN9u9wAg7f2rz9KkoZMoXgciwPIgJRTdeqLB2MMbCw0FFu1mF72gUcCWwPLDuCkC5LwL3A5Y7jDAJTwEXA7TltjgNWOI7zS2AA+Izruv8ecvvLnoV/+TDcf1e4xkMb6brsX9tzoKbGobsHVq6qfhtqeVA6kVrrm1SBxOKL/uMqPCjtRHIvdHXBhmjeKonFMTsfbkGnKmPhY++Bx/5YeGVXF/L6S+k68xnN7VSDWPjU+5GzzqPr3OdUvY30DT/EbL+b7re9v4496wAOHSD93tcjr307Xedc2OrehCLs7PFK4AbHcb4KPAE8CXidt7wsrutudxznE8DPgAlskbnc4nI9wBnAhcBq4GbHcW5xXfehYCPHcd4EvMnbLkNDQ/T09DA0NBTyUJYeZmGB5Pa7WXHcSaw48dSSbed3PMzs729mQ28P3RvKn7Nmn9tDC/PMDKwhGs1/YYRlfm6aUaC/C1a36bhY7mO2kXTqud0/N0N67XoGm9j39OpVDAN9qUNEQuy3U89tJ6ASWD+kAAAgAElEQVTnNptDc9PMrF1PdNOmvHUTRx5N6o6bGFy7Flmxouy2WnFuzfw8yUcfZMVJp7Hi+CfnrZ/68XdYtfNh1vzZJU3tVz3xz2t64hDDD95DbzTGuhe9ourt7X/oHma3373s7oPZ5G72p9Os3XIEK71jb/fnQSjhwXXdTzqOcw82DuE0IAH8peu6/xt2R67rXgNcA+A4zkeBXTlNdmGDpFNAynGcX2HjIx7K2c7VwNXeVzMyMsLQ0BAjIyNhu7LkMKNJmJ9j/sxnkH7Gc0u2Tf/u1/D7mxl74nEkXX7bzT63C6PDsDpS0z7N7BwA43sTpNp0XCz3MdtIOvXcLhwYg5Wrmt/3gbWkdjzCVIj9duq57QT03GazMDoCq/oKnpN0/1pIpxl5aDuycXPZbbXi3JrxgwDMn3Im6Qsuzl9/561MPbGD2Q6+5v559a1AM088VtN5Xtj1OMxMM5zYs6xqeKQffgCAQysjiHf+WvU82Ly5/P0E4S0PeIJCaGEhF8dxYq7rJh3H2QK8GDg7p8n/AJ91HKcH6AWeTkjLxrLHDyzLzUpRgMV4gDZ16UlN1J6qcnUfiGjMg9JZpMaR9S3QNMXUf1xpQ1LjRRNnZLnbhRAeWkKZ5B8Si2Mevr+JHWocmedHMoExxsalVLqNhQVbuwPsu3vdhjr2sM1JJqC7GwZjre5JaEIJD47jrAQ+CLwCGHRdd63jOM8FjnNd97Mh9/VdL+ZhDnib67r7Hcd5C4Drul/0XJv+F/gDkAa+7LruvZUe0HIkc+NG8827efgPsnYtopYaL+jjWgnS1Q2rI+0rIClKIeohOFeBROOYh+5p+n4VpSSpCRgs8i7ohHSt
5SrGRzfBrTdi5uZCuV61Nf4cZCoFE+MwsKbybYwNw4LnzT65zISH4QQMxpqbLKNGKol5OAx4FfATb9l93vJQwoPrunlRQa7rfjHn+yeBT4bsk+KTTEDPClg3WL6tF5BpUhPt+dBNTSD1CNyM9KvlQekYjDElNa0NJRaHW36BmZ1Belc2f/+KUojJcWRLkVq1A+tg5er2ro5eLu14VqXsw5vXr0YQtFwm91QnPASv5TJ7d5tkIpzyt40oWuchh0uAV7quezPWKoDruruxAoXSYvyBJ10hLme7ZyJKTdRnAhUZwEy26TEqSi7TU5BOt8TykEmF6bsMKEo7UMISJyIQa+/q6KZM2vFM/Yo2PoawmOEErF2/+H812wieh3adnzSATCX1EG7n7URY4WGWHCuF4zhRYLTuPVIqZziRX4WzGKv7bPq7NpTszfwczEzVZwKllgelk2hBdWmfzEtrCUxilKWBmZuDmenS90Ms3t5j1n//FLU82FgNM7ynSR1qIMkEcsKpNtaw2msS+J1ZTu/uiXHr7rVEhYdvA19zHGcrgOM4cay70nWN6pgSDpNOw3B4qVVErOtSO2rlJ8s8bCtAIgPLSnuhdDjl/KMbScB/XFHaghDFQiUWh5F9NtC2HUmN28n06r7C6/sHbGxeh993ZnoSDh2AzVtsvGKVx2OSicV4x+X07k5a4VGibRr4X4SwwsP7gJ3APcA64GFgD/DhxnRLCc3B/TA7W5nUGhloT618qvwLIzRqeVA6Cf9l2dcCy0NkwCoU2tl/XFlehLHEReM2wHZsuDl9qpTUBKyO2AQeBbCuV0sg01lyL+AJc7UcTzIBW45uW8+IRlGsknq7E7bOwyzwTuCdnrvSiOu6pqE9U8JRQZrWDJF+TDtK9l6fpE4xD0xOYNLpcLEgitJCTDkXh0azFCYxytIhhCVOYpttutbhNg02TY2XVYR1SqXskviT32jcZm77/U0Vb8J6UOxFTnkqpl09IxpFMmEtVEMbW92Tighd58FxnLXANqDf+w6A67o3NKRnSiiMZ/IiWqHl4dCBxnSoFupteTAGpiZbE4SqKJWQ0bS2ZqxKLI559MGW7FtR8ghjeQimaz3xtCZ0qjJMmOQf0TjccRNmfh7pCT0daysySofYJntNJsZtNsdKnmUHRmF+zv6+XT0jGoXnrtVp6XrD1nl4HfA5YAKYDKwyQJFcakpTGE5Ad09FtREk0o9JPNHATlWHqWfQaF8gq5QKD0q702LhgVgcfvcbzPwc0tNZLzFl6ZGxxPWVuB/Wrofe3vaNGQjz7onFbZa1sWQmgLrjSCZgzTpkVZ+1YIKdl0SOrWwbeDVn2tUzokGYZAUJb9qIsKLu5cBLXNf9SdmWSlMxyQQMbaysuEi7SvZ1tDxIZMCrpN2Gx6kouUxOQO9KZEVva/YfjYNJw0gSNmkGbqXFhFAkSVcXROOY4b1N6lSFpCaQMu5UWZWyO1R4yJr8Bq1BR4YXHhatF/H29YxoFMMJ5PRzWt2LignrDN4D/LSRHVGqpBqpta8fplLtl6UiNQ7SBauKZKeoBF8AWUYaDKWDaVWBOI9MzJQGTSvtQGrCBs4Wy1TkE23jdK2TIS0PdHims2RisWbF0KbMskq3QU8PrB+07k7L5L1tUhM2VWsHWh7CCg+fAD7gOI5GnrYRtrjI3sqLi/iTlMlU/TtVC6kJiETqE+DsHeNyMn8qnYspURCrKSyFSYyydJgch75+m5GoBBKLw/BeG3DbRph0GlKp8gqBNetg5ar2FYDKYGambbyC9/yQlSth3YaKj8cMJ2Bok81M1a6eEY1guIqEN21CUbclx3GewMY0AAiwCXiv4zhZheFc193SuO4pJRk/YIuqVRIsDdla+WrKyDeK1Hj9UlX6xzi5TB5CSmfTYssDA2th1eqOncQoS4wwwcZgJ61zs3BgDDYMNb5fYZmetG6A5bItiVjXqw697xb27rb/BCe/sXjlVaaDHhQBz4iK3LE7kCx3
rQ6jVMzDq5vWC6U6qknTSjAeoL208nXVvvap25LSQaQmWhprkMk5367+48qywoRMdCHBAN12Eh4yAd8hBaA9jzW2Pw1ifu8uIHsOItE45p7bQ2/DelAkkONPsQuCnhHtpNxsBP7zdqgNUw2Xoajw4Lrujf7/juO81HXdb+e2cRznJY3qmFKeqqXWdtXKp8atBrQOSE+P1aSm2sw1S1EKkZqoT32TGpBoHLNrZ0v7oCiAnXyvWVe+XTBAd9uTG9ypCqigZpHE4pi7b8OkF4oWlGtXFhKe5SGabXng0AHM9KTNwFSOg/thdmZxHhOJ2L/t5hnRCJIJWDdo3b06jLDO5dcUWX51vTqiVEEyYYPKBsOnaQXaNx5gssLc0OWIDKjlQWl7jDGey16LUwrH4jCyr/0SKSjLj9R4uHfB+kEbaNtubj+VZA6M+ZWyRxrbpwawkNgF/QNZ1ypjhUiGtGIG0rRCQOBaBu/uTk3TCmVStTqO49dw6HIcZys29sHnKGC6UR1TQpBMwGCs8rzsmZiHNrQ81FP7GunHtJt1RVFymZ21BZJabHlYnMQMt2fFXmX5EDLmQbq6YWhT28UMVFKzKCtda4dVGV7Ytzs/5jKTuW0vbClfBiwTHxGMeYD284xoBMMJ5MlPbXUvqqJcnYdHsEHTAvwxZ91e4LIG9EkJiUkmqnvJ9wXMgm2CSS9YH0e1PCjLjVYXiPPI8h9X4UFpEWZhAaZS4S1xsTZM11qJ5cG712yl7Kc0sFP1Zz6xC9l6XPbCaMCVLMxGkgno7obBmP0e8IwI9fsOxUxPWZetpWh5cF23C8BxnBtd131Wc7qkhCETZPT0Z1b8W+nqtgJEO1ke/LSxddS+Sl8/Zs/jddueojSEyfD+0Q0l6D9+4mmt7YuyfMm8C8IJDxLdhHnwHowxZVO7Ng1fIRBGAFo3CD0rOq7GipmbIz2yD3l69tRQVvfZ2MWwx+N7UPiZlTJuS200P2kEXrB0J6ZphZAVpn3BwXGcLcBhwC7XdZ9oZMeUMqTGrXam0jStPu2mla9jdekM7XaMilKIRoz9ali7AXp720+LqywvKrXExeIwM22rEq9d37h+VUJqAlattok7ymArZbef61VZRvdBOl1Ycx4Ln342z++/DT0jGoJ/fqqdw7WYUMKD4zibgG8BZwOjwKDjOLcAL3ddd08D+6cUo1apta/fpkYNiZmehEcfIlP6o7sHjj4h1MMxFBVkpwiNV6nS3H+n/S5dcNTxHZnZoBWYqUmYmULWDba6K3XBzEzbrEYhUjqavbth4+aCmkyzdxdsPKx+Ws4K/KMbSSbn/I6HFu+ZFSvh6OPrU7hxmWOG92ZrY9esRw4/Mr/d3BwcHEM6zP+9blT4LsiKGahBeDDjB6Grq+x+zcICPLIdFub8HsDW46zG3afS+L0SrldmehKmppD1zX8O5x9rYN3OR4DFQOcgEotj7rsz+9179PFIb/a71xhj/f6PPn7xt93dsLp+nhHmwFh2KtzVkXxXK7xj/eN2G39WiDXrkMO31qVPEMyW2ZkuomFnfl8E7gYucl035ThOBPiot/wFjeqcUpyai4tUqJU3//MNzPXfz1omr3sHcs6zq9t/Lpm82HXUvm4YgoUF0ld+KLNInv9y5AWvrN8+ljDmO1/FPHQv3R/5Qqu7UhfMD7+FueWXdH/yq6XbjQ6T/uBb6XrL38Ppf5K97okdpD98KV1/9xE44dT69KtdLA+AHHYE5rZfZd0zXZdeBief3rpOLRHSV/yjrcbr091N16f/Iy+rkPn59zE/cum68uv1U850EpMVvgu8HPlmZB9y7IlV7zb9pU/BylV0v+39JduZ23+D+fKns5bJeX+GvOqvF9tMVlazSGJxzPa7MOl0nqBu/us/MPfcTvfHvhR6e/Wi0LFm0d1duD7NYUfCzb/Ifvde8hrkopdmt5sYh6nJ/HmMp/irB+mrr4CH789a1vWhz+QJAuaOmzBf+lTxDXV10XXlfyL1
mqMMJ6xAEiadbRsS9sl0LhB3XXcOwBMg3gvsbljPlNIkEyBSdWCjRPoxI/tCtzd7HofNW+h6zVvBQPrKD0Id4wkqyU4RFjn3uciWoyFtU0+mv/zPGgNRAWbXTti7GzM7k6cx6kTMrh1wYBQzP196UrZ/BIzB7NqJ5AoPu3d629qJ1El4aBfLA4C8+q3I+RfZLzMzpK/6EGbP44gKDzVhJifgwChywcXImediHn0Q8+2vwr7dcNS27MZP7ITpKeuWWqe6N51Exe8C/xylDtW249GkdcMpx57H7UTyXZdDl5D+xr/lv1eqsTzMztoA2hwLg9m906ZQDls3oZ7seSLrWHNZv2UrB3pX5y2XCy9GjjnBVtkG0v/2ycLzhaR1XMnzoIgMVOQZUZI9T8DpZ9P1nBdiRocxX/40Zvfj+VaEPY+DdNH17vxjNfffjfnBN61rXJ2Eh05O0wrhhYf9wIlY64PPNuBA3XukhCOZgPWDyIre6n4fGcgEaobdnxy1DTnG0+zU20czo32to/DQ05P9Yt68Rf25K8E/VyP77LnrdPzjKTcp88diobHiL6v32O/pgTYQ0GR1HxwT0N729XdcIGdb4ruZbnuyfYZGBjDf/qoNTs8RHjKpK+tYNLOjqPRdsLrPKtJqnWymJmAyhZmfK53+3EupKsedBIAcvhWz/a68bUkFz8ws16tc96TMMydc6tO6MuwFM3vHmkvP0BCM5NenkJ4VEHBFIn54wflCXppWnzpZHkxq3NYMOfp4e99tmVnMKJdLMgFDRY51esr+rp5B3MkEcnwbFTaskLDCwxXA9Y7jXAM8BhwBvB74/xrVMaU0ZjhRW6BNpB9SqYJm0rx9zc/B6DCcdd7iwnqnx8tkp4jUb5s5SCyOeei+9srK0aaYyQmY8DR5yUTHCw9mft5qFqHspMzXfBYUjr1l9RWcrZayLcdkBYGPSnHy3EyHNtoJbykBdalnmylGhe8C6eqyQm4Nk02TTlt3KWNgJFnYFcdvm6sxjsXh5hswM9PIylV2WaWWh0x60z3ItpMX9zUzDQfG7JfhRNOFh3ppxyUax/z+pvwVyYSNhxjMju+RyABmJFnzfv1Cdb5lQ3pXwvqhgvedTX1f5FjrXLjOzM5YC3cHWx5CRcG5rvsl4GXAEPB87+8rXNfVCtOtIpmoLcVXZMCaFKcny7cdSdq2gRtLYnEY3msfuvVgcsIGMvnp2hpBNA4zUzCuBrOyDC9WB10Sk8exYfArJ5eblPkWuQLaqcy5qKM23qQmWl9dugjSjjn0O5FMZhXrZioreu0kJmcc+ZpSYOlnmylGqop3QaS/NmFratIKDlDy3s4E+EZzhAfIPDNtxfjKYh7YELVJSHL33cLncCYdfD0muLE4TIznuyIlE7BhCFmRY+mJ9FfmGVEE47lFEd2c1Zfcc1n2WL1rWTdXqmHPZbxDMy1BeMsDruveANzQwL4oITGTKRg/WJvUGqwyXW7i4j3Qsm6saBzmZq1WJET2mrKkxhseMJplGl7TJin92pSsh+tScFsJHk+5SZn/gvBedlkBrf65GEmWd28IS4XBlU0lFoff/aZ+x7pcSSZg3YZFzTQUtuokA5PF1MSSLpJVlGreBZGBxViJavfpUbK42cShvADfrPfK4UfatLEL8xVZHqS7G4Y2FhgPicL/N4MJLx18PSwPwQKUkWMzy4taNvoGQntGlMR/XkcXLRsSi2PuujW7XarMsdbZ8sCwH+uxuUzD9iVsqtYVwAeA1wCbgT3AfwCXu64727juKQWpQ3ERiQx4PnzjZYOuC2V2ynoY1EF4aIr2NVgE65jqs3IsC/xrHn/SkrA8mIAAVHZSFnxBBF52JjVhX6iHHQG7H7OufBvr8PBPjVvNYzsSjVurYxlXDqU0hSZJEotjfn9zTrtA5vNlankwqYnKY98i/fberJagRrnU8y5ZRJGGTcUrwW1V6oJbwMqXeW5t3tL857AfzBytwzMu+O49clF4YDiBnHFO
fvtI/6JnRC3zgmQC1g9lJ/yIxmH8YHYAeqHrGqRecTUeNWfLbAPCinRXAM8G3gKc6v29APhEg/qllKAuAy9oeShHMgGrVmf7iQceBnWhCZYHBqPQ1aVuGGHwNaVP2ro0zpfvWwvhLA9eWxNwG8hY4E46fXGb9SA1Ub/0f3Um8zJdCtanVpLr6gKeK8cha0kOtMuwnGMeKnwXSF+NBUH930pXyXdaQUVapB/6BwKxKtXVLLIugnutC41Pci/0r0GOOKbpz+GiwczV4KXTzXLD8pUxhbZfpyrTxYR2INvKV2ZOJV1+7Yk6CfTJhI1za1eLcwjCCg8vBV7guu5PXdd90HXdnwKXAE7juqYUJePHV2PMA4Qy9fo3YFZA54ahwj6a1ZKaqG+BuAJIzwoYjC2NyXCDMcPeQzcWh9FhGzTfwZhkAuKH2y9lXkgmNb7YNjBW/BeMn7bU1HHst0Oa1oJklAR7yzRUimGmp2wKztxJjP/8DgqonqaUvjpOVDqNat4FNcY8ZN6Dm59U3vJQIMCXaDw7SxZUfk8XiMnLeg4fGMXMzFS2zVrw08HXoVihrFwJ6wazz20hd2i/fUa5WeM9MLw3f/uFFCJhjrXWuJoAnZ6mFcILD8Ws/MvSJbPlDCdgbY7/bKVUaHnI1ZpJVzdEC/hoVkszLA/gPeR1IlSW5F57zYNuK51MMmHdbsJMylITVjhet6Gwz/FRx8PK1XURQs38nJ0wtKsGamCttTqq5aF6irmZFrDeZiYVkYHla3mYrC7mgakUxqvpU/k+7bmWrcfB6D5bbbgQRQJ8sxIL+EXuKrWeZDTi2c8c8YUHgJEmvruSCdgQzQ9mrpacGJ/M/4XcoupgeTDTk7YuQ66S1XPTzovrWz9UOvV9ZABThyBuoOCcqtMIKzx8G/iB4zh/6jjOCY7jPA/4HuA2rmtKMewLpsaS5n3hJHuzMA+j+wpLydH6ZGIx6XTTtK+aPaY8ZmYaDo5Za9MScFsx6QUY8YShMJOy1Lh1g8gNaE0mYN2g1aLF6lTnJDPRaE/Lg4houtZaKeYS4ceaBeMc/MliX3/9JiodhH0XpGzAbCVE+m22pKkQ2QML4b8HjzzWZmUbGy7cv+EiGuNYHMaGMXNz1Rc8zREmzdyc7Uc0XliwaDD11o7bDI0FlDHRAtr+THajGu6BZGGhXVathrXr863K5Y61TpYHe107O00rhBce3gtcD3wOuAP4LPAL4O8b1C+lFMkCprgKkZ4VVnta5mZIjyTtw7SQabGQj2Y1TE9Z7XYztK+xOExO1PZQWuoEzcn1jm1pBfvHYH7eHkuYSZmXZlGi2S+7rIlD7ouwWlLVaSmbidRJSbBcybiz5FpvV66y1i1vfdrPohdWyF2KTE9W9y6oNRtOagJWrUYKuCtmUSydZyzu1YjYV/09nRuTN7LPbjMWD9SBaOJ9WChOpxZicTi437rxwaIyplBxzHrEPJSK2YjmK4bKzakkUmNcjc/oPjvGO1x4CJVtycuo9EHvo7SQjFa4Hjd1iCqO84ldQJEsBLGAj2YtqU+r1dRUQVZava3tqe1tOUFNacZtpYNdvfysIbE4psykzKQXbMq+yICd2HkvO1ll3ZTklDMXt3XXbZj0gnXhq5YqgyubSiwOd91a3JVDKU0yAQNrbfXuXAJWnYW9gWftE49imumi0i5UWl3aQyL9tVUA9ou6BbMCnXRaVpNMDY5CirRo4L2SGocVvYUnxaWOITcmL5ABKC8ou8GUDGauksUMjXvhSVuLW3EgtGdEKRbdovK9NCQWx9xvq4JnCqI2yfJQNrNThxC6zoPjOEcCpwBZ4rTrut+oc5+UUtQzA0Kk3944JfBfaGUfmLUID76/abMsD3gvh63HNX5/HcjiQze+JNxWgllDJNJfelLmF4uK9CPrNmRedia60frP+vdBNG5zuY+N1BZQ2AGWB6KbvGMdho21B08uN0q5REh0E+beOwFYSOy2C5dzzENGmG6u5cH4Rd3W
boDe3iKVv0ukSPfy9ZvhPZUXiAsSXXSHNF4tgOAzp25JGspRIpi5ajJuegnwsvj5yphcZMUKWLmqtnsgmYA166zip1Bf/AB0PyapnEI2MmC9FmqsPbEU0rRCSLclx3H+EdiOtTz8deDzlsZ1TSlIPaXWEGa4hcQu+zBduyF/Zb1cWvw+VOrnWg1DG21WhQ6eDDecHE1px7utJBPQs8Jm+yg3KQtawYJ+xjkTh3r5IFftH91EMoWMOjjupaWUcv+IxuHgGGZmelFRE93kVdi1RbKWFVVaHmquADxpY+5EpOgkvWC1Yp/+AZvKM5mw93SV97Mfk+dXPGZ1JLOtZsbrNWSCG3C9ygQzl9p+CM+IUpSMYwgEoIc+1lrjanySCVs3on9NbdtpMWEtD+8CznBd9/5GdkYpTylTXMVE+mHPEyWbLCR2LWqgc6lT3QRT7QujCmRFr02F2MmT4QaT99ANuK1Idw0uOi3CJBMQ3YR0dWECk7KC2qOUzbkvkf6sl53gxfVE41l/TTKBnPiU6jvXCZaHpRD30iLM7Ezp4MiAYDaf2AVr1yOrVlv3unoUyeowFoXp5sc8yOYt9v9YHPbuzm9ToFqxT5aFdm6uBstD3LpNpsbzU6TH4nDbrzBzc/XLgFQM/14fqsM8w0NW91k32OF8ZUxB+gbKekaUJJlATji1cF+C7sth51S+cnOyBssSniW82JyqgwhrexkFdjawH0pYkglbNKYOL5QwAUDze3cXN7nXq25CsydQ0U3NM/92Irma0lh80W2lEwkKQ8FJWSECloDgy25RO+W9YNZtgBW9tWvjU+M2b/yqAv7w7cLa9cVdOZTSjOyzf4s9QwMWLF9RA1SWSnspUa0iqa/G8xWwFvhZgfLSvno1OIrFMmQsA6nxqq3oWRbN3CDeYFB2owlmlqsnvoBVJIlAFjVYHszMDBwYLS60BwPQkwlbELVM6nupVUD1CRGc3QmEtTy8E7jacZyrgKyE767rPl73XilFqWv6NC8AyBhTUAo26TTpvbtLa1ZzsxZUQ7Xapiqxwa63NmVfnUYhTelioFuiPhavJmKMscKQP4aDk7ICAnie5tN72Ykxnv+s58rV1ZXln1w1qQmIRGryoW009lg7O+6lZZRzMw3knF/YuwvZdoptHxnwAoDHO+6eq4mMC2uFNRK6u60rSBUTO2NMdpxCLG6zs+0fs9Z1v125d280DnfcZBUPRx5bcT8y+wZMYheMJuHMZ2ZWZcUY+lmhGkTJYOYakFgc88A9+cqYQkQGIFHaM6IoflxbMaE9EIAeek5VB4HeLCzY6/rUc6veRrsQ9o3VCzwXuA1rgfA/OxrRKaUEw3WUWiMDVqM8M114/YFRmJsteWNl+WhWy+QE9K4sXaClnsTiMH4QM5lqzv46iUKa0k52Wzk4BrOLY7is9sg3k3uTl8z4LvSCqYcPcg1ayqbS6XEvLaKcP7X0Rax164mdpINC+3K2PKxcVZ1bTl+V2XBmpu170Lc8+NrwYP0NKK8xjsUhnbbpdqt1wY1usjF5D9xtt5X1HA4EZTeaRmnHY3HYPwK7HstSxhRCaol5CBMb6se2hE1J613TmtK8jw0XTX3faYQVHj4PvA9YA6wIfJo021MgoBWuV+7lcqZe/wYstb/Yoo9m1dQQYFYNi4XPlmEqxHIUeuiWykDS7uQeT7lJWa7m03/Z7Xk8v8p6LG4zMdUQ1GpqyczSROpxrMuS4QT09ZdOxRuLY7bflfkfqM9EpRNJVVFd2icyUN35ynWbzUzSAzVepiYXa3AUIeuZWeUx2Ji8Qcx9d+ZvMxCU3UhCBTNXi+8utP2uEAHKAxnPiEoJZgwshsTisGsnHNzfNMtDqDlVhxDWbakH+Krruprou5WU8Z+tlCzTeMA86xMmC0FW4FGV2QOaPoEKpms94ujm7bcDKHTNMxlIOlB4yHuJBCZlBcPV/GJRPT3ZvyuUBzwat5a5A2OwYai6DqbGrea53YnFYX6O9Ngw
0HlB860ijEuEROOYPz5g/1/mlgczOVG9JS7Sv2g5rITcWivrB212tuDzLkzq0izhoQZlWDQOD96Tt+GooDUAACAASURBVM2mpc0OE8xcJRLbbOcL4weRk88o3TjSv+gZUSjdaimSCegfKJ3yNxaHW2/0+hXiWOtZe2IZWR4+BfyD4zhVh4c7jnOp4zj3Oo5zn+M47yzR7kzHcRYcx3lJtftastS7uEg5Fw4/xeX6weLbqIdLS5MtDxQzSyvFNaWd6raSTEB3D2zwhOMwlofAsWfSlELeA3/RglXDeZmcaE59kxrxj3XBKxqphCSM+0dwvf9sqsNEpSOpwfJQdQXgnDingvFMYSZ9a9bZ2gTUVrMoM15WrrLbzF3XaIu5v/1GaMcLuMMWpYYq035Go7r1BaxCadXq2i0PvSttEooOJ6zw8A7gMmDCcZzHg58wP3Yc52TgjcDTgFOBix3HyYsochynG/gE8H8h+7WsqLvU6j/gimhrzHCC7o2bS1fQrUfdhCZbHmTlKuuKoxmX8iimKe1Yt5VkAoY2LqaYLTMpy7OC5QSOZ9GJgnO1eMc6r8JDaMz8HIwOl39e+/E4A2szk07pWQEra5yodCKpiervh2orAE8WyPCUE88UJkW6b6HN21alBIrC5SUyicZhNImZn69++2UIFcxcJRLpXzw35SxykRoE6BBCe5brUFhBqVoB1cMPRO/0NK0Q3m3p1TXu5wTgFtd1JwEcx7kRuAS4Iqfd24HvAoXLDi53hhPQF6nfZKOcC0cyQXf8cEr5qmXqJtQyEU9NlPYJbgSxOmTKWYokE8hR2/KXe24rHBhd1OJ3ALlZQ8pOynItD/7LLjWe/4LZMGStGlWOI5NegMlUR8Q8WFeOnsVCZkp5RpI2LXCZiYnv+tkTP5ws0TwSWX6Wh1oscd59Wix7YDFMbpwTnivZ9rsWt5VMZGpwlCQWh107arqnM67AhSa/sbgNuB0LIZRWy3CibDBzTcTisGO8vEWuytSoZm7OOz8XlOmHZ1UOFEQtS6S/5toTjc6U1SxCCQ+u695Y437uBS53HGcQmAIuAm4PNnAc5zCsQHEBy0h4MPPzmBt+iJx/UdlsQya5t77FRTKSfX7WIb/CZc9pTy8pPADWD/PBe0l/68sFV8vxpyCnPq3gOpsmb7zphZAkFsfce2dT91kN5vbfZPyhyzIYpevZL6xs+7t2Ym76OfhF0EaH4azz8tplxbZ0iPDgj2E55sTsFaWyeKQmkHU5bnqxOAznuyJIVzdEN2LuupX0/FzBzclTzkK2nVx4X362rw6wPEhXNwxtYua235Ae987dylXInzvNy5JWZ8wj2zF33FTVb+UpT0e2Pbl0ozB+8pCZBHZvOixbeKihSJYxBnPDj5Czz6tLTaBmkHkX1GJ5SKdhesqmbQ1LoTpDsTjMzmK+/gXMil7MA38IpZ3OPCfrYHkoNG6ynsMNEh7qmg6+ABKNY3Y8VP58etcj/fMfIH/4XfgdTE3aehjljsEPQK/kWGuwPJj0gs3sdMpTq/p9u1FSeHAc53nAIdd1f+t9Pxr4d+Bk4Gbg9a7rllW7ua673XGcTwA/AyaAu4Fcu9tVwN+7rrvgOE6pPr0JeJO3XYaGhujp6WFoqMqAxRYzc9etHPj2V1iz9WhWnX1+ybYjh/bTc/iRrKvjsSZX9bFqZpI1OdtcGBthZHaGFYdtYaDM/lJnPZOU+1X47c/z1pnZGbruvYOhCy8q+FszM01yfo5IbCORJl7D1JHHMHHTzxnsj5TXJjWAsGM2+c2rMZMppLeMYDk/B7OzbLjoJXStCR+Ae/Cb/8b0L36c0bxI/wDrzjyH3tzxsO1ERoDI5Dh9bX6v+ec2fWCM4ekp+rcek9Xn0bXr6JqfZX2B4xieSrFyMJp1P6TOuYCF4X159wjA+NOfydTPvl947E9P07PncTacc17Bfs7PTjEKDGyMs7rNzyn4x/o/9ljTacz0FGtPexorz/iTVnetKg5c
/QNm7vhtxfe/mZ6mZ/djbDin9PN6MnWIcWDw+JPoWreheMOhIfafdharzzyXtYFxMLZuPcxMs6GKsTG34yHGrrua/rVr6Hveiyv+fStIHzrI8Pw8kU2bq3oXTG2McwjYsHIF3Tm/L/W8HU8vMNnbS3TzYZllc2f+CQd+7GJ+92sABIj82YvL9mv2nPOZ2PEg6486tuoq0GbNAPuPO4n+s5+V/xxmm30Oz0417Dk8cnCMFceemDUWi1HN3GvqnPOZnp9l/RFHlmyXjqxmLH446Yfvg4fvq2gfXeuHWH/G2fSU6duhcy+kO3546PF2YP0g848/WtV8c2F4LyPz8/QfdWyoa9fu89pyloePYOMdfK4BDgKvBP4SG0j9qjA7cl33Gu/3OI7zUSDX/v1U4DpPcBgCLnIcZ9513e/lbOdq4GrvqxkZGWFoaIiRkZEw3Wg70g8/CMChRx5i4tjSmqyFQwdI966s67Ga6EamHt/BbM42zUP2ZpVYvPz+znkuXec8t+Cq9H//Jwv/912G9+5dzGAT3M+Y3XaKLqaaeA3TETvBHnngXuTwrU3br0+YMWsmU5hDB5CXvI6uPy09AUjf8gvMNVcy+sRjyMbNJdsGWXhiBxx7Il3v+Vhm2SGA3PFAF/T0MPHow0y2+b3mn1vzyHYAUpE1WX1eWLka9o/mnX9jDOmJQ0x392TfD8+ygm/B63XxK+i6+BUF+5H+6meYu/+uotfZ7LIhYxMGUm1+TgG4+OXEXvc39tweOoB512s5+MhDdB1xXKt7VhUL+0dh25Pp+ruPVPS79LX/wty9vy97/6Z3PAKrVjM6t4CUu75vfR+9Oc+Ehd5VMPpEVc9785Ad+51wv/qYHQ8BMBlZU9W7wM/oObbrcaQre+Je6nmbHklCX3/2+v51yCevzXLnnYLy/dq0Bd79UUYPHqy4/1m852OFn8MzVuc6kdzbsOu6cOgg6RXh5hlVzb1OPB1OPD3c7z78+dCBubkcgLzzl4fzBiDEdfVI9/RiDh2o7p58wM6pUqsHQl27Vs1rN28ON38od12OBn4H4DhODDgXeKPruj8C3gyUVr0E8H6P4zhbgBcD3wyud113q+u6R7queyTwHeCtuYLDksT3ly4TM5BXBbNexOIF9+3nuO7eVKN/XtBHsxCTOWnymkTGJNzOcQ9e1oswOaHLFj8rRnJvuO17biumg4LMi+b6LhZYOTNlx2q9xmIsDgdGMTMzhdf7fegQt5IsBtbazCMdNB7ySE1U59ITi8PBMUyx4poevvtHtW6mtRTJ8sd+J8V11ZwQpK+67DymU5IWgK2307OiYYH0Jr1g6zZ1QhxWK/DeHVXVnvCflbHwyr12phKh7mxgh+u6u73vo0AlI+y7juPcD/wAeJvruvsdx3mL4zhvqWAbSw5/QJV9yOdUwawXNovOPvvQCJJMQHc33TVmXCg7SS/kb9oMvKwZ7fxyrSjrRbmCf4W2PzNtKzCXyCCSRT0qKjeT4QRIFwzFshYXTelY77Hoj/2RwqkVM4GanTJxCWAzy3R40oEq04KGTtGbDFm5thg1FMnK3KeddH2SCZu5L+zzKJdqFSgdUqgRvPuuxow/JfHjBTrwmdQUIgOLtScqJUzq+w6inNvS74B3OI7zZeANwE8C644CQttUXNd9RoFlXyzS9nVht9vxhH3IN0pLGY3bm2FsxKZdDfZrMIZ0h03IVQQ/neVwokhRrtZMoKQvYrWn7fxy9etQhJmAeC+/opmzClGhJkRiccwDf6g4m0nLSCZgMGozLAUJaI+yjiO3WFSNZAU3HnZEfoOMsNKhL+pYHHY91upeVEVNltygQqSIy6NZWIDRfVBLPEgNRbIyQp2XXlm6qnX+aCLJBKwfrD4APxIBbLrlip5OhTKptTORfptSuhHk1LxQcgjWCarmnoxu6ox7MQTljuJvgbcB+4HjgI8H1r0G+FWD+rUs8KPv6e6B/SOY2SLuDVD3iY1PMctA3TIu+IVzikzS
TassD9Ccap21MJyAtRtsXYpyVFNQp9Kig7E4zM7Awf3h99FCio7hYtqjeo/FctatTIrISH3212QkFoeRfXai3GnUYsmNhqjvMTZsXeCq1aJDTUWyMsUR/fTKHUCowl6lqLawXqozCjVmqMGdrSzeWGt66vQOoWr3YGhohqxWUFJ4cF33ftd1jwZirutuc103WJL3KuCtDe3dUmf/GMzPwzEn2O/D+4q3bZSGvsCL0BhjU4rVQRvjF84pO4FqwcNKok2o1lkDdvIbcvJRxYszTOGjIJnx0M4CV5BihYKKuXjVeSxKXz/0ryl+viYnYHVksYBdp5GxWhaJZ2pnarDkyuq+8lbLjGBevX9ztUWyzMyMFRj890qn368hkd6VNiagUmFrsoNiHiDjztYQCtS8UAJUe0/WcU7VLhQVHhzHydgOXdfNU124rnvAdd1Jx3FWNqpzSx7PLUVOPt1+H95TvO1kgzT06zbAit5s/92JQ9b3sV5Scilf+dQE9PTYku3NJha3Fp+52ebvOwzJvaFfptLdbXObV5IXPpmwBXLCar4DLmjtjkmN23NR4GFdTHvUECtYLF78fFXpc98uZCbGHTAe8qjVklvGallz8C9U78PvxdjISadn96WNMZMpGD9Y+zunr7J4ADM7A7OzHXUfSl9/JtFIvWmpJ0An4N+TldZfOThmx9kysTz8wXGc9zqOU1B14jhO3HGc9wLtX2mrTfEnFWEe8o0KrpSurvzAx0rdWcrtIxaHkb35QdmQKQrUEh/6WNwGh42UsPi0iMVg5gquQV9l5uyKXdMGY9Dd3RmazFJjuNikrAFaNykhOJvURGdpPHOJhXDfaVdqfJ5KkSx1GZIJqwVfu76q7QOLE7hKJyr+2N92slXMdML18TPL1frOqTQeYLID446KZYurBx2cxKEpBGILK6LOc6p2oFQ07LnAPwB3O46zH3gQGAcGsPEP64BrgWc2uI9LFz/6/rAj7M1a6iHfSI1AzgSnLlqz3O3Pz8P+UTsBDWBSEy0zkWYFtMaf1JI+FKWatG6RgcpenMMJ5LgyVXIDSHe3vX4dMBkpOYaLTcpSE9Dba90f6kU0DrfeiJmbyy8a1eGWB9autxPkDhgPedRqyY3F4eZfYGZnCo4X33+/puBIL/VoRUkQCIz9jYd1THrlur1zIgOVaeUzPv4ddB9GBmBmuvAzpVY6OX10M6giqyE0YE7VBhQVHlzXHQHe7TjO+4CnA0/GCgz7sYHTt7muO9eUXi5RsqLvywXvpsZhRZ0nNh4Si2Puu3MxK0fSS3E5uLH8j8NuHzIZnLJoZY7tgOa07XIHVaOpqCCQzszO2AxblT7M2j3I3KdU2sdIkUlZanwxV3y9CFq34jk1U1ITSO790EFYq2WHjIccarbk+hbBkX2weUv++mTCTt5rodqA6WQC+gfshLhT0itXklmuFJH+yo63EzXtQeVHLZatQqTGYXVf58ZhNZjFuJoqLA/d3bAh2piOtYCyeThd150Ffu19lHoScBuRaBzzx+3F2zYyF3U0DnOzcGAMNgzZfm0Yqp9WI5B1Rk44NXtdagIGW3NDSWTAahLa8OVaaTAz2OPxK3aXxXfVqlB4sOP0gfZP11oq7WOksPbINOAek+imgHUrV3jocMsD2GfHvt3l27UbNVpysxQiOcKDn0VPnvzU2vqYKQhWYXBmIGtRx6RXriSzXAkkMoBJPRT+B53o4x90u6y78NA6T4COoa+KgPVkAgY3LimhbGkknO1A8qLvY3EYG8HMFTbmNLIKZm7RIzNc55Ri6wbtS7DQJH1yvLVp4dq10FWlwcxQWQq/an0wY3EbTD9xqLLfNZlSaR+l9/9v787jHLnqe+9/qrunZ6bVPVt3a0YztrHBNuAFjAOEfQkOYQ2QhAMkgUDAXAi5QCAb2SDkycZ9yDVLgAuBBwgEcgLJDUlIgkNCgMRmsdlsA8bG23g0o+7Z3Pumev44VWpJraUkVUkq9ff9evWrW6Vq
9VG1VKqz/H6/nS5JQPWxSiLrSrDszK9KhuAXi8EHdYpGPGtwa/9dLYFU6XQmt1G8R5hFr8Nz6GZBsNYvVErv65SkV24ps1wjLcYDpLFQo1dn8CMOqaq23SuZ8ZZjHmK/puoD6jz0SnX0fTYHftEVFqolyZmH6g/Cmc5S5lWrGZQd6nF1z6aBjz3SVp2NsCJthAu5dtdgNq0Y3i+apX0cq3GRkcRrcXwCdme2Hq/lJfd+T9OIZy3ZXKpqCZR0+L8uzVrWOneEWfTiOIe2eKHir6251LllM9quTf3+fo2eWa6hzASsrTaumVQu7TMPcVtMT7XtnmmxQ+/7fsdpiPuROg+9UjXy2/SiLIn12KEDU66gUCHvlm7Mz8XfS65xke6vrbliTb0c6cjmYLaAv95n4Tvt5ITOjLsL0uWlSI/P2Hjrsz4pyLBTjJL2sdZF2UL8s2Ce59WOE0nhiGctqelMVollhLVO/E8pQDmmzkNLI8yzJ1yMTfmgVHmb+lBbmeXqaXVUfmHOrUXf2Vq14J4aCzP+JJBxKYFz4MBptUjf3Fn3mazOg8Rhy8hvs4uyBKtgekPDMH3QfcDMtLmcpdnfCDoPFaPiSdWuaEVpxqd/Cl21HczcQg7qtiuITx50wfR9fLG4cfwo0OQ1XOuiLKFZsJrpWhdTmOWllhR0JmuK4X9dNw1v4bhLkbp/sqPHB4JRzhYuVMJBqfBCPA3pldvJLFdHyxWAgzX+fR0PUi3JmYcerwRIA6/VpYQDmKYVGgRMG2PeGuUBrLW/F19ztpHq6PvxPa7IV91qtAmvRZx2H4Sbgbqdn8grZHNumdbZ05sfqn0w+loR+Hgw5ufcrnaDmTPj7rkszMNUk0xZhTze/R/YctO8HTs2g+r71EY+COBt2HmYqKgu7q+uuKQBSbwWp3Nww3/hr6/jjQSn3D547cdi/2R6agmUW5jrfKQ7m4OvfRl/fQ1vZDO5hD+Th6lDblCmQ15mHP/OFpZIhLE1wYV4Kb1y2Wu978R5cdVqKs00rvHfPQZDQ7HHPPi+n87j0W2Z1or0+eF7b4CqS0PjmYdzy74uwtV8eApwIfBjwe2Lkm7gwKqKvvc8z6U9rDG93I0qmG4U7XhZyrx40rRWPD5UXmT0Q47tfhw5bffDdCzaiJS/vuZmWtr9sG5UNbkPhDMPjU7WXvXMQ9J1VIpFOFUobRqUSq7e0HBqaglUiGMmdzqYtZwtVG5vd1avllbrFhTyLsZmvOwCsM/TK7eTWa6uFkfl/RSu8fc8z3WS4q4yvbzkzlMpOx5dNzYOqy3E1YSp76fSm5a7lkZ1Hl4W/myM+STwImvtp8u2/RTw/GSbN7hqRd972Rz+3bdv3bkbVTCzOVhZwr/9e7B/Kv56EtPhRfoxV/kU+mP0dWKfW+/aRxc/pZGKNpct+QvzjetWnJxxFz1tjoR42Rz+1/+rrd/thvX80eZpH6svyoLXYhLrfStmt8KlGQlUs+6ZcOAhTWKYyS39X2fycMjVdCgFRz7oIZ23ESouVKKck8PliOXLcFx65e/3b7rWmeMwvqe1zHL11KvhUs/CnMsGmDbtZOFqph8+j9OgfHlwlOukQh4mpytmJwdB1JiHpwP/t2rb3wPPiLc520Pd6PtsDk4W8Dc2Krd3YYS+1Jbv35RMYM+BaReUXXaRXhp97eEFlAto7bN0rYU2g5lLwYJNRqQ6XSaQzcHCXDIBezHYyB9tnvaxevQoyddirdmtAZl5gLJ4Jt/vdVMiiW0mt9b/9explxo1zpkHiBTHBNT/XFlacIkw+lDb8Ve11KseX0+CsYSJaiNdaFP9sBIgBVpNlTuIaVoheufhNuA1Vdt+CagxTC5N1Yu+z+ZgY8Ol2ivXjRGBsC1rq4kE9njDwzB1sOoCKnxePT5Z9VkV1rY/TCN2HtpN0xqqrgvSbzaOH22eqar6oizJ1+KefbBzV9WSvTkY3Vm7iF3aZHMua9p9Z3rdkmjimsmd2Au7dlf+X6sD
ljvVQpVpf30dTha2zChupms9VuO3+kCcaSx37nKDVJEDplO6xj/JmYeU155JXKsB6wOYphUiVJgOvAL4O2PMrwP3AkeAdeCnkmrYQKsz8utNly1vKF//2Y1RygNZF4RVLMaz9rSW6ov0hXm3FnDXWDJ/LyIvm8P/5lfxNzb6owLkTB7vgotb/jVvx6ibRm026jaTdxc9E3vba18QTO8X8njn91fYk7+yTPHUbPOTdflF2b7JRItFleKZql/7abxoqaHivBV3xdskxDTCWisNb6xpWilPghDhQuVUwZ2/aw1KBW3zHvCgWNoVF39tFU7PxhZM6grrRUtv66+vu0G8Xg9etcHLjOPn74n1MTfjsAbjvJSYFmYe/IW5IDlDQtdUPRR15uFbuODoFwF/BvwscJG19sakGjbINgPE6pzkq0bBu1EF0xsZcVk5SC6lWBiUXVresDgHmYwrItdL0znYWN8649MD/vqaC8Bs92QTIbWjH3RO217/HAbT99FsTUnEeBGvepYm6bTBVXVOXJ2B9F201BQsEeurpX+NxHg+9aZzW2cehoY2s+h1qoWZh7rLEacOguf15/u1ui5FHDIT0Zb0dCOWMCmJxjwMyHkpKWVxNU0FsWDbcubBGDMMzAP7rLVfSr5J28BMnej7vfvdyPGWglJdWh89nXMXX0mlFJt2QdnMnYE9+4Mc270/cXvZw5uBj70eIQiDmds92WTGm8ciFPJwzv3ae3xwgZv7+zRda9R4juqLsoSLRXnZHP63vopf3HAZigZo5qE0a9mPr4da4jyfZnPwzes3Zy0LeZjMbqbk7VQmLAjWPAC43nJEl155uj//P0nkwI9aWK8PYu7aNjYOSwvxzpar8xBNKzMPpeyVfZIGPkZNz3DW2g1jzK3AJNCniyZTppCHA1Nbou9L0+DVa8m7VAXTy+bwb/lG82DTTh4fKP7hr8LIDjhzEo60fxEbm7IZH++Sh8XykP59Zyi+862wtFja5j36SQw964Vb9i1++iP4N17nbqytun3b7jxsnXnwv/01in/zISgGMz4zebwrH9Xe44cipmv1T5+k+P9dw9Ar3oi3Z19nfzOCUpuadQKDD4Dix94Dn/qwi0NKslhU1s1uFX/n1W7g4FQBHvKIZP5Wl4Wzlv5//CMbX/9y7X2e+lyGnvi0ho/j33qz+39UJ4yoZWiIoRdd3db7NdaZ3CBOrfi7wf/19CxcfGnnjxtqZX11Ie/W/Nd6n3U5XWvxw+/A/8F3m++4tOC+xzzzEKnoZynDWgovlksxWwswsQdwr+viNW9x2yLwHvfjDD39pzc3BNmDBiIOK0k7d8PwMP4/f4qNL/5r433D2a2YU9/3g6jDIx8H/tEY8w7gKFBKq2Gt/fckGjbI/LOn3chtLdkc5I9WbutSFUzviU9zaf6SikG4+FK8J/yEW2cKwIV4D+vwIjYOe/fD6Gi8I3M//B7cdRtc/nC83WP4t38P/yv/CTU6D/51/wE7d+Gdf6HbMDYO92szliAzDsfvrXz8b34FTs3iXfGjbsP9L8Z71JPbe/xAOJLejP+9b8N3vwW33QJXPqajvxlJIY+3Zx9es9HEA9N4T30unDm1ue3CSxJrlveQR8BjnwJra27D+RfiPfaqxP5et3nPfhHcdEPN+/zvfRv/a1+CZp2H73wdCsfwfuSxTf+ef+N1+N/+enud/RhnHryHPBwee1Wp08/5F+I95ikdP27Jrt3uYuXUbNNd/WDWuNbnhLfvAP6tN8fXrkbtWF/Hv/4LkDsX7/B5zX8hezjWWThv3wH825p3XPxwmWq9z+J+Vr7sMug8cMetcOcP4NKHNc3U5992C/5XvwjlnYe0Bo93med5eM/9ebjnjmi/cOR+8ae+7wNROw+vDr6/pWq7D9w/ttZsFwvzMFl7Taw3ncP/9tc2lzdA197U3jnn451zfnKPv3MX3ourk3b1njc0tDWgtUPhYw29/A14mXE3u3Dt31f+X3EBvpw9hffc
n2fomabjv+tlJrYsW/ILeTj3Aoau/tWOH79kOgf3ncFfXmzc2QyOg1/IR8u73iG/kGfHoSMUm+znDQ3hPf8Xu9Ci4O/t3Y/30td17e9129CjnwyPrt0hLX7wz/BvvanpY4SxOFFepxtvfV3779cYZ3K9PfvxXvrajh+n7uO3kkq6kId6F+sRYqFic2oGNjbwrvpJhnrRQS6lkp5rfBEdHtOp9AWzepmJLYH0pc+cl70er0niguInP4D/5Wsran/4C+krmNcrQ0/76eY7DbhInQdr7QVJN2RbWZzDO69Onyubg/V1OH2q1MFIYxXM1JnOwYl7m+8XVSEPmYnNKfFsGJQ96wIYQ6XsLDGtiRwbdx+c5QWhZvJ4F18ez+MHvOyhIMPOcaj3WobND+huLZko5Bm+/MqmnQfpoukcXP+F5oXOCvno8VbZHBy9q732LHZnJjc22Rzcc2fDXfziBswe35xdrJYZh+Ul/PX1+OIx6ok7XW2LNosyHocLmnQe9k3i7UzhqHCtehaNlq1VK0+vHHY0NPMgLehxmpttqkEPv7TWvTwnt97UifOyOZg9gV+M57KzulbD5v+16iI67oDBzASsr7kiWAQFsU7Nxl+kJrwwaBL3EMYgdGO9dZj2cTh3TuJ/S1oQvvZmT9Tdxfd918mN+DotvV+jxEdUS1mwupfNwckmz/X0STfoVO/4tVpsrgNxp6ttWSmVdOMQzVQX76qR8ccPOt9ROsU1P4808yAtiDQEYYzZg1uy9ERgCjZXIFhrIyxqlJC/tuZ6/PU+vMpzcj/4oW7bwjxePwQWD7Jszq1bPnMKDsSwBraQx3vAgzdvT5cHZV9R2ryZtjemqfPytbA7d25esMX9IVmWnrPhR1X4/LpRUC5I+zh8SJ2HfrI5EtxgWc19Z9x5MerrtDy9covvndSlyZ0uKx5a77k2G4QYK8sQk3TigkLeZQ3sVc2PMDi12TmnkHexSGlUK+PPTB4OR7xOKE8SctElpcdqFishEoo68/AepbyTRAAAIABJREFU4ErgrcAB4H8CdwP/O6F2Da5m+eT3TbpMRNXVaPWmTlScVZP9tbWto/37DsCO0a2PP3McJvbijWU6/rvA5sl/MRiRSiIVIrg4hz37Gi5HKhXIGRuH0yfdzECSgraMaOahv9SpX1OhxdepFy7za+f9mrqZh+C5Nnqv1asdFD5Gq1VxOxDOuvZqWViUVNL+8qLrsKZ15iH8vAj+n35xA2ZORD/PV6VX9n1/81wtEkHUzsNTgZ+21v49sBF8fwHw4sRaNqiapAl0wbubAXJproKZKlEucKI6eWJLrYbq/2uoenlTx6pGpOrlfo9Fs/SPYYGcS65whaAaLFuJQ9gWzTz0mcyEu9gJC/jV0PJSl9L7tf5j1rUwl670nFE7XyM73CBFLa0Um+vUzPHeX5RHPTf1up1t8oaG3Xsq/H+emnUzcVE732FR2PB9t7rqlrumqFMtvRW18zAEnA1+njfG7APywIWJtGqQBW/2hh9e2bKqpWmugpkm+ydhZCSewN56o6jl/9eyfWMNLKweYZzJu+DQBF4/W6rrVimtOb7sSvc96biHQh7GMnhh6kLpC57nNc9mVqrMnK2/T7kwvXI7Mw+L6Zp5iJJKulQ1fqjOR3pZsbkkuRHwmM9pbfBqnWvLzTSeqUmF8gxa7cwwl3ewVCBOWhS18/AtXLwDwJeAPwfeC9yaRKMGWoQCRd50DmbywVRiiqtgpog3NAxTEVMiNlG30mvWVfAOg7LDAN8kZh788pmHpEbXsjk4cxJ/ZaX2/cEHdJiLP+mg6VYCBqW73Gu/SeehhcrM7aZX9tfXXeHGFF0klZ5ro+PXLPi3ejljUk6fahy43S3TOZg7i19WpLPc5jk6fWlaS8bGK8/z0FJnKOxg+b5fel0o5kGiitp5uBq4M/j5tcASsA94SQJtGmh+lM5ANuemEc+eKquCqTd14qYPxTM6PnMcdo/BeNUI+HRZUDaUAnxj
r64KFSNSiU3Nl7Lo1Fk6UsjD/im8/ZPu9Z70zEML2Xqky7I5mC3gr6/VvLutTm6Tma+aworGaTufNniufrHY/LW/eww8L/llS8FsY6/fh01j2Ap52LMvuYKo3VA+8zCTdzF19Zat1ZLNuffD/FyshRNle4ha5+GHZT/PAK9IrEWDLsrMQ3l2kqWlpvtLPLxsDv/WmyprJLSh3gh46f86k4eLH5RMMPPoTrf8amHeXaidnIFHPSm+xy9T8TqtkQ2s4oKw2RrkDvnrazBbgEc+IbG/IR3I5lwc0MkZOFhZ08T3fdfJvf/FLT2kl83h33wjfrFYf7lOtfD8m7KZXC+bw7/phtrP9expN9jU4DziDQ2VasAkqedpWkPlqUjPe8CWuxOdke0SLzOOH8SRNV22Vuv3pw8H5+9jka5LRMpFTdX6DeALwH8CX7TWnkqyUQNtYd6t7d3dYMSjPEAurEasEYHk1Sqc045CvnYRwKrAxySCmT3P2xyRmi24C7ak1vWWp5+tdX8hXypa5WVz+HckuMrx5MyWIHXpHxUdzarOAwtzbgS01f9dO+mVSzFnKbtIyuZcQOuZk3BguvK+qIMQmfEuzDyEgduTyf6dZqabpJIulKVCT6vMRGVWvXbePwQdvrVgRlDXGRJR1G7qrwL3Aa8Hjhpjvm2MeZcx5meSa9qAilLd9MA0DA+7E8KiRgS6pW4htxb4Gxsu21KtE/mBKRguC8oOA3zj/t+Ga2HDmIOE1vV6mXEYn6h5vPylRZg7u9lxmT7UcNlKxxJKSSsxaZQxqM2KxF5Y86CVoOmUjrA2OjeVEhM0O36ZieQDptsYAU+Ct2u3GwCqdbxWVlwnLO3nisw4LCwEQerHWz/3TR10S9kK+dS+L6R3oi5b+jzweQBjzCTwBuCXgV8ChhNr3SCKkEvZGx6GyYPuTT087N7gjWYqJB61Cue06tSMK+hU40TuDQ3D9MHKmYckZgWCmYdE07SG6gVyzlRdzDdYthKHrjxXad/EPti5u+aFftv/u/L36wMvj/QrflrXdpc/1wc9pPK+mbwblKiekaiWGXfr25PUT8uB6p2bwhitfmlnuzIT7pyaP+pm4FrtfO/Y4V4zQWwaIyNu2atIBFGXLT0Nl23picC5wHXAm3DLmKQFftQS8Fl34vP27HUzFT0eydkWqgrntKXZKGp54ONMHu/8i9r/W/Vkxt1FeiHvLtgmkqso62Vz+Ld9d+sdVReEDZetxGEm+ecq7fM8D7J1spkV8m6AZOpgaw9aPZMXRVpHWBukkvYLeZg66AadGvDGJvBPHEuqhS52ZSaP9+ArEvsbrfCyOfxbvrH1jkGZpQwz6wXLQdt6PkEsmje6EzITylQnkUXLiwefBW4H/hj4qLV2PbkmDbiFOZjY23Q3d1F2Cxw8kr5RspTaUjinDc1GUUtB2WvJBfh6mQn8e34YBAUeSvYDIZuDr34Rf23NjWQFNlMHHtrcjwZrkDvUlecqncnm4OhdW7fPBFm5doy29HDVM3mRLMyncia3lEq61rkp6mh/0jEPZ081DdzuqmwO/vvz+CvLFZvbSWvaj7zMhBuQueMHbkMbx92bzuHf+F/4+6dSl0RAeivqcPYTgA8BzwfuNsZ8zhjz28aYxyfXtAG1OB+tumk2B8tLbqQobaNkadZpVqBC3k391gu4DoKy135wc3IBvuFFQjcqvWZztatHF/Kwd79bewwNl63EYiahJWASGy+bg9kTLi6oTEeZb1pN1xosG03lTG6NwmcuU1XE9e6ZcVgK1sgnod9G9EvpWqtSSRfyMD6RrirjtYQzD3f+IFi2FjFpQLlszi1lmz2h6wxpSaQzqLX2y9baP7bWPh24Avga8Ou4DEzSioW5SG/S0gn43js189BFFYVz2uAHxZrqjYCH/9fVG6+vuB2rzITLGjXbRhBdi0rLs6ovaqou5hsuW+mQCxg80T8XLVLbdA421l1cULkOapGExeciv1+jLhvtQzXPTXNn
YGUpWsc5M+46+nUKp3Wq3+KO6gWZV5+bUiu8jrj3Tpg+6GanWqTrDGlX1JiH5wFPwsU8XAzcALwbxTy0xC9uwOJCtDdpeHLb2MAb04hA15QXzpnY03z/aoU8HDrS+PGBlW98peJ2rMLX18ZG8h+SWRe/4M8cq1yOVMiXKktv7ltn2UqnTs26i9I+uWiR2rzs4c06J2EqzcV5mL+v/f9di+mV/cVogzd9KZuD1RVX1yEsBtbKaP9YWQHJJI5BmOCjWeB2t4SvserZzkIe78IH96BBMYvjPJ8tu85I6/tCeiJqzMPrcB2FNwDXWWuXkmvSAFtsobrpVBa8Ibe0RSMCXVNROKfFzoMbAc/jPeTh9XcKgrLXf/j95AJ8y15fXjaB4ORy4xOwO1MxuuevLLvc+1UXNF42h//Nr+JvbDQN7mxJvy2XkNrK417CjmWwpKTVNK2hikD8KLVZ0jzzMF32XIPOQyuj/aU18knFPRTyMNk8cLtbvLFxGN9TeW5aW3MzX9kf62HLYlIWo9D2uW+qLI13St8X0htRU7U+KeF2bA8tpAn0RnbA5LTWInZbWeEc7wEPau13T5+C9cYj4JtB2ccTC/D1MuOUFjYkvWzJ87bGiczUSYVYvmxlOr7aE4MSADnw9u6H0dHKi7lOl7q0ml55Ya7tjkrPlZ+bLr7UbSvkXYa4yQij/eHnTkK1HsIlm32l+tw0e8It3eq3drbBG9nhBqBWltp+Pt7Ona6g35mTus6QlkRdtrQT+D3gRcCktXavMeapwMXW2ndHfIzXAVcDHvABa+01Vff/HPAbwc154NXW2m9FexopEZy0I08PBgGGGhHoovLCOa0KijU1vTjJ5pINZg5fX6OjnVXKjsjL5lzQXqjOTECtZSuxmMnDjtHNpRzSl7yhIZd7v/y9VZ2Vq1WtpldO8cwDk9nN4qGhQh4ms+5CspngvOAvzMee8cwFbufxLmyzPk5CXHa7mzc3tFmQsG9lxmFlqbNZ12wu6Dyk9H0hPRE15cQ1wGXAz0FpUPNm4NVRftkYcxmu4/BI4KHAs4wx1Qnu7wCeaK19CPAHwPsjti09wpmHiCnRSicEjQh0TUXhnBaV1tY2WSoU/l8TW2YTvr6mc93JKjOdg5MF/HWXwbl0HKovCBtVGe5Av1S1lQiqsyMFS3C8nbvaerhW0iv7xSIszqf2fOqKh2a3ztxEvRBOcuZh7iwstz8CnpjpHJyexV9dAVxsFtD0HJ0a4f+0g+Ou6wxpR9SYh+cCF1prF4wxRQBr7b3GmAaRoRUeDFxvrV0EMMb8J/A84G3hDtba/y7b/3rgnIiPnRp+qwWKgg+F1KeUS5tsDj9/D36hTkGlPWUpSMsV8jCywxV0avL4QHLLbMLXV7dG17I5KBbhtlvwD0zB3XfA+B635rhcuGzlrts3j+2uMbw9HcZ99FNVW2nIy+bwb7rBpaD2wM/f0/n/LpvDP3Z3/fdraGnJLVlJ8/m0+tw0k8e74OJovxu+H2OKefBXVuDsSXfjnjuAPow7ClJJr97yTfzR3XD3D12M1viAXChnJtzM24Fs+4+R1XWGtC5q52G1el9jzDRwMuLv3wT8oTFmElgCngF8vcH+Lwf+OeJjp0cp5iHaics7fK6b5tmr5Rjd5B06gv8fn6X426+qvcM5FzD85nds2Rx1BNzLnYcPeLlzY2htDbvHYOcuvFx3+t/eoSP4QPHtv7O5sUY2E29oCA4ewf/ytfhfvjbcyNCffACvzQwtfrEIM8fxLruyrd+XLjt0BNbXKP7O5nvLe/xTO3pI79A5+Dd/o/77tVqnndUe8g6dg3/TjZXPtVF2t/LfHRmBXbtjm3koXvN7UF1d/mDU8cTu8HLn4ANnfv9XNjdecPHAFJP09h3Azx52/9t2HyOn6wxpXdRX3N8AHzHG/AqAMSaHW8r0ySi/bK39rjHmT4FrcfEM3wJqVqk2xjwZ13l4XJ37Xwm8MnhcpqamGBkZYWqqjQIpXTbP
BgvA1Hnn4Q03P/T+E5/KWvYgo5dckXzj6kjLsY1T8Rdew8pDH+FGKausfPWLrHzlS0zu27flhH3y1AxD59yP/U2Ol/+EqygeOsTQxZcl9iG29sf/h+FsjqEujCb5k49l5U1/ih9mEwN2XHwpIzWOw/qv/SFrt3/P/Xz0ThY//VH2rq8y2uZrbGO2wOzaKuMXXMRY8Bjb8TXbLZ0eW/8ZP81K9qDLegPgeYw+9BEMdxCvUnzJq1m5/Mqa79dq3o5Rdj7ycS1Xs+6GKMe2+OJXsXLZw0rP1RsZYefDH1t7JrSGmYm9jG6ssTeG98fM6ZMMX/owdl/1bACG9u5j5yWXd/y4cfInJ1n9rbfhLS+xERQn3HHRg2uem9Jo4+o34C8vdfR8/B97OmuHz2H0koe2/Ls61yan349t1M7Db+GWGH0HGAN+AHwAeGvUP2St/SDwQQBjzB8BR6v3McY8BPgL4OnW2pqzGtba97MZD+HPzs4yNTXF7Oxs1Kb0THGmALsznDx9JvovZc+BHj63tBzb2F36IzU3F+87C9d9gdlbb6lIg+r7PsX8PXgXXRbpeE098PJkj+vEflhadl/dcP8aedNrPb/d43CZS2XrT+wDPsrZ/L142fZGLP3vu2DIhbEJFoO/t21fs10Qy7F9YOVFysJ6sfNzXJ33ay3zZ+/r7G8lJPKxrXqu8/MLML9QZ+dKxd1jLJ+cZS2G90dxaQH/4BEWLttMTT3Xj++7Cx609dj2YzvbNbq78+eTPdLWY+hcm5xeHdvDh6PFA0VN1boKvB54fbBcadZa21IJXmNM1lpbMMacB/wU8Oiq+88D/hZ4sbX21lYeOzUW5tK93lYq88qXB92dPQWrq1p734pgDba/MNd29pd+q2or0tcyEy5ovEO+77vifG0GuotIurW8UM5aOwOlWYLftdY+P+KvfjqIeVgDXmOtPW2MeVXwmO/DpYKdBN5jjAFYt9Y2qLaVPv5CejN9SKA8r3z5dhUqa134XugkgLOQh+ERONC/07si/cIbG8c/HTVUsYH1dVfZWJ0HkW2pYefBGDMGvAm4ArdU6S3AFPB24MeBj0T9Q9bax9fY9r6yn18BvCLq46WSZh7Sb2KfK8xTlW5UI+Bt2D3mMoV0EMDpgtQP4g31R1Vbkb6WmYgnYHplyX2PGGshIoOl2czDnwMPA/4VeDpwOfAgXKfhamutFru1YmEeb7KDlGrSc66i8qGttQoKeVfAqc2sQduR53lu6VKnMw+DUvBJJGmZcVicx/f9zpI1rASxVJp5ENmWmnUefgK4IohVeBdwN66Q25eSb9oA0szDYJjOwbG7KrcV8jB50BVykug6GAn1fd/luX/gZTE3SmRAZSbccqOVJdg11v7jLIedB808iGxHzUqyjltrCwDW2qPAvDoO7fGLRTfCOqaYh7TzsjmYOYFf3Cht82dUqKwtmXEXC9SO+864EVAdd5FoMjEViguWLXm7NPMgsh01m3kYCeoulOY3q29ba/89obYNluUl8IuaeRgE2RxsrMOpWZg66EbAC3m8Cy/pdcvSJzPhOgHtUJC6SEu8zITLFrcwB50sodWyJZFtrVnnoQB8qOz2yarbPnD/uBs1kMKlGcq2lHoV6VqnDsLcWdc51EVsy7zMOP7xLSVfIlGQukiLYp550LIlke2pYefBWnt+l9ox+ILc2p5mHtJvuixd6yVXaAS8E51kfynkXbamA0pCIBJJKT1yZxmX/GXNPIhsZ81iHiQumnkYHPsOwI5RmHGdBn/muNuurD+tGxuHxYWK+JHIZvIwmcUbablcjcj2lAkLM3Y68xB0HhTzILItqfPQJaWTtWYeUs8bGoLpsnSthTx4QzClEfCWhZ3pxYWWf9VXmlaR1oyFy5Y6rPWgZUsi25o6D92imYfBks1tFoor5GFyGm9kR2/blEZtrsEuBalrqZhIZN7oThgd7TzmobRsaWfnjRKR1FHnoVvCk/WYZh4GgUvXehy/WFSa1g54
7a7Bnp+DpQUdd5FWjcVQZXplGUZHVdldZJuKvFjYGDMJPAPIWWvfZow5DAwF9R+kmYU52LkLb4dGpwfCdA7WVuHMKTcC/ojH9bpF6dRu9pfCMUBB6iIt66S2SmhlSUuWRLaxSDMPxpgnAt8Hfg743WDzRcB7E2rX4FmYV7zDACldtN55q+sYau19e4KZB7/FkVB/RmlaRdqSmYDFGGYelGlJZNuKumzpGuAF1tqnAevBtq8Aj0ykVQPIX5hTdelBEly0+jd/A9AIeNvannnIg+e5OhsiEl1mvOOYB39ZnQeR7Sxq5+F8a+3ng5/94PsqLSx72vY08zBYDkzB8Aj+TTe62+o8tGcs4763uga7kIf9U3g7RuNvk8gA8zqprRJaWYJdWrYksl1F7TzcYoz5iaptVwHfibk9g2thTpmWBog3NAzTB+HUjNugEfC2eEPDrgOx2GK2pZnj6rCJtCOGmQctWxLZ3qJ2Ht4IfNwY8xFgtzHm/wAfBn4tqYYNnMV5VZceNGGcw/4plwJR2tPOSKjStIq0JzMBa6v4qyvtP4Y6DyLbWqTOg7X2euChwM3Ah4A7gEdaa7+WYNsGhu/7mnkYQKWLV13Edmastewv/uI8zN+n4y7SjnbjjMotL+Ep25LIthU5ZsFaey/wtgTbMrhWV2B9XTEPgya4eNUIeIdanXmYOQ6ApwxXIi3zMhMucHFhDvZPtvcgK8uwSzMPIttV3c6DMeYv2QyOrsta+5JYW5Qi/ve/g3/8Xoae+LTGO6q69EDysjn3BtFFbEe8zDj+yULFNn9lBW79Dt7lD9+yv19QmlaRtgWFSv0vX4sfxGp5F12Kd78HRH8MLVsS2dYaLVu6Dbg9+DoLPBcYBo4Gv/cc4EzSDexn/o3X4f/tR5rvGEwPK+ZhwJx7f9idwbvwwb1uSbrVmHnwr/s8xXe+dbOjUC7cNn2oC40TGTDZHIyM4H/+H/D/+i/w//ovKP7V+yL/ur+x4QpkatmSyLZVd+bBWvv74c/GmH8Fnmmt/VLZtsexWTBue8qMw+ICfnHDZY2pRzMPA8nbu5/hd36i181IvyD7i18s4g0F4xnH73XfTxzbOsNQyMO+A3ga+RRpmTeZZeiaT8D6GgDFj74b7vlh9AdYWXbf9f4T2baiZlt6FHB91bavAI+OtzkpE3YGFhca7xcGpmnmQWSrzAT4RVheKm0KZxxqzTz4hbyWLIl0wNu5Ey8z7r727msteDrsPCjmQWTbitp5+AbwR8aY3QDB9z8EvplUw1IhYtYKP5x5GFPnQWSL8H1RvnRpJl/5vdxMXsHSInHJTMCSm0GPZCXo5GvZksi2FbXz8FLgscBZY8wJXAzE44BtGywNQaVOaJ4ppjTzoGVLItVKsUBBoTi/uAEzJ9zPVTMP/vISnD2tmQeRuGTGwfdhaTHa/sHMg5YNimxfkVK1WmvvBB5jjDkXOAzkrbV3J9mwVBiLmC97YQ5GRkCFxES2qu6En5qFjXXwhrbOPIRpWtV5EInHWNn7L8oA17JiHkS2u6gzDxhj9gNPBn4MeFJwe3sLTrR+s5mHxXnITOB5XhcaJZIywcxDqVBcONtwwUUwc6JyOYXStIrEanMGPWLcQ7hsaZeWLYlsV5E6D8aYR+NStr4KeAjwP4Dbg+3bV8STrr8wp3gHkXqqZh7CpUrepVe6GYhTs6VdS8uYFPMgEo9MjZijBnxlWxLZ9qJWmL4G+CVr7SfDDcaYFwDvBB6RRMNSYSzjvkeJeVC8g0ht1cv/ZvKwYxTv4ktdEb5CHoJiVszkYWIv3u6xXrRUZPCUZtDniTQ3vqyAaZHtLuqypYsBW7XtU8CF8TYnXbzhYdidKQV61rUwpzStInV4O3a4UczymYfpQ3DwyObtgNK0isSsxZkHpWoVkaidhx8AL6za9nzcUqbtLTMeaebB08yDSH1BoTjAzTRkc7B3P4yOVgZNF5SmVSRWURN/hLRsSWTbi7ps6fXAPxpjXgvcBZwP
XAQ8K6F2pUdmYjPQs57Fec08iDQyNoG/MIdfLMLMcbzLrnTVpqdzmwXjVlfg9KxmHkRi5I2MuODnZjPooZUlGB7BG9mRbMNEpG9Fmnmw1v438ADg3cANwLuAC4Pt21uTmQd/bc2N1GjmQaS+cObhzClYW3XLlsB9D5ctBbUf1HkQiVlmIvqypeVlzTqIbHNRZx6w1p4GPgZgjLk/MAGcSqhdqeFlJvBPztTfIRzN0cyDSH2ZCcjfU1qiFNZx8LI5/Ju/EcxIHKu4T0RikhlvPoMeWllWvIPINhc1VesnjDGPCX5+GXAzcIsx5uVJNi4VmsU8hPdp5kGkLi8zDovzW1OxTufcTMSZU5v3qfMgEq8WZh78lSVlWhLZ5qIGTD8F+Hrw8xuAq4BHAr+ZRKNSZWwCFubdyGgtwWiOp5kHkfrCi5dCHoZH4MA0UDbLMJN3942NK/mASMy8sfHWAqa1bElkW4u6bGnUWrtqjDkCHLDW/heAMeZgck1Licw4+EWX+zqs+1BOMw8izWXGYX0d/+idMHXQpUGG0iyDX8grTatIUhTzICItiDrz8E1jzJuA3wX+CSDoSNyXVMNSo6o6brXSOlJVmBapL3wf3XlrZQfhwJSbiSi4mQfFO4gkIFw26PvN911ZctmZRGTbitp5eDlwObAb+J1g26OBjyfRqDQpLUeql+ZOMw8iTZXeR/NzFR0Eb2gYpg/iH7sbTilNq0giMhOwseE6Bs2sLONp5kFkW4u0bMlaezvws1XbPoWrMr29NZl5YGEehoZg91j32iSSNuWd6+oicNM5uPUmtzxQBeJE4pcpKxS3q8lnlWIeRLa9up0HY8yLrbV/Gfz8i/X2s9Z+KImGpUZw0vUX5vFq3b8454I8vZr3ighUpDKuXprkZXP43/l6zftEpHNeZgIf3CDYZLbxzsvLyrYkss01mnl4EfCXwc8vrrOPD0TqPBhjXgdcDXjAB6y111Td7wHvAJ4BLAIvtdbeGOWxeyrKzIOWLIk0Nlb2HqnuIJTfVudBJH7lMw8N+MUirKrOg8h2V7fzYK19RtnPT+7kjxhjLsN1HB4JrAL/Yoz5J2vtD8p2ezpwUfD1o8B7g+/9bazxSddfmFOBOJFmwg720BBMTlfc5WVzblR0126Y2Nv1pokMvGaDYKG1VfB9LVsS2eYiV5g2xuwDngkcBo4B/2StPRPx1x8MXG+tXQwe6z+B5wFvK9vnOcBHrbU+cL0xZp8xJmetzUdtYy94O3a4E2mjmYc9+7rbKJG0GR2FkR2wfxJvZEflfeFsQzan5X8iSWi2/DYUBlRr2ZLItha1wvSPAXcCrwUeAfxP4E5jzFMi/p2bgCcYYyaNMWO4pUnnVu1zBLin7PbRYFv/yzQosLMwpwJxIk14nudGP2sFRB/IwtAQnoKlRZJRmkFvMvOwvOy+a+ZBZFuLOvPwbuCV1lobbjDGPB/4c+BBzX7ZWvtdY8yfAtcC88C3gPWq3WoNeGxJOm2MeSXwyuBxmZqaYmRkhKmpqYhPJX4n9+xneG2FfTXaUFhcYNfkNHt62L5O9PrYDiod160Wnm0YPnQOu2ocl7nn/TyjD7qcnRGOmY5tcnRsk9PrY3tidCe7ixtMNGjD2txpTgF7prM136f9qtfHdlDpuCan349t1M7DYeDTVdv+DvhA1D9krf0g8EEAY8wf4WYWyh2lcjbiHNzyqOrHeT/w/uCmPzs7y9TUFLOzs1GbEruNnbtYP31qSxv8jQ38xXmWh0ZY7WH7OtHrYzuodFxreKILs5qvdVye9jOsAHMRjpmObXJ0bJPT82M7Ns7SbIGVBm3wT7hVxHOra7Xfp32q58d2QOm4JqdXx/bw4cOR9otaJO6jwGuqtr062B6JMSYbfD8P+CngE1W7fAZ4iTHGM8Y8Cjjb7/EOJZmJ2tO9iwub94uIiPSrzDh+k2xLWrYkIhB95uHRbHowAAAQjklEQVRK4NXGmF8H7sXFImSBrxhjvhjuZK19
QoPH+LQxZhJYA15jrT1tjHlV8HvvAz6Li4W4DZeq9WWtPple8TLj+LUqTJeqSyvmQURE+lhmwtUlamQl6DwoVavItha18/ABWliiVIu19vE1tr2v7GefrbMb6RDMPPi+X5kNJug8eJp5EBGRfpYZh0LjyX5f2ZZEhIidB2vtR5JuSKplxmF9HVZXKqdzw9kIzTyIiEgf8zIT+Au3Nt5pRcuWRKRJzIMx5p1Vt19edbs6iHp7qlNgxy8tW9LMg4iI9LFGKcdDinkQEZoHTL+06vb/qrr94/E1Jb1KdRyqT7wLmnkQEZEUyEzA2ir+6kr9fVaWwPNgx2j32iUifadZ56G69oLKu9ZSZ+ahdHss0932iIiItKLeIFi5lWUY3YU3FDVRo4gMomZngOoibVuKtgn1T7oL8zCWwRsa7n6bREREIvLqDYKVW1lWpiURaRowPWKMeTKbMw7Vt3VVDDDmTrr+wlzl1MzCnOIdRESk/41FmHlYXlK8g4g07TwUgA+V3T5ZdbsQe4vSqDRiU3nS9RfmN0/IIiIi/SrCzIO/sqzOg4g07jxYa8/vUjvSbXQURnbUjnnQzIOIiPS7TJ0Z9HIry6rxICJNYx4kAs/zguqcW2MePGVaEhGRfhd+VlV/jpVbXlLMg4io8xCbscxmXYfQomYeREQkBXbuguGR5gHTWrYksu2p8xCXzERFzINfLMLCgmo8iIhI33Mz6E0Kxa0s42nZksi2p85DXDLjlSM2y4vgFxUwLSIi6ZCZ2DqDXk4zDyKCOg+x8apmHjarS2vZkoiIpECEmQfFPIiIOg9xqZ55CH5WwLSIiKRCZqJuzIO/vgYb68q2JCLqPMQmMwGrK/hrq+62Zh5ERCRFvLEGMw8ry+67li2JbHvqPMQlU1mds7RuVDMPIiKSBtXLb8stq/MgIo46D3GprjKtmQcREUmTzDisLLklStVWltz3XVq2JLLdqfMQk1JsQzjjEH5XtiUREUmDcLCrVqG4YNmSp5kHkW1PnYe4hCfd+bP4xQ3Xedi5C2/Hjt62S0REJIqq5bcVloOZBwVMi2x7I71uwMAY3wNA8b1/srltMtujxoiIiLTGy0zgA8zdB7mqO8POg1K1imx76jzE5cA03otfA/edLm3yLnhgDxskIiLSggPTAPinCnhcWnGXf2rG/bB/qtutEpE+o85DTDzPw3vCT/S6GSIiIu2ZOgieB4X81vsKeRcsPbG3++0Skb6imAcRERFxMXoHpmt2HvxCHrI5PM/rQctEpJ+o8yAiIiJONuc6CtUKebzp6kAIEdmO1HkQERERANdBmKnsPPgbG3DyBGTVeRARdR5EREQklM3B/Bx+ea2HUzOwsaHOg4gA6jyIiIhIwAs7CDPHNzcGy5g8dR5EBHUeREREJBR0EMrjHko/q/MgIqjzICIiIqGpQ+57edB0IQ+jo7D3QG/aJCJ9RZ0HERERAcDbuRP2TVZ0HvyZPEwrTauIOOo8iIiIyKbsocp0rUGNBxERUOdBREREypSna/WLGzCTV7C0iJSM9LoBIiIi0keyOTh7Gn95CRbmYX1dMw8iUqLOg4iIiJR42Rw+uHSt8/e5baouLSIBdR5ERERkUzjLUMjjL9wXbDvcu/aISF9R50FEREQ2TZfVeli4D0Z2wP7JHjdKRPqFOg8iIiJS4u0eg4m9MJPHn78Ppg/hDSm/iog46jyIiIhIpWzOzTzM36dgaRGpoKEEERERqeBlc3DimEvTqmBpESmjzoOIiIhUyubgzElYXdXMg4hUUOdBREREKpXNNqhAnIiU61rMgzHmV4BXAD7wHeBl1trlsvvPAz4C7AOGgd+01n62W+0TERERx8sedrUeQDMPIlKhKzMPxpgjwGuBh1trL8N1Dl5YtdvvANZa+7Dgvvd0o20iIiJSJewwDA/DgenetkVE+ko3sy2NALuNMWvAGHCs6n4f2BP8vLfG/SIiItIFXmYcMhOQmcAbHu51c0Skj3i+7zffKwbG
mNcBfwgsAZ+z1v5c1f054HPAfiADXGWtvaHG47wSeCWAtfZHVldXGRkZYX19PemnsC3p2CZDxzU5OrbJ0bFNTj8e29NveR3exB72vfEPet2UjvTjsR0EOq7J6dWxHR0dBfCa7deVzoMxZj/waeAFwBngb4BPWWs/VrbPGwDPWvt2Y8yjgQ8Cl1lriw0e2j927BhTU1PMzs4m+Ay2Lx3bZOi4JkfHNjk6tsnpx2PrLy+CN4S3c1evm9KRfjy2g0DHNTm9OraHDx+GCJ2HbmVbugq4w1o7Y61dA/4WeEzVPi8HLIC19jpgFzDVpfaJiIhIGW/XWOo7DiISv27FPNwNPMoYM4ZbtvQU4Os19nkK8GFjzINxnYeZLrVPRERERESa6MrMg7X2K8CngBtxaVqHgPcbY95qjPnJYLc3AlcbY74FfAJ4qbW2OwEZIiIiIiLSVNeyLVlr3wy8uWrz75Xdfwvw2G61R0REREREWqMK0yIiIiIiEok6DyIiIiIiEok6DyIiIiIiEok6DyIiIiIiEok6DyIiIiIiEok6DyIiIiIiEok6DyIiIiIiEonn+6muw5bqxouIiIiI9BGv2Q5pn3nwAM8Yc0P4s77i/dKx1XFN25eOrY5tGr90bHVs0/al4zqwx7aptHceRERERESkS9R5EBERERGRSAal8/D+XjdggOnYJkPHNTk6tsnRsU2Ojm1ydGyToeOanL4+tmkPmBYRERERkS4ZlJkHERERERFJ2EivG9AJY8zTgHcAw8BfWGv/pMdNSi1jzLnAR4FDQBF4v7X2HcaYtwBXAzPBrr9lrf1sb1qZXsaYO4E5YANYt9Y+3BhzAPhr4HzgTsBYa0/3qo1pZIx5IO4Yhu4P/B6wD71uW2aM+RDwLKBgrb0s2FbzdWqM8XDn32cAi8BLrbU39qLd/a7Ocf1fwLOBVeB24GXW2jPGmPOB7wLfD379emvtq7rf6nSoc2zfQp33vzHmTcDLcefi11pr/7XrjU6JOsf2r4EHBrvsA85Ya6/Q6za6BtdbqTnXprbzYIwZBv4c+HHgKPA1Y8xnrLW39LZlqbUOvNFae6MxZgK4wRhzbXDf/7bW/r89bNugeLK1drbs9m8Cn7fW/okx5jeD27/Rm6alk7X2+8AVUDon3Av8HfAy9Lptx4eBd+M+2EL1XqdPBy4Kvn4UeG/wXbb6MFuP67XAm6y168aYPwXexOb7/3Zr7RXdbWJqfZitxxZqvP+NMZcALwQuBQ4D/2aMudhau9GNhqbQh6k6ttbaF4Q/G2PeDpwt21+v22jqXW+9lJSca9O8bOmRwG3W2h9aa1eBTwLP6XGbUstamw97stbaOdwIwpHetmrgPQf4SPDzR4Dn9rAtg+ApuA+vu3rdkLSy1n4ROFW1ud7r9DnAR621vrX2emCfMSbXnZamS63jaq39nLV2Pbh5PXBO1xs2AOq8Zut5DvBJa+2KtfYO4DbctYTU0OjYBqPhBvhEVxs1ABpcb6XmXJvmzsMR4J6y20fRxW4sgunHhwFfCTb9sjHm28aYDxlj9veuZanmA58zxtxgjHllsO2gtTYP7mQCZHvWusHwQio/yPS6jUe916nOwfH5ReCfy25fYIz5hjHmP40xj+9Vo1Ku1vtfr9n4PB44Ya39Qdk2vW5bVHW9lZpzbZo7D7Wq4Cl1VIeMMePAp4HXW2vvw02PPQC3NCQPvL2HzUuzx1prr8RNP77GGPOEXjdokBhjRoGfBP4m2KTXbfJ0Do6BMea3ccsYPh5sygPnWWsfBrwB+CtjzJ5etS+l6r3/9ZqNz4uoHKzR67ZFNa636um7122aOw9HgXPLbp8DHOtRWwaCMWYH7oX8cWvt3wJYa09YazestUXgA2iKty3W2mPB9wJuTf4jgRPh1GPwvdC7Fqbe04EbrbUnQK/bmNV7neoc3CFjzC/gAlJ/zlrrAwRLak4GP9+AC6a+uHetTJ8G73+9ZmNgjBkBfoqyZBV63bam1vUWKTrX
prnz8DXgImPMBcGo4wuBz/S4TakVrF/8IPBda+2flW0vX1f3POCmbrct7YwxmSAoCmNMBngq7jh+BviFYLdfAP6+Ny0cCBWjYHrdxqre6/QzwEuMMZ4x5lHA2XDKXZoLsgX+BvCT1trFsu3TQfA/xpj744Ikf9ibVqZTg/f/Z4AXGmN2GmMuwB3br3a7fQPgKuB71tqj4Qa9bqOrd71Fis61qc22FGSo+GXgX3GpWj9krb25x81Ks8cCLwa+Y4z5ZrDtt4AXGWOuwE2R3Qn8j940L9UOAn9njAH3nvsra+2/GGO+BlhjzMuBu4Hn97CNqWWMGcNlXSt/bb5Nr9vWGWM+ATwJmDLGHAXeDPwJtV+nn8WlDrwNlz7wZV1vcErUOa5vAnYC1wbnhjC15ROAtxpj1nHpRF9lrY0aELzt1Dm2T6r1/rfW3myMscAtuKVir1GmpfpqHVtr7QfZGl8Get22ot71VmrOtaowLSIiIiIikaR52ZKIiIiIiHSROg8iIiIiIhKJOg8iIiIiIhKJOg8iIiIiIhKJOg8iIiIiIhKJOg8iItI2Y8w/B8XO4nzMtxhjPhbnY4qISDxSW+dBRETiY4y5E1eTpDzv/Yettb/c6PestU9Psl0iItJf1HkQEZHQs621/9brRoiISP9S50FEROoyxrwUuBq4EXgJkMdV5v18cP8XgI9Za//CGHMh8EHgCmAN+Ly19gXBfo8B3gFcDNwKvM5a+9/BfRcAHwauBK4Hvl/VhkcBfwZcAtwV/O4XknrOIiJSn2IeRESkmR8FfghMAW8G/tYYc6DGfn8AfA7YD5wDvAsg2PefgHcCk7iOwD8ZYyaD3/sr4Ibg8f8AKMVQGGOOBL/7/wAHgF8FPm2MmY73KYqISBSaeRARkdD/Ncasl93+NdwMQgG4xlrrA39tjHkj8EzgL6t+fw24H3DYWnsU+HKw/ZnAD6y14f6fMMa8Fni2MebfgUcAV1lrV4AvGmP+oewxfx74rLX2s8Hta40xXweeAXwkhucsIiItUOdBRERCz62OeQiWLd0bdBxCdwGHa/z+r+NmDr5qjDkNvN1a+6Fg37uq9r0LOBLcd9pau1B137nBz/cDnm+MeXbZ/TuA/2jliYmISDzUeRARkWaOGGO8sg7EecBnqney1h7HxUdgjHkc8G/GmC8Cx3CdgHLnAf+Ci6HYb4zJlHUgzgPCv3UP8JfW2qvjfEIiItIedR5ERKSZLPBaY8x7gOcCDwY+W72TMeb5wHXBkqXTuA7ARrDvu4wxPwtY4Kdxwc//aK2dDZYh/b4x5reARwLPZrNz8jHga8aYnwD+DTfr8CjgtuDviIhIF6nzICIioX8wxpTXebgW+HvgK8BFwCxwAvgZa+3JGr//COAaY8zeYL/XWWvvADDGPAuXbem9wG3As6y1s8Hv/SwufuEUcB3wUWAfgLX2HmPMc4C3AZ/AdUa+Crw6rictIiLReb7vN99LRES2pSDm4RXW2sf1ui0iItJ7StUqIiIiIiKRqPMgIiIiIiKRaNmSiIiIiIhEopkHERERERGJRJ0HERERERGJRJ0HERERERGJRJ0HERERERGJRJ0HERERERGJRJ0HERERERGJ5P8H5Jw9lwt7ozYAAAAASUVORK5CYII=\n",
      "text/plain": [
       "<Figure size 936x360 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAxcAAAFRCAYAAAAGvlBkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzs3Xl8XfV57/vPI8vyDMYWxpaNMYMB24BneZ7wEGZCAj8CJAFC6tAmJ+1J7m2TNKfpaW7vTXvSIb3N6S0puUl60iS/m6SQEBI8YUueJM/YxsbY2IAnPONZsrSf+8faBqHIg4y015b0fb9e+6W9fnvttR7psUDP/k3m7oiIiIiIiHxYBWkHICIiIiIirYOKCxERERERaRIqLkREREREpEmouBARERERkSah4kJERERERJqEigsREREREWkSKi5ERFJkZj8ws3k5uI+b2Seb+z65YGZ/aWZb045DRER+n4oLEZFLkC0KvIHH8UZe6o+Bh5ojxpbOzCZmf6YD6r30bWBs7iO6MDP7upntSDsOEZG0FKYdgIhIC1YOhHptmcZcwN3fbbpwWiYzK3L36os9392PA40t4kREJAfUcyEicumq3X1vvce+sy+a2UIz+76ZfcvMDpjZUTP7NzPrVOecDwyLMrMhZvaSmR0xsxNmtsnMPlXn9T5m9tPs66ey9xhVNygzm2Zmr5jZ6ezXafUDN7Orsvfeb2bHzGyJmU0+3zd7NlYz+5KZ7TKzk2b2CzMrrnfeJ8xsbfb+O8zs782sS72fy7Nm9k0z2wPsauBeA0iKN4Dt2R6MhdnXPjAs6uyxmQUzez0b13NmdpmZfczMXst+jz83s8sbE+s5fg5fM7M3zKwq+/N7ycw6mdkTwDeBa+r0ZP1l9j2F2Ti3Z++10cw+V++6bmZ/nP2ZnjCz3Wb2pfPFIiKSb9RzISLSvB4EfgZMAm4AngVOAl88x/k/ATYA44HTwE1AOwAzM+A5oANwD/Au8HVgrpkNdPcDZlYCvABE4BNAX+A7dW+QLW5eBjYBdwJHgIez1xnm7pvO8/2UZuO/A+gJfA/4PnBf9tpPAP+Q/f6WAP2AfwauBD5V5zoB+DEw/ez3V8/bwP3A89l7vg2cr3ejD/A48HHgCuDn2UdN9l6XZY+/BvxZI2N9j5l9DPgK8BiwDugBTM2+/DPg5uxro7NtZ3tY/g0YAXwOeD37Pf2rmdW4+7N1bvGN7OOrJLn5OzPb4e6/PM/3LiKSP9xdDz300EOPRj6AH5D84Xq83uPXdc5ZCOwA2tVpmw1UAV3qXGdendffBZ44xz2nAw4MrtPWAdgD/EX2+P8A3gQK65xzT/Z9n8wePwHsrHtOtn0B8I8X+J6PA5fXaZuVvfbA7PEO4Ol675ucPeeKOj+XLUDBBX7GE7PvG1Cv/S+BrfWOa4DiOm3fBWqBK+u0fQdYWef4grE2ENN/zcbe/hyvfx3YUa/tWpLhcjfXa/8LYG2dYwf+vd45/wEsTvvfux566KHHxT7UcyEicukqSD4tr+tkveNKd6+tc7wEKAKuB15p4JrfBv4t+6n6QuBX7r46+9oQ4KC7v3r2ZHevMrOK7GsAg7P3rKlzzcX17jEa6A0cSTpD3tMBONVATHW96h+cJ7Ik+3WQmR0BrgH+3sy+Xeecsze5AViRfb7K3Rs1P+UCdrn7gTrHe4G97r6/XlsvADO7shGx1hVJejreNLM5wHzgOXc/dp7YRmWvu7Lez7uQpACqa1m94yUkvUQiIi2CigsRkUt3yt0buySqne9Fd/+mmf2Y5A/K24GvmdnfuvvXz55yjmt6A8/fu2y94wKSIVEPNHCt+sVRY5ydx/fHJMOu6ttZ5/mJD3Gfhpypd+znaDsbY2Niff8C7rvM7GZgGkl+/hvwN2Y2xt3fPkdsZ+81nt//+TaUz7rO++9FRCTfaEK3iEjzGm1mdecUjCOZO7DtXG9w
9zfc/X+6+4MkQ2f+MPvSRqDYzAafPdfMOpCM399Y55wx9e45sd4tVgLXAUfdfWu9x+4LfD+DzOyyOsfjs183ufs7JHMjbmrgulvd/fQFrl3f2TkWDc3J+FA+TKzuXuXuv3P3PwVuBToDH60Tc/14V2W/9m/gPvX/HdRfYnccSSEoItIiqOdCROTSFZlZ7wba33H3s59I9wS+a2bfIfmD/pvA99z99z65N7OuwN8AvwC2A91JejDODoNaAFQC/2FmnyeZn/HfgI7Av2TP+RfgS8Az2eE+JcBf17vVj0nmDvzGzP6cZA7BVSSfxG9y9+fO8z078CMz+zrJZObvAr9x99ezr/858Gx2iNRzJL0Hg4A73f1zDV3wPN4kmatwl5n9DKjypl26t9GxmtlTJB/MVZJMhJ8OdOP9HG0HepvZOJKJ2yfdfauZfR/4npn9KcnQpy7ASJI5IX9T5xb3mNkXgJdIcv8wycR8EZEWQT0XIiKXbhLJZOr6j551zvk5cIxk3sNPgReBPz3H9WpIVjp6luTT6peAd4BHAbIFy0eBzcBvSOYE9AZmnp1v4O67gHtJejPWkkxi/sByptlP5aeQ9GD8vyTFxS+z73nzAt9zZfZ7mZuNbyPwZJ1r/zvJ6kx3Z89dQTLh+veWm72QbO/CV0lWZ9pDsnJUk7nEWA+TfL8LSXL0JWC2u8/Pvv4c8P+R5Gc/7+d6NsnKVH9OUojMJ5mv80a96/8VMINkJaqvAV91959f4rcoIpJz9v6HayIi0pSy+zJsdffPph1LUzCzHwD93H1G2rG0RmbmwKfc/X+lHYuIyKVSz4WIiIiIiDQJFRciIiIiItIkNCxKRERERESahHouRERERESkSai4EBERERGRJtGa9rnQ+C4RERERkaZhl/Km1lRcsHv3hTaWbVrFxcUcOHAgp/eUhikX+UO5yB/KRf5QLvKHcpE/lIv8UT8XJSUll3wtDYsSEREREZEmoeJCRERERESahIoLERERERFpEiouRERERESkSai4EBERERGRJpGT1aJCCFcDPwJ6AxngmRjjd0IIPYCfAQOAHUCIMR4OIRjwHeAu4CTwRIxxdS5iFRERERGRS5Ornosa4MsxxkHAWODzIYTBwFeA+THGgcD87DHAncDA7GM28C85ilNERERERC5RToqLGOOesz0PMcZjwCagL3A/8MPsaT8EPpp9fj/woxijxxiXA91DCH1yEauIiIiIiFyanM+5CCEMAIYDFcBVMcY9kBQgQK/saX2Bt+u8bWe2TURERERE8lROd+gOIXQFfgH8SYzxaAjhXKc2tN24N3C92STDpogxUlxc3FShXpTCwsKc31MaplzkD+UifygX+UO5yB/KRf5QLj7Iq6s4vWwhhddcT/sBN+T03k2Zi5wVFyGE9iSFxY9jjL/MNr8TQugTY9yTHfa0L9u+E7i6ztv7AbvrXzPG+AzwTPbQc72FvLatzx/KRf5QLvKHcpE/lIv8oVzkD+Ui4Tu34+Vz8eUL4eRxbNYDFDz0ZE5jqJ+LkpKSS75WrlaLMuBZYFOM8e/rvPQr4HHgW9mvz9dp/0II4afAGODds8OnRERERERaMt+/F68sw1eUw643obAQGzEemzgTbro17fA+lFz1XEwAPgWsDyGszbZ9jaSoiCGEp4C3gIeyr71IsgztVpKlaHNbvomIiIiINCF/9zC+cjFeWQZvvJY0Xn8z9shsrHQy1vWydANsIjkpLmKMi2l4HgXA9AbOd+DzzRqUiIiIiEgz8hPH8dVLkx6KzevBM9DvWuxjj2Olk7CevS58kRYmpxO6RURERERaMz9zBtZVkFm+EDashtoa6NUHu/uhpIeiz9UXvEZLpuJCRERERORD8t1v4Yvn4stehuNHoXsP7Pa7sdLJcM0NmJ1rEE/rouJCREREROQS+MF9eGV5Mo9i53ZoVwjDSimYOAsGD8UK2qUdYs6puBARERERuUjuDhtXk5n7PLyaXafo2huxhz+b
DHu6rHu6AaZMxYWIiIiIyAX4yeN4RRm+8EXY/RZc3gO771FszBSsV5+0w8sbKi5ERERERBrgmVp4bQO+bAG+aglUV0P/67An/yRZ7amwfdoh5h0VFyIiIiIiWZ7JwPYt+IpyfOViePcwdOyEjbsdmzQLu+aGtEPMayouRERERKRN85ozSQ/F2uX42go4cggKC+HWURSUToZbR2MdOqQdZoug4kJERERE2iQ/chBf8Bu87CU4cQyKOsAtI7BhY7GhpVjnLmmH2OKouBARERGRNsPd4fVX8fKX8BWLIVMLw8ZQMGEGDBqKFamH4sNQcSEiIiIirZ4fPYwvexkvnwvv7IJOnbEpd2DT79VqT01IxYWIiIiItEpeWwub1pIpnwvrKqC2Fm4YjN31IDZyAtahY9ohtjoqLkRERESk1XB32LYZr1yEr1wCx96FrpclPRQTZ2J9rk47xFZNxYWIiIiItHh+5CC+ZD6+ZB7s3wvti7DbRmOlk+G2UdqTIkdUXIiIiIhIi+RVp/G1FXhlGWxYBZkM3HQrds8nsOFjsU6d0w6xzVFxISIiIiIthp85AxtX45Vl+LpKqK6C7j2xWQ8kw56uKkk7xDZNxYWIiIiI5DXPZOC19UlBsXopnDwBXbth46Ylw55uGIwVFKQdpqDiQkRERETy1O/No+jQCRs+JikoBg3DCvWnbL5RRkREREQkb/jJ4/ia5XjFIti8Hjw7j+K+R7Hh47AO2uQun6m4EBEREZFUeVUV/sqK7MTslVBTA1f2xu58EJtwO9ZL8yhaChUXIiIiIpJznsngm9bhS+fjayqg6hRc3gObelcy7GnAQMws7TClkVRciIiIiEjOnJ1HcXDZAjLv7IbOXbDSSUlBceMQrKBd2iHKh5CT4iKE8H3gHmBfjPGWbNvPgJuyp3QHjsQYh4UQBgCbgNeyry2PMT6dizhFREREpOl5bS1sWEWmfA6sXwmZDAW3jCBz7yPYiHFY+6K0Q5Qmkqueix8A/wz86GxDjPHhs89DCH8HvFvn/G0xxmE5ik1EREREmoGfOomXz8Hn/xoO7YfLumMfeQCbMJMeQ27jwIEDaYcoTSwnxUWMsSzbI/F7QggGBOD2XMQiIiIiIs3HM7Xw2gZ8RTm+cjGcOgk3DqHg4afgtlItH9vK5UN2JwHvxBhfr9N2bQhhDXAU+HqMsTyd0ERERETkYviut/DFc/EV5fDuIejQMVk6dvo92ICBaYcnOZIPxcUjwE/qHO8B+scYD4YQRgLPhRCGxBiP1n9jCGE2MBsgxkhxcXFOAj6rsLAw5/eUhikX+UO5yB/KRf5QLvKHctG0MieOU7XsZU7N/RVntmyEwkI6jBhHx8mz6DBqAtah4znfq1zkj6bMRarFRQihEPgYMPJsW4yxCqjKPl8VQtgG3AisrP/+GOMzwDPZQ8/1uL3i4mKNFcwTykX+UC7yh3KRP5SL/KFcfHheXQWvrCBTWQbrV0HNGejdD3voSWzsNGou685x4Pix43Ds+Dmvo1zkj/q5KCm59H1F0u65mAFsjjHuPNsQQrgSOBRjrA0hXAcMBN5IK0ARERGRts5ramDTOryyDF+7HE6fSiZnT7kDGz0JrrtJe1IIkLulaH8CTAWKQwg7gW/EGJ8FPsEHh0QBTAb+KoRQA9QCT8cYD+UiThERERFJ+JlqeHUdvnY5vrYCjh9N9qQYNTHZk+KmW7Qnhfwec/e0Y2gqvnv37pzeUN15+UO5yB/KRf5QLvKHcpE/lIvze28/imULYMNqqDoNnTpjt47GRk+EISOw9u2b5F7KRf44x7CoS+qKSntYlIiIiIikzPfvxRfPw5fOgyOHkiFP46Zhw8YmPRSFTVNQSOun4kJERESkDfIzZ5IhT+VzYNM6sAK4ZQQFjz4Nt47SfhRySfSvRkRERKSN8Npa2PxKMjF7zXI4dQJ69sLufxQbPwProaVh5cNRcSEiIiLSyvnenckGd8tehqNHoGOnZIO7sVPg5qFY
QUHaIUoroeJCREREpBXy6ip81VK8/CV4/VUoKIDbRlMwdhrcOhIr6pB2iNIKqbgQERERaUX8rTfwxXPw5YuSYU9X9sY+9mls3O1Y9x5phyetnIoLERERkRbOT51M5lGUz4E3t0Jhe2zEeGzyLBg4RMOeJGdUXIiIiIi0QO4O2zYnvRQrFkN1FfS9BvvEbGzsFKxLt7RDlDZIxYWIiIhIC+Lv7E56KSrLYO9O6NAJGzMFmzQLBgzE7JL2PhNpEiouRERERPKcnziOVyzEly5Ihj2ZJcOdZn0UGz0J69gp7RBFABUXIiIiInnJ3WHLxmTY06qlcKYa+l+HPfSZpKC4omfaIYr8HhUXIiIiInnE3z2ML1uAl8+FfbuhUxdswgxs0kys//VphydyXiouRERERFLmJ47jq5fiK8ph83rwDAwcjN0dsJETsA7ak0JaBhUXIiIiIinwTAY2r8PL5+Jrl0NNTbInxV0PYmOmYn36pR2iSKOpuBARERHJId/zdrLa07KX4eA+6NINm3InNmYqDLhBqz1Ji6biQkRERKSZ+dHD+LKX8YpF8Pb2ZLWnm29Lds4ePhZrX5R2iCJNQsWFiIiISDPwTAY2riFT/hK8sgJqa+HaG7GHP4uNmoh175F2iCJNTsWFiIiISBPyqqpktad5v4J3dkG3y7EZ92ETZ2K9NY9CWjcVFyIiIiIfkp8+BRtW4Wsq8PUr4dQJuOYG7LNfxkaOxwrbpx2iSE6ouBARERG5BH76FL6uMlk+duMaqDkDXS/DRozFxk9PdtDW5GxpY1RciIiIiFwkz9TCxrXJsKd1FVBdDVcUY1PvxIaPhRsGYQXt0g5TJDUqLkREREQuwPfvxZcuwJfMg8MHoGs3bPx0bPTkbEFRkHaIInkhJ8VFCOH7wD3AvhjjLdm2vwT+ANifPe1rMcYXs699FXgKqAW+GGN8KRdxioiIiJzl7x7GVy5Olo/dviVZPnbwMAoe/iwMHa15FCINyFXPxQ+AfwZ+VK/9H2KM367bEEIYDHwCGAKUAPNCCDfGGGtzEaiIiIi0XX7iOL56aTKPYvN68Az0G4B97HGsdBLWs1faIYrktZwUFzHGshDCgIs8/X7gpzHGKmB7CGErUAosa674REREpO3yqir8lUq8sgw2rIKaGriyN3bXg1jpZKykf9ohirQYac+5+EII4dPASuDLMcbDQF9geZ1zdmbbRERERJqEZzKw+RV86Xx8bQVUnYbuPbCpd2Olk2HADVrpSeQSpFlc/AvwTcCzX/8O+AzQ0G+yN3SBEMJsYDZAjJHi4uLmifQcCgsLc35PaZhykT+Ui/yhXOQP5SJ/2LEjdHz5BU7Pf4Had3ZjXbvRafIsOk6eRftBQ7F2WukpV/R7kT+aMhepFRcxxnfOPg8hfA94IXu4E7i6zqn9gN3nuMYzwDPZQz9w4EAzRHpuxcXF5Pqe0jDlIn8oF/lDucgfykX6fOcOfO7zydCnmjNw823YfY9iw8dS3b6IaoDDh9MOs03R70X+qJ+LkpKSS75WasVFCKFPjHFP9vABYEP2+a+A/wgh/D3JhO6BQGUKIYqIiEgL5ocP4ivKk4Liza1Q1IFOM++jasJM7KpL/+NJRM4tV0vR/gSYChSHEHYC3wCmhhCGkQx52gF8DiDGuDGEEIFXgRrg81opSkRERC6GZzKwcTWZeb+CTevAHfpfjz30JDZhBpddc60+LRdpRrlaLeqRBpqfPc/5fw38dfNFJCIiIq2JHz2Mr1iCL/ot7HkbuvfE7nk4We2pd7+0wxNpM9JeLUpERETkkvjJ4/ia5cmwp02vJHtS9L8ee+q/YqMmapM7kRSouBAREZEWI9mTYkV2T4qV7+9JcWd2T4q+2pNCJE0qLkRERCSveU0NbFqLV5bhayqg6hRc3gObeld2T4qB2pNCJE+ouBAREZG85IcP4i+/gJfPgePHoHNXrHQSNnoS3HQLVqA9KUTyjYoLERERyRvuDls34WW/w1eU
Q8Zh+BgKxk+HIcM1j0Ikz6m4EBERkdT5kUN45SK8fC7s3QkdOyXDnqbfi13ZO+3wROQiqbgQERGRVPiJY/iqpcnk7C0bkj0prr8Ze+KL2MgJWMdOaYcoIo2k4kJERERyxqtO42srkiFPG1ZDbQ30KsHuzu5J0Ud7Uoi0ZCouREREpFl5zRnYsDpZ7WldJVRXJZvcTb8nWe2p//Va7UmklVBxISIiIk3Oz1TDpnXJJnerl8HJ49C1GzZ2GjZmMtwwGCsoSDtMEWliKi5ERESkSfiZ6mTI06olyZCnqtPJxOyhpdiYKTBoGFaoPz1EWjP9houIiMiH4rvexMvn4MteTnooLr8CGzMVGz4GbroNa6/lY0XaChUXIiIi0mh++hS+ojzZ4G77FigsxIaPwybOhJtv05AnkTZKxYWIiIhcNN+/F5//a3zxPKg6BX2uxsJTyVyKbpelHZ6IpEzFhYiIiJyXZzKwaR2Zst/BmgooKMBGT8Km3gnX3aSVnkTkPSouREREpEF+9Ahe9rukl+LgvmS1pzsewKbdg13RM+3wRCQPqbgQERGRD/Bdb+HznseXL4SaMzBoKPbxx7FhYzU5W0TOS8WFiIiIJDtnr1yCL54DWzdB+yJswnRsxv1Y775phyciLYSKCxERkTbKa2pg87pk5+w1y+H0KejdF3vwSWz8dE3QFpFGU3EhIiLSxvjO7Xj5XLyyDI4fhU5dsJHjsfEzYOBgTdAWkUum4kJERKQN8ONH8VVL8cVzYcfryb4UQ8ckO2ffMlJzKUSkSai4EBERaaW8ugpfsxyvWASvroHaWijpjz38WWzsVKyrhj2JSNPKSXERQvg+cA+wL8Z4S7btfwD3AtXANuDJGOOREMIAYBPwWvbty2OMT+ciThERkdbAjx7GX34RX/jbZNhTj2Jsxn1Y6RS4+loNexKRZpOrnosfAP8M/KhO21zgqzHGmhDC3wBfBf4s+9q2GOOwHMUmIiLSKviut/C5z+EVC5NeittGUzD9XrjpVqygIO3wRKQNyElxEWMsy/ZI1G2bU+dwOfBgLmIRERFpTfzUyezQp4Xw6looKsImzNASsiKSinyZc/EZ4Gd1jq8NIawBjgJfjzGWpxOWiIhI/vEz1bB+JZnKMnhlJZyphp69sPsfw6bcqSVkRSQ1qRcXIYQ/B2qAH2eb9gD9Y4wHQwgjgedCCENijEcbeO9sYDZAjJHi4uJchQ1AYWFhzu8pDVMu8odykT+Ui/zRFLnw2lqq16/idPkcqpYvwk+eoODyK+gw8z46TppJ+xuHaOjTRdDvRf5QLvJHU+Yi1eIihPA4yUTv6TFGB4gxVgFV2eerQgjbgBuBlfXfH2N8Bngme+gHDhzISdxnFRcXk+t7SsOUi/yhXOQP5SJ/fJhc+MH9+JJ5+JJ5cGg/dOqMDR9HQelkuPk2qtu1oxrg0KEmjbm10u9F/lAu8kf9XJSUlFzytVIrLkIId5BM4J4SYzxZp/1K4FCMsTaEcB0wEHgjpTBFRERyzo8fxVcvxSvLYcsGcIfBw5Kds4eVYu2L0g5RRKRBuVqK9ifAVKA4hLAT+AbJ6lAdgLkhBHh/ydnJwF+FEGqAWuDpGKM+jhERkVbNT5/C11Umu2ZvXJ2s9nRVX+yeh7Fxt2NX9k47RBGRC8rValGPNND87DnO/QXwi+aNSEREJH2eqYVX1+JLF+DrKqG6Cq4oxqbfh5VOhv7XaU8KEWlRUp/QLSIi0tb44YN4+Zz351F07YaNvx0bPRluGKSJ2SLSYqm4EBERyRF/axs+93l8RTlkMjBoGAUPPQnDxmCF7dMOT0TkQ1NxISIi0oz80H5OLJ5D7cu/hbe2QYdO2NS7sOn3ah6FiLQ6Ki5ERESamFdXJas9LZ4Hr63nOMCAgVh4CpswHevcNe0QRUSahYoLERGRJuDu8MZreMVCvGIRnDwBV/bG7n+UHh/5KEfad0w7RBGRZqfiQkRE5EPw3W/hy19O9qQ4uA8K22Mj
xmOTZ8HAZNfswuJi0GZhItIGqLgQERFpJD99Cl+5GF88F7ZthoKCZJO7+x7Fho/FOnVOO0QRkVQ0qrgIIfQE7gL6xBj/NoRQAhTEGHc2S3QiIiJ5wo8dxV9Zga9ZBq+uhTPV0Lsf9tCT2Nhp2GXd0w5RRCR1F11chBCmkGxutxKYAPwtMBD434B7myU6ERGRFPmZM/ja5Xj5HNi8HjwDPYqxSbOwUROTPSm0yZ2IyHsa03Pxj8DDMcb5IYTD2bYKoLTpwxIREUmP73oLXzwHX/4yHD8GPXthdz6IjRgL/a9XQSEicg6NKS4GxBjnZ5979mt1I68hIiKSl7zqNL6i/P15FO0KsWFjsEmzYNBQ7ZotInIRGlMYvBpC+EiM8aU6bTOA9U0ck4iISE64O7y5FS+fg1eWwelT78+jGHc71u3ytEMUEWlRGlNcfBl4IYTwG6BTCOFfSeZa3N8skYmIiDQTP3E82Y+ifC7s3A5FRdjIicnysddrHoWIyKW66OIixrg8hDAUeAz4PvA2UKqVokREpCVwd9iyMZlLsWppstrTNTdgj/0hVjoZ69wl7RBFRFq8Rs2XiDHuIlklSkREpEXw/XvxyjJ86QLYtxs6dcEmzMAmzcT6X592eCIircp5i4sQwr/z/uTtc4oxfrrJIhIREfmQ/NjRZNhTZRls35I0DhyM3R2wkROwDh3SDVBEpJW6UM/F1jrPi4HHgV8DbwL9SeZc/LB5QhMREbl4nqmFza/g5XPxNcuhtgauvhb7+OPY6ElYz15phygi0uqdt7iIMf73s89DCC8Bd8cYy+u0TQT+W/OFJyIicm7uDju2JsOeVpTDu4egc1ds6p3YxJlYvwFphygi0qY0Zs7FWGB5vbYKYFzThSMiInJhfvQwvnQBvngevLMLCgvhlpHJxOxhY7D2RWmHKCLSJjWmuFgD/J8hhL+IMZ4KIXQC/juwtnlCExEReZ9namHjGjLlc+CVFVBbCzcOwT7yADZiPNala9ohioi0eY0pLp4A/gN4N4RwGLgCWAk82gxxiYiIAOAH9+GL5+FL5sHhA9DtcmzGfcmwp9790g5PRETqaMw+FzuA8SGEq4ESYE+M8a3mCkxERNourzkD6yrJlM2BTdkO8iHDKXhNwHY4AAAgAElEQVT4szB0NFbYPt0ARUSkQY3a5yKEcAUwDegL7Aoh/DrGePgi3/t94B5gX4zxlmxbD+BnwABgBxBijIdDCAZ8B7gLOAk8EWNc3ZhYRUSk5fE9O5NN7pYugONHoUcxds/Dyb4UWu1JRCTvFVzsiSGEccA24GngNuBzwLZs+8X4AXBHvbavAPNjjAOB+dljgDuBgdnHbOBfLjZOERFpWbyqiszS+dT+zVfI/MUf4fN/DTcOoeCL36Dg//oeBfc9qsJCRKSFaEzPxT8CfxRj/OnZhhDCw8A/AaMv9OYYY1kIYUC95vuBqdnnPwQWAn+Wbf9RjNGB5SGE7iGEPjHGPY2IV0RE8pRnMvD6xveXkD11EnqVJHtSjL8du+yKtEMUEZFL0Jji4kYg1mv7OfD/fIj7X3W2YIgx7gkhnP1oqi/wdp3zdmbbVFyIiLRgvnMHvnQ+vmIxHDkIRR2wEeOwibOSlZ/M0g5RREQ+hMYUF68DnyBZMeqsh0iGSjW1hv7v4vUbQgizSYZNEWOkuLi4GUI5t8LCwpzfUxqmXOQP5SJ/5EsuMidPcHrJfE7P+zVntmyEwkKKho+l06SZdBg9EevYKe0Qm12+5EKUi3yiXOSPpsxFY4qLPwFeCCF8EXiTZBL2QJJJ2pfqnbPDnUIIfYB92fadwNV1zusH7K7/5hjjM8Az2UM/cODAhwil8YqLi8n1PaVhykX+UC7yR5q58DPVsH4lmYoyWL8SzlRDn6uxh5/CxkyjtttlHAeOHz8Bx0+kEmMu6fcifygX+UO5yB/1c1FSUnLJ12rMUrRLQwjXA3eTLEX7a+DFGOOhS747/Ap4HPhW9uvzddq/EEL4
KTAGeFfzLURE8p+/tQ0vn4NXlMGpE3BZd2zSLKx0Mlx3k4Y9iYi0co1aija77Oz/AgghXAd0Ay6quAgh/IRk8nZxCGEn8A2SoiKGEJ4C3iIZZgXwIskytFtJlqJ9sjFxiohI7vjJE3jlIrx8Lry1DdoXYSPHY+OmwU23Ye3apR2iiIjkyEUXF9ni4P/O9mA8CfxPIBNC+GKM8dkLvT/G+Mg5XprewLkOfP5iYxMRkdxyd9i2CS+bg69aDNXV0O9a7NHPYaVTsC5d0w5RRERS0Jiei+kkQ5cAvgTMAI4AzwEXLC5ERKTl86OH8eULk16KvTuhYyds7O3YpJlwzQ0a9iQi0sY1prgoijFWhxD6Aj1ijEsAQghXNU9oIiKSD/zkCXzNMryyDDa9Ap6B62/GnvgiNnJCm1jtSURELk5jiou1IYSvAtcAvwHIFhpHmyMwERFJj2cysGUDXj4XX70Uas7Alb2xOx/ExkzGSvqnHaKIiOShxhQXTwHfBM4A/3u2bRzw46YOSkRE0uFHDuHLFuDlc2D/XujcBZs0Ext3OwwYqGFPIiJyXo1ZinYb8Gi9tp+T7NItIiItVLInxSoyyxbAKysgk4Ebb8HuewQbMR4r6pB2iCIi0kKct7gIIXwqxvjv2eefOdd5McbvN3VgIiLSfLy2Fja/gleW4WuWwamTyZ4Usx7AJszAevdNO0QREWmBLtRz8Qjw79nnnzrHOQ6ouBARyXOeycAbm5OCYuUSOPYudOqMDR+XbHJ3s/akEBGRD+e8xUWM8a46z6c1fzgiItKU3B127kgKisoyOLQf2hfBbaMoKJ0Ct47E2helHaaIiLQSjdqhO4TQHbgbKAF2A7+JMR5pjsBEROTS+b7dHF/wazILfwd73oaCAhg8HPvoJ7FhY7BOndMOUUREWqHG7NB9O/BL4DXgTaA/8N0QwsdjjPObKT4REblIfuQgvmIxXrEI3tzKCYCBg7HHnk72o+h2edohiohIK9eYnot/BmbHGOPZhhDCQ8B3gZubOjAREbkwP34UX70UryyHLRvAHfpfjz34JD0/ch+H0RwKERHJncYUFyXAL+q1/SfwvaYLR0RELsRPn8LXVSY9FK+ugdpauKovds/DWOlkrHc/ANoVF8OBAylHKyIibUljiosfAZ8H/qlO2x9m20VEpBm5O2zfgi+em0zMrjoN3Xti0+/FSqdA/+u0wZ2IiKSuMcXFCOAPQwh/CuwC+gK9gIoQQtnZk2KMk5s2RBGRtsv378VXlCcFxa43oagDNnoiNm56Mp+ioCDtEEVERN7TmOLie2gIlIhIs/Ojh/EVS/DKRfDGa0nj9Tdjn/yjZNiTVnoSEZE8dcHiIoTwTzHGL8YYf5g9firG+Gyd138RY/x4cwYpItLa+elT+KolyTyKzevBM9BvAPaxT2OjJ2HFV6UdooiIyAVdTM/FE8AX6xz/D+DZOsczmzIgEZG2wt1hx+t4+ZxktaeqU3Blb+yuB5MeipL+aYcoIiLSKBdTXNSfIXihYxEROQ/fuzO7Y3Y5vLMrO49iEjZxZjL8SROzRUSkhbqY4sIbeSwiIvX4yRNJQbF4Lry5FczgxluwjzyAjZqoeRQiItIqXExxURhCmMb7PRT1j7VDk4hIA7y2Fja/glcsxFctgerqZB5FeCpZ8al7z7RDFBERaVIXU1zsA75f5/hgveN9TRqRiEgL529uw5fMxVcugWPvQqfO2Nhp2MRZMOAGDXsSEZFW64LFRYxxQA7iEBFp0TyTgfUrycx9Hl5bD+2LsNtGY6WT4daRWPuitEMUERFpdo3Z50JEROrx6ip82cv4vOdh7y7oUYw99CQ2cRbWuUva4YmIiORUqsVFCOEm4Gd1mq4D/gLoDvwBsD/b/rUY44s5Dk9EpEGeycDWTfiKMnzlYjh+DK65Afvsl7GRE7BCfW4jIiJtU6r/B4wxvgYMAwghtAN2Af8JPAn8Q4zx2ymGJyLyHneHt99IVnxaUQ6HDkBR
ETZ0DDb1Thg4RHMpRESkzcunj9emA9tijG+GENKORUQEAN+7K1tQlCXDntq1g8HDsY89jg0txTp2SjtEERGRvJFPxcUngJ/UOf5CCOHTwErgyzHGw/XfEEKYDcwGiDFSXFyck0DPKiwszPk9pWHKRf5oDbmoPbCP00vmcbpsLrVvvAZmtB8ynI4PPEbHsdMouOzytEO8KK0hF62FcpE/lIv8oVzkj6bMhbmnvwdeCKEI2A0MiTG+E0K4CjhAskHfN4E+McbPXOAyvnv37maO9IOKi4s5cOBATu8pDVMu8kdLzYUfO4qvWpL0ULz+Krgn8yjGTEk2ubui5e1J0VJz0RopF/lDucgfykX+qJ+LkpISeH9Pu0bJl56LO4HVMcZ3AM5+BQghfA94Ia3ARKT18tMn8bUVeGU5vLoGamuhz9XYfY9goydjV5WkHaKIiEiLki/FxSPUGRIVQugTY9yTPXwA2JBKVCLS6ngmA5vW4Uvm4esqkl2ze1yJzbgfGzMl2UFbE7NFREQuSerFRQihMzAT+Fyd5r8NIQwjGRa1o95rIiKN5vv24BWL8CXz4OA+6NINGz8DGzMZrrsZKyhIO0QREZEWL/XiIsZ4EuhZr+1TKYUjIq2IHz2crPRUWQ7btySNg4ZiH38cGzYWa98+3QBFRERamdSLCxGRpua73sTnPodXLIKaGuh/HfbgE9ioSVjPK9MOT0REpNVScSEirYK7w6trycx9DjauSTa4mzgLu/1urM/VaYcnIiLSJqi4EJEWzQ8fxFeU40vnw6434fIrsI9+EptyB9b1srTDExERaVNUXIhIi+OnT+IrFuPLF8LrG9/fk+KJP8ZKJ2suhYiISEpUXIhIi+Du8MZr+OK5+IpyqDoNvfti9z6CjZ6E9e6bdogiIiJtnooLEclrfvwovnwhvnhuMuypQ8ekmJg0C669UXtSiIiI5BEVFyKSdzyTgdfW4+Vz8DXLkhWfrr0R+/QXsNETsY6d0w5RREREGqDiQkTyhh85iC+Zn2x0t38vdO6KTbkTmzgD63dt2uGJiIjIBai4EJFUeW0tbFhFpnwOrF8JmQzcdCt2/2PYiHFY+6K0QxQREZGLpOJCRFLh+3bjSxbgS+fBkUPJErIfeQCbOBPrVZJ2eCIiInIJVFyISM74kYPJErKVZbDjdbACuGUEBY89DbeMwgr1nyQREZGWTP8nF5Fm5TU18EolmfK5sHF1sidF/+uxh57ERk/GruiZdogiIiLSRFRciEiTc3fYvgWvLEt6KY69C917Ync9hI2dpj0pREREWikVFyLSZPzIIY7PfY7Myy8mqz0VFsKtoyiYOBNuGYEVtEs7RBEREWlGKi5E5ENxd9ixFV/4Il6xiBOZWhg0FLs7YMPHYp27ph2iiIiI5IiKCxFpNK+pgS0b8LXL8bWVcPgAFHXAJn+EHg89zpH2HdMOUURERFKg4kJELpqfOIaXvYQveCFZPraoCAaPwD76SWxoKdalK4XFxXDgQNqhioiISApUXIjIeb037GnxHHz5QqiugkFDKXhkNgwZiXXokHaIIiIikidUXIhIg/zIQXzlYnzJfNi5A4qKsFGTsBn3YVdfm3Z4IiIikodUXIjIe/zEMXzV0mT52C0b3t+T4rGnsdIpWOcuaYcoIiIieUzFhUgb51Wn8bUV+Ipy2LAaamugVwl298NY6WSsT7+0QxQREZEWQsWFSBvkNWdg4xq8YhG+rjKZR9G9J3b73diYKUlvhVnaYYqIiEgLkxfFRQhhB3AMqAVqYoyjQgg9gJ8BA4AdQIgxHk4rRpHWwN/ejpfPwSsWwcnj0LVbsmN26WQYOBgrKEg7RBEREWnB8qK4yJoWY6y7fuVXgPkxxm+FEL6SPf6zdEITabn81Em8sgwvnwNvboXC9tiIcdjYqTBoGFaYT/8ZEBERkZYsn/+quB+Ymn3+Q2AhKi5ELoq7w7bNyfKxKxYnw576XoN9YjY2dgrWpVvaIYqIiEgrZO6e
dgyEELYDhwEH/jXG+EwI4UiMsXudcw7HGK+o977ZwGyAGOPI6urqXIZNYWEhNTU1Ob2nNEy5SGSOvsuphb/l1LxfU/v2dqxjZzpOmkGnGfdROHBQTuZRKBf5Q7nIH8pF/lAu8odykT/q56KoqAjgkv5oyJeeiwkxxt0hhF7A3BDC5ot5U4zxGeCZ7KEfyPGuwMXFxeT6ntKwtp4L37MTn/c8vuxlOFMN192EffoL2OhJVHfsRDXAwYM5iaWt5yKfKBf5Q7nIH8pF/lAu8kf9XJSUlFzytfKiuIgx7s5+3RdC+E+gFHgnhNAnxrgnhNAH2JdqkCJ5xk8cx1cvTZaQ3bQumUsxbhp2+z1YvwFphyciIiJtUOrFRQihC1AQYzyWfT4L+CvgV8DjwLeyX59PL0qR/OBVp/F1lckmd+/tSdEHu/9RbPId2GXdL3wRERERkWaSenEBXAX8ZwgBknj+I8b4uxDCCiCGEJ4C3gIeSjFGkdR4phY2rsWXv5zsSVF1Grr3SPakKJ0M19ygPSlEREQkL6ReXMQY3wCGNtB+EJie+4hE8oMfeAdfMh9fMg8OH4Au3bAxU+rsSdEu7RBFREREPiD14kJE3udHj+CrliTDnrZuAjMYMpyChz8LQ0djhe3TDlFERETknFRciKTMT53E1yzDK8pg8zrIZJI9KT76SWzsVKxnr7RDFBEREbkoKi5EUnB2HkVm8Vx4ZQXUnIHiq7A7Po6VTsb6XpN2iCIiIiKNpuJCJIf84D588bz351F0uxybcgc2elKyN4UmZouIiEgLpuJCpJl5TQ2sqyBTNgc2rU0aBw/TPAoRERFpdVRciDQTP3kcL5+Dz38h6aW4ohi7+2Fs4gzNoxAREZFWScWFSBPymhp4dQ1eWYavrUj2pLjpVgoeexpuHanlY0VERKRVU3Eh0gR83x58wQv48oVw4hh07ppMzJ5yJ3bN9WmHJyIiIpITKi5ELpFnMrBlA5mXfwNrlkNBO2zEOGzMFBgyXHMpREREpM1RcSHSSH7kEL5sAV4+B/bvTXop7vg4dvvdWPeeaYcnIiIikhoVFyIXwWtrYeNqMuVzkn0pMhm48RbsvkewEeOxog5phygiIiKSOhUXIufg7rB9SzI5e+USePdQsi/FzI9iE2divfumHaKIiIhIXlFxIVKP73orKShWlCXDngoL4ZZRFIybCreVYoX6tRERERFpiP5KEgF8/158RTleWQa73gQrgEG3YXcHbPhYrHPXtEMUERERyXsqLqTN8qOH8RVLkh6KbZuTxutvxh6ZjY2agF12RboBioiIiLQwKi6kTfGTx/HVy5Ieis3rwTPQbwD2sU9joydhxVelHaKIiIhIi6XiQlo9r6rCX1mRFBQbVkJNDVzZG7vrwWSju5L+aYcoIiIi0iqouJBWyd1h2ya8bA6+ehlUnYLLe2BT78JKJ8OAgZhZ2mGKiIiItCoqLqRV8UP7k4nZi+fB3p3QsRNWOikpKG4cghW0SztEERERkVZLxYW0eH7sXU6uLKN2wYvw+qtJ4/U3Y098ERs5AevYKd0ARURERNoIFRfSIvnpk/iaCrxyEby6lmOZDPS5Grv/saSnoldJ2iGKiIiItDkqLqTF+MA8ilWLoboaevbCPvIAV8y6nyNdLtc8ChEREZEUpVpchBCuBn4E9AYywDMxxu+EEP4S+ANgf/bUr8UYX0wnSkmb79uNV5bjFYuSeRQdOmFjp2Hjbk+GP5nRvrgYO3Ag7VBFRERE2rS0ey5qgC/HGFeHELoBq0IIc7Ov/UOM8dspxiYp8iMH8RWLk+Vjd7yeNA4cjH3kv2CjJmoehYiIiEgeSrW4iDHuAfZknx8LIWwC+qYZk6THT57AV2YLii0bwB36X489+CQ2eiLW48q0QxQRERGR8zB3TzsGAEIIA4Ay4BbgS8ATwFFgJUnvxuEG3jMbmA0QYxxZXV2dq3ABKCwspKamJqf3bG3cnTObXuHUvF9zesl8qK6iXd/+
dJw0k44TZ1DY95qLuo5ykT+Ui/yhXOQP5SJ/KBf5Q7nIH/VzUVRUBHBJE1nzorgIIXQFFgF/HWP8ZQjhKuAA4MA3gT4xxs9c4DK+e/fuZo70g4qLizmgcf6XxGvOJMOe5j4Hb2/P7kcxBZs4Ewbc0OiJ2cpF/lAu8odykT+Ui/yhXOQP5SJ/1M9FSUkJXGJxkfacC0II7YFfAD+OMf4SIMb4Tp3Xvwe8kFJ40sT8xDG87CV8wQtw5FCyfOynv4CNnqR5FCIiIiItXNqrRRnwLLApxvj3ddr7ZOdjADwAbEgjPmkayRKym/GKhfjSBVBdBYOGUvD4f4EhI7R8rIiIiEgrkXbPxQTgU8D6EMLabNvXgEdCCMNIhkXtAD6XTnhyqdwddu3AK8vwynI4uA8K2ycTs2d+FLv62rRDFBEREZEmlvZqUYtpeDyX9rRooXzfnmxBUQZ73oaCAhg8DLvvUWz4WKxT57RDFBEREZFmknbPhbQCfuRgsoRsRZ09KW4YjD36NDZqAtbt8nQDFBEREZGcUHEhl8RPHMNXLa23J8V12INPYKMmYT21J4WIiIhIW6PiQi6aV53G11YkBcXGNVBbA71KsLsfxkonY336pR2iiIiIiKRIxYWcl9ecgQ2rk3kU6yqTlZ6698Sm34OVTk520NZqTyIiIiKCigtpgGcy8Np6fEU5vmoJnDwBXbthY6clBcXAwVhBQdphioiIiEieUXEh7/FDB/Cl8/DF85KlYzt0woaPSQqKQcOwQv1zEREREZFz01+LbVyywd0mMnOeg7WV4BkYNBT72KexoWOwDh3SDlFEREREWggVF22UV53GVy3BF/4Wtm+BLt2wOz6GTZqFXdk77fBEREREpAVScdGGuDu88Rq+bEGy4tOpk9C7L/bY09i427EOHdMOUURERERaMBUXbYDv3IFXLsIry5O5FO2LsJETsEmzksnZWu1JRERERJqAiotWyvfvTZaPrSyD3W9BQUEyl+K+R7BhY7HOXdIOUURERERaGRUXrYgfOYSvLE96KLZvSRpvGIw9+jQ2cjx2Wfd0AxQRERGRVk3FRQvnJ47jq5cmPRSvrQd3uPpa7OOPY6MnYz2vTDtEEREREWkjVFy0QH7mDKyrILN8IWxYDbU10KsEu/thrHQy1qdf2iGKiIiISBuk4qIF8T1v4+Vz8GUvw/Gj0L0nNv2eZJO7/tdrYraIiIiIpErFRZ7zqtP4yiX44jmwdRO0awfDxlAwcRYMHooVtEs7RBERERERQMVF3vJD+/EFL+Blc+DUiWQ/igefxMZN08RsEREREclLKi7yiJ85AxtX4csX4WuXgzs2Yjw27W7tRyEiIiIieU/FRcrcHV5/Ndk1e/VSOHkCul6GTbsHm3Ev1rNX2iGKiIiIiFwUFRcp8WNH8aXz8PK58M4u6NAJGzE2mZx981CsUKkRERERkZZFf8HmmO/dic/9Fb5sAZyphhsGYXc+iI2agHXomHZ4IiIiIiKXTMVFDrg7bNlAZs5z8MoKKGyfTMyefh/Wt3/a4YmIiIiINIm8Li5CCHcA3wHaAf8WY/xWyiE1itfU4CsX43Ofh7e2JXMp7v0ENvUurfgkIiIiIq1O3hYXIYR2wHeBmcBOYEUI4VcxxlfTjezC/OA+vGIRvvC3cPgA9O6Hferz2NipWFGHtMMTEREREWkWeVtcAKXA1hjjGwAhhJ8C9wN5WVz40SNJL0VlGWzbnDTedCsFn/xDuGUkVlCQboAiIiIiIs0sn4uLvsDbdY53AmPqnhBCmA3MBogxUlxcnLvogMLCwvfuefzlFzjxk2covOZ6On7yaTpOnEG7q0pyGk9bVjcXki7lIn8oF/lDucgfykX+UC7yR1PmIp+Li4Z2jPO6BzHGZ4Bnzr524MCBZg+qruLiYs7e00dOoODGW/G+13AK+P/bu/tguer6juPvNNIUxQhMUZuAPNTIVPwjqCgzTH0YLQ+OGGWmX2OtUKCCrRnL2OkgyLQU
+oBasNh2rAhOYCTgh7ZoOg0NkXamTMdIDOhUpIw8xHJJGqokBqtCA+kf51xYwt0rm3vu7ubm/Zo5c3fPnnPub/c7v7P7Pb+H8xOAIZdnX9YbC42WsRgfxmJ8GIvxYSzGh7EYH7vHYtGiPb9APs7JxQRwWM/zQ4HNIyrLzzRv4UGw8KBRF0OSJEkamXFOLjYAS6rqSOBhYDnwG6MtkiRJkqR+xnaUcZKdwApgLXBPsyp3j7ZUkiRJkvoZ55YLkqwB1oy6HJIkSZJ+trFtuZAkSZK0dzG5kCRJktQJkwtJkiRJnTC5kCRJktQJkwtJkiRJnTC5kCRJktQJkwtJkiRJnZi3a9euUZehK3PmjUiSJEkjNm9PdppLLRfzhr1U1cZR/F8XYzHOi7EYn8VYjM9iLMZnMRbjsxiL8Vn6xGKPzKXkQpIkSdIImVxIkiRJ6oTJxcxcNeoC6GnGYnwYi/FhLMaHsRgfxmJ8GIvx0Vks5tKAbkmSJEkjZMuFJEmSpE68YNQF2BtV1cnAlcB84Ookl424SHNaVR0GXAe8HHgKuCrJlVV1MfBB4H/aTS9Msqbd5wLgbOBJ4CNJ1g694HNUVW0CHqP5bHcmeX1VHQx8CTgC2ARUkm1VNY+mrrwD+DHwW0nuHEW555qqOprmM590FPCHwIFYL4aiqr4AvBN4JMlr2nUD14WqOgO4qD3snyS5dpjvY2/XJw6fAk4FngDuB85Msr2qjgDuAe5td1+f5EPtPq8DVgL7A2uA30ti944B9YnHxQx4XvK31sz0icOXgKPbTQ4EtidZ2nW9MLkYUFXNB/4G+DVgAthQVauTfGe0JZvTdgK/n+TOqnoxsLGq1rWvfTrJX/RuXFWvBpYDxwCLgK9W1auSPDnUUs9tb03y/Z7nHwNuS3JZVX2sfX4+cAqwpF3eCHy2/asZSnIvsBSePi89DNwMnIn1YlhWAn9Nc/Fj0kB1oU1G/gh4Pc39mja23ynbhvYu9n4reW4c1gEXJNlZVZ8ALqCJA8D9SZZOcZzPAucA62l+RJ0M3DJbhZ7DVvLceMAA56X2ZX9rzcxKdotDkvdOPq6qy4Ef9mzfWb2wW9Tg3gDcl+SBJE8ANwLLRlymOS3JlskrfEkeo8muF0+zyzLgxiSPJ3kQuI8mbpo9y4DJq63XAu/uWX9dkl1J1gMHVtUvjaKAc9zbaL4YvjfNNtaLjiX5N+DR3VYPWhdOAtYlebRNKNbRfHnreZoqDkluTbKzfboeOHS6Y7SxWJjka+1V2et4JnYaQJ960U+/85K/tWZouji0LakF3DDdMfa0XthyMbjFwEM9zyfwSuzQtE13xwJfB04AVlTV6cA3aFo3ttHEaH3PbhNMn4xoMLuAW6tqF/C5JFcBL0uyBZpksKpe2m47VX1ZDGwZZoH3Act59peE9WJ0Bq0L/darO2fx7C6ER1bVXcAO4KIkt9N85hM92xiH7g16XvK31uz5VWBrku/2rOusXthyMbip7lhon8whqKoDgL8Hzkuyg6ap7pdpuoZsAS5vNzVGs+uEJK+l6ebx4ap60zTbGotZVlU/D7wLuKldZb0YT/0+f+Myi6rq4zRda69vV20BXpHkWOCjwKqqWohxmG2DnpeMx+x6H8++INVpvTC5GNwEcFjP80OBzSMqyz6jqvajSSyuT/IPAEm2JnkyyVPA53mmi4cxmkVJNrd/H6Hp4/8GYOtkd6f27yPt5sZi9p0C3JlkK1gvxsCgdcG4zJJ2oPw7gfdPDkBtu9/8oH28kWaw96to4tDbdco4dGgPzkvWi1lSVS8ATqOnNa/remFyMbgNwJKqOrK9YrgcWD3iMs1pbd/Aa4B7klzRs7637/57gG+3j1cDy6tqQVUdSTOA8o5hlXcuq6oXtYPqqaoXASfSfO6rgTPazc4AvtI+Xg2cXlXzqup44IeTXUbUmWddgbJejNygdWEtcGJVHVRVB9HUKWfxmqF2pqHzgXcl+XHP+kPaCRCoqqNo6sED
bSweq6rj2++c03kmdpqhPTgv+Vtr9rwd+M8kT3d36rpeOOZiQO3MEytoTv7zgS8kuXvExZrrTgA+AAM2a5kAAARgSURBVPxHVX2zXXch8L6qWkrTRLcJOBcgyd1VFeA7NM3hH3ZGnM68DLi5qqA5f6xK8s9VtQFIVZ0N/Bfw6+32a2im3ryPZvrNM4df5Lmrql5IM5vKuT2rP2m9GI6qugF4C/CLVTVBM+vTZQxQF5I8WlWX0vyYArgkyfMdDCv6xuECYAGwrj1fTU6t+SbgkqraSTP16Yd6Pu/f4ZkpN2/BmaL2SJ94vGXQ85K/tWZmqjgkuYbnjtGDjuuFd+iWJEmS1Am7RUmSJEnqhMmFJEmSpE6YXEiSJEnqhMmFJEmSpE6YXEiSJEnqhMmFJImqurCqrh51OSRJezenopWkfUBV/ajn6QuBx2nmMwc4N8n1QyzL2cAfAItp7vnwDWB5kseqaiUwkeSiYZVHktQdb6InSfuAJAdMPq6qTcBvJ/nqsMtRVW8G/gw4OcldVXUwcOqwyyFJmh0mF5Ikqupi4JVJfrOqjgAeBM4CLgEOoLnj8UbgGuAVwBeTrOjZ/yya1oiXA3cA5yT53hT/6jjga0nuguYO1cC17THOAd4P7Kqq84B/TXJqVS0C/ormLrI/Aj6d5DM95X4NTSvMO4DvAmcm+Vb7+vnAR4CFwGbgd5PcNtPPS5I0NcdcSJL6eSOwBHgv8JfAx4G3A8cA1bZCUFXvBi4ETgMOAW4HbuhzzK8DJ1XVH1fVCVW1YPKFJFcB1wOfTHJAm1j8HPCPwLdoulG9DTivqk7qOeYy4CbgYGAV8OWq2q+qjgZWAMcleTFwErBphp+JJGkaJheSpH4uTfLTJLcC/wvckOSRJA/TJBDHttudC/x5knuS7KTp9rS0qg7f/YBJbqdJQl4L/BPwg6q6oqrm9ynDccAhSS5J8kSSB4DPA8t7ttmY5O+S/B9wBfALwPE0rRkLgFdX1X5JNiW5f0afiCRpWnaLkiT1s7Xn8U+meD45juNw4Mqqurzn9Xk0LQ3P6RqV5BbglrZV4q00rQ73Ap+bogyHA4uqanvPuvk0yc2kh3qO/VRVTQCLktzedq+6GDimqtYCH02yuf9bliTNhMmFJGmmHgL+dNAZp5I8BdxWVf9CM24CYPcpDB8CHkyyZJpDHTb5oE1YDqUZX0GSVcCqqlpIk7x8AvjAIOWUJD1/JheSpJn6W+DSqvpmkrur6iXAiUlu2n3DqloG7A+sBbbTdHt6M3Beu8lW4KieXe4AdrQDsz8DPAH8CrB/kg3tNq+rqtOA1TSDtx8H1rdjLhYD/w78lKa1xe7AkjSLPMlKkmYkyc00LQI3VtUO4NvAKX023wZ8kGZWpx3AF4FP9bR6XEMzRmJ7VX05yZM0U9UupZnB6vvA1cBLeo75FZpB59toWiVOa8dfLAAua/f5b+ClNAPPJUmzxJvoSZL2Wr1T6I66LJIkWy4kSZIkdcTkQpIkSVIn7BYlSZIkqRO2XEiSJEnqhMmFJEmSpE6YXEiSJEnqhMmFJEmSpE6YXEiSJEnqhMmFJEmSpE78Px0Y/FinFu+8AAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 936x360 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "(<Figure size 936x360 with 1 Axes>,\n",
       " <Figure size 936x360 with 1 Axes>,\n",
       " <Figure size 936x360 with 1 Axes>)"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import pandas as pd\n",
    "\n",
    "def plot_episode_stats(stats, smoothing_window=10, noshow=False):\n",
    "    \"\"\"Plot training curves: episode lengths, smoothed rewards, episodes/step.\n",
    "\n",
    "    Args:\n",
    "        stats: record with `episode_lengths` and `episode_rewards` sequences.\n",
    "        smoothing_window: rolling-mean window size for the reward curve.\n",
    "        noshow: if True, close each figure instead of displaying it.\n",
    "\n",
    "    Returns:\n",
    "        Tuple of the three matplotlib figures (lengths, rewards, episodes).\n",
    "    \"\"\"\n",
    "    def finalize(fig):\n",
    "        # plt.show() takes no figure argument -- passing one would be\n",
    "        # misinterpreted as the `block` parameter.\n",
    "        if noshow:\n",
    "            plt.close(fig)\n",
    "        else:\n",
    "            plt.show()\n",
    "\n",
    "    # Episode length over time.\n",
    "    fig1 = plt.figure(figsize=(13, 5))\n",
    "    plt.plot(stats.episode_lengths)\n",
    "    plt.xlabel(\"Episode\")\n",
    "    plt.ylabel(\"Episode Length\")\n",
    "    plt.title(\"Episode Length over Time\")\n",
    "    finalize(fig1)\n",
    "\n",
    "    # Rolling mean of rewards; the first `smoothing_window - 1` points are\n",
    "    # NaN because min_periods equals the window size.\n",
    "    fig2 = plt.figure(figsize=(13, 5))\n",
    "    rewards_smoothed = pd.Series(stats.episode_rewards).rolling(\n",
    "        smoothing_window, min_periods=smoothing_window).mean()\n",
    "    plt.plot(rewards_smoothed)\n",
    "    plt.xlabel(\"Episode\")\n",
    "    plt.ylabel(\"Episode Reward (Smoothed)\")\n",
    "    plt.title(\"Episode Reward over Time (Smoothed over window size {})\".format(smoothing_window))\n",
    "    finalize(fig2)\n",
    "\n",
    "    # Cumulative time steps vs. episode index.\n",
    "    fig3 = plt.figure(figsize=(13, 5))\n",
    "    plt.plot(np.cumsum(stats.episode_lengths), np.arange(len(stats.episode_lengths)))\n",
    "    plt.xlabel(\"Time Steps\")\n",
    "    plt.ylabel(\"Episode\")\n",
    "    plt.title(\"Episode per time step\")\n",
    "    finalize(fig3)\n",
    "\n",
    "    return fig1, fig2, fig3\n",
    "\n",
    "plot_episode_stats(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
