{
 "metadata": {
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.8-final"
  },
  "orig_nbformat": 2,
  "kernelspec": {
   "name": "python_defaultSpec_1598268515350",
   "display_name": "Python 3.7.8 64-bit ('venv': venv)"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2,
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "CartPole 中的表格表示\n",
    "\n",
     "+ 状态存储在变量 observation 中，由 4 个变量组成的列表：\n",
     "    - 小车位置（-2.4 ~ 2.4）\n",
     "    - 小车速度（-∞ ~ +∞）\n",
     "    - 杆的角度（-41.8° ~ +41.8°）\n",
     "    - 杆的角速度（-∞ ~ +∞）\n",
    "\n",
    "这里将连续值离散化以制作 Q函数 的`表格`：\n",
    "以 6 个值离散化每个变量，$6^4=1296$\n",
     "以 6 进制表示，例如：$1\\times 6^0+2\\times 6^1+3\\times 6^2+4\\times 6^3=985$，则\n",
    "$$\n",
     "s_\\text{位置、速度、角度、角速度}=s_{(1,2,3,4)}\\to s_{985}\n",
    "$$"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Q学习的实现\n",
    "在这个任务中要定义和实现三个类`Agent`、`Brain`和`Environment`：\n",
    "    ![3.10 CartPole中Q学习实现的概述](media/3.10CartPole中Q学习实现的概述.png)\n",
    "\n",
    "+ Agent 类:\n",
    "    + 函数 update_Q_function    更新Q函数\n",
    "    + 函数 get_action   确定下一个动作\n",
    "+ Brain 类\n",
    "    + 函数 bins\n",
    "    + 函数 digitize_state\n",
    "    + 函数 update_Q_table   更新Q表\n",
    "    + 函数 decision_action  确定来自Q表的动作\n",
    "+ Environment 类\n",
    "    + OpenAI Gym 的执行环境\n",
    "    + run 函数\n",
    "\n",
    "流程：\n",
    "\n",
    "+ Agent \n",
    "    将当前状态 ***observation_t*** 传递给 Brain\n",
    "\n",
    "+ Brain\n",
    "    离散化状态\n",
    "\n",
    "    根据***observation_t***、***Q表*** 确定动作 ***action_t***\n",
    "\n",
    "    将动作 ***action_t*** 返回给 Agent\n",
    "\n",
    "    将动作 ***action_t*** 传递给 Environment\n",
    "\n",
    "+ Environment\n",
    "    执行 ***action_t*** 得到状态 ***observation_t+1***、即时奖励 ***reward_t+1***\n",
    "\n",
    "    将 ***observation_t+1***、***reward_t+1*** 返回给 Agent\n",
    "\n",
    "+ Agent\n",
     "    将 ***transition(observation_t, action_t, observation_t+1, reward_t+1)*** 传递给 Brain\n",
    "\n",
    "+ Brain\n",
    "    更新***Q表***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 声明要使用的包\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "import gym"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 声明动画的绘制函数\n",
    "from JSAnimation.IPython_display import display_animation\n",
    "from matplotlib import animation\n",
    "from IPython.display import HTML, display\n",
    "\n",
    "def display_frames_as_gif(frames):\n",
    "    \"\"\"\n",
    "    Displays a list of frames as a gif, with controls\n",
    "    以gif格式显示关键帧列，带有控件\n",
    "    \"\"\"\n",
    "    \n",
    "    fig = plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0),dpi=72)\n",
    "    patch = plt.imshow(frames[0])\n",
    "    plt.axis('off')\n",
    "    \n",
    "    def animate(i):\n",
    "        img = patch.set_data(frames[i])\n",
    "        return img   ## *** return是必须要有的 ***\n",
    "        \n",
    "    anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50)\n",
    "    \n",
    "    anim.save('media/movie_cartpole.mp4')\n",
    "    return HTML(anim.to_jshtml())  ## *** 返回一个HTML对象，以便被调用者显示。 ***\n",
    "    # display(display_animation(anim, default_mode='loop'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Hyperparameters and task constants\n",
     "ENV = 'CartPole-v0' # Gym task name\n",
     "NUM_DIZITIZED = 6   # number of discrete bins per state variable (6**4 = 1296 states)\n",
     "GAMMA = 0.99        # discount factor\n",
     "ETA = 0.5           # learning rate\n",
     "MAX_STEPS = 200     # max steps per episode\n",
     "NUM_EPISODES = 1000 # max number of episodes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 设定 Agent 类\n",
    "class Agent:\n",
    "    \n",
    "    def __init__(self, num_states, num_actions):\n",
    "        self.brain = Brain(num_states, num_actions)  # 为智能体创建 brain 以做出决策\n",
    "\n",
    "    def update_Q_function(self, observation, action, reward, observation_next):\n",
    "        '''更新 Q函数，参数是状态、动作、得到的奖励、之后的状态'''\n",
    "        self.brain.update_Q_table(observation, action, reward, observation_next)\n",
    "\n",
    "    def get_action(self, observation, step):\n",
    "        '''确定动作'''\n",
    "        action = self.brain.decide_action(observation, step)\n",
    "        return action"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 设定 Brain 类\n",
    "class Brain:\n",
    "    def __init__(self, num_states, num_actions):\n",
    "        self.num_actions = num_actions  # CarPole 的两种动作\n",
    "        self.q_table = np.random.uniform(low=0, high=1, size=(NUM_DIZITIZED**num_states, num_actions))\n",
    "        # 创建Q表，行数是转换为数字得到的分割数（4个变量），列数表示动作数\n",
    "    \n",
    "    def bins(self, clip_min, clip_max, num):\n",
    "        '''求得观察到的状态（连续值）到离散值的数字转换与之'''\n",
    "        return np.linspace(clip_min, clip_max, num + 1)[1:-1]\n",
    "\n",
    "    def digitize_state(self, observation):\n",
    "        '''将观察到的 observation 状态转换为离散值'''\n",
    "        cart_pos, cart_v, pole_angle, pole_v = observation\n",
    "        digitized = [\n",
    "            np.digitize(cart_pos, bins=self.bins(-2.4, 2.4, NUM_DIZITIZED)),\n",
    "            np.digitize(cart_v, bins=self.bins(-3.0, 3.0, NUM_DIZITIZED)),\n",
    "            np.digitize(pole_angle, bins=self.bins(-0.5, 0.5, NUM_DIZITIZED)),\n",
    "            np.digitize(pole_v, bins=self.bins(-2.0, 2.0, NUM_DIZITIZED))\n",
    "        ]\n",
    "        return sum([x * (NUM_DIZITIZED**i) for i, x in enumerate(digitized)])\n",
    "\n",
    "    def update_Q_table(self, observation, action, reward, observation_next):\n",
    "        '''Q学习更新的Q表'''\n",
    "        state = self.digitize_state(observation)  # 状态离散化\n",
    "        state_next = self.digitize_state(observation_next)  # 将下一个状态离散化\n",
    "        Max_Q_next = max(self.q_table[state_next][:])\n",
    "        self.q_table[state, action] = self.q_table[state, action] + \\\n",
    "            ETA * (reward + GAMMA * Max_Q_next - self.q_table[state, action])\n",
    "\n",
    "    def decide_action(self, observation, episode):\n",
    "        '''根据 ε-贪婪法 逐渐采用最优动作'''\n",
    "        state = self.digitize_state(observation)\n",
    "        epsilon = 0.5 * (1 / (episode + 1))\n",
    "\n",
    "        if epsilon <= np.random.uniform(0, 1):\n",
    "            action = np.argmax(self.q_table[state][:])\n",
    "        else:\n",
    "            action = np.random.choice(self.num_actions)  # 返回行动0向左,1向右\n",
    "        return action"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 设定环境类\n",
    "class Environment:\n",
    "    \n",
    "    def __init__(self):\n",
    "        self.env = gym.make(ENV)    # 设定要执行的任务\n",
    "        num_states = self.env.observation_space.shape[0]    # 取得任务状态的变量个数\n",
    "        num_actions = self.env.action_space.n   # CartPole 中的动作数\n",
    "        self.agent = Agent(num_states, num_actions) # 创建在环境中行动的Agent\n",
    "\n",
    "    def run(self):\n",
    "        '''执行函数'''\n",
    "        complete_episodes = 0   # 持续195步或更多的试验次数\n",
    "        is_episode_final = False    # 最终试验的标志\n",
    "        frames = [] # 用于存储视频图像的变量\n",
    "\n",
    "        for episode in range(NUM_EPISODES): # 试验的最大重复次数\n",
    "            observation = self.env.reset()  # 环境初始化\n",
    "\n",
    "            for step in range(MAX_STEPS):   # 每个回合的循环\n",
    "\n",
    "                if is_episode_final is True:    # 将最终试验各个时刻的图像添加到帧中\n",
    "                    frames.append(self.env.render(mode='rgb_array'))    # 将各个时刻的图像添加到帧中\n",
    "\n",
    "                # 获取动作\n",
    "                action = self.agent.get_action(observation, episode)\n",
    "\n",
    "                # 执行动作 a_t 找到 s_{t+1}, r_{t+1}\n",
    "                observation_next, _, done, _ = self.env.step(\n",
    "                    action) # 不使用 regain 和 info\n",
    "\n",
    "                # 给予奖励\n",
    "                if done:    # 如果步数超过200，或者如果倾斜超过某个角度，则 done == True\n",
    "                    if step < 195:\n",
    "                        reward = -1 # 半途摔倒，奖励 -1\n",
    "                        complete_episodes = 0   # 站立超过 195 步，重置试验次数\n",
    "                    else:\n",
    "                        reward = 1  # 一直站立到结束给予奖励 1\n",
    "                        complete_episodes += 1  # 更新连续记录\n",
    "                else:\n",
    "                    reward = 0  # 途中奖励为 0\n",
    "\n",
    "                # 使用 step_1 的状态 observation_next 更新 Q 函数\n",
    "                self.agent.update_Q_function(\n",
    "                    observation, action, reward, observation_next)\n",
    "\n",
    "                # 更新 observation\n",
    "                observation = observation_next\n",
    "\n",
    "                # 结束时的处理\n",
    "                if done:\n",
    "                    print('{0} Episode: Finished after {1} time steps'.format(\n",
    "                        episode, step + 1))\n",
    "                    break\n",
    "\n",
    "            if is_episode_final is True:  # 在最后一次试验中保存并绘制动画\n",
    "                self.env.close() # 关闭奇怪的神秘窗口 *** python执行窗口 ***\n",
    "                html = display_frames_as_gif(frames)\n",
    "                html\n",
    "                break\n",
    "\n",
    "            if complete_episodes >= 10:  # 连续成功10次，绘制下一次试验为最终试验\n",
    "                print('连续成功 10 次')\n",
    "                is_episode_final = True  # 标记下次试验为最终试验"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "0 Episode: Finished after 9 time steps\n1 Episode: Finished after 80 time steps\n2 Episode: Finished after 18 time steps\n3 Episode: Finished after 154 time steps\n4 Episode: Finished after 51 time steps\n5 Episode: Finished after 70 time steps\n6 Episode: Finished after 64 time steps\n7 Episode: Finished after 147 time steps\n8 Episode: Finished after 66 time steps\n9 Episode: Finished after 66 time steps\n10 Episode: Finished after 34 time steps\n11 Episode: Finished after 72 time steps\n12 Episode: Finished after 47 time steps\n13 Episode: Finished after 200 time steps\n14 Episode: Finished after 107 time steps\n15 Episode: Finished after 50 time steps\n16 Episode: Finished after 87 time steps\n17 Episode: Finished after 46 time steps\n18 Episode: Finished after 43 time steps\n19 Episode: Finished after 200 time steps\n20 Episode: Finished after 46 time steps\n21 Episode: Finished after 200 time steps\n22 Episode: Finished after 9 time steps\n23 Episode: Finished after 83 time steps\n24 Episode: Finished after 63 time steps\n25 Episode: Finished after 25 time steps\n26 Episode: Finished after 9 time steps\n27 Episode: Finished after 45 time steps\n28 Episode: Finished after 104 time steps\n29 Episode: Finished after 56 time steps\n30 Episode: Finished after 15 time steps\n31 Episode: Finished after 81 time steps\n32 Episode: Finished after 19 time steps\n33 Episode: Finished after 80 time steps\n34 Episode: Finished after 13 time steps\n35 Episode: Finished after 9 time steps\n36 Episode: Finished after 76 time steps\n37 Episode: Finished after 60 time steps\n38 Episode: Finished after 21 time steps\n39 Episode: Finished after 148 time steps\n40 Episode: Finished after 195 time steps\n41 Episode: Finished after 18 time steps\n42 Episode: Finished after 192 time steps\n43 Episode: Finished after 18 time steps\n44 Episode: Finished after 21 time steps\n45 Episode: Finished after 200 time steps\n46 Episode: Finished after 29 time steps\n47 Episode: 
Finished after 62 time steps\n48 Episode: Finished after 41 time steps\n49 Episode: Finished after 39 time steps\n50 Episode: Finished after 40 time steps\n51 Episode: Finished after 45 time steps\n52 Episode: Finished after 69 time steps\n53 Episode: Finished after 118 time steps\n54 Episode: Finished after 124 time steps\n55 Episode: Finished after 74 time steps\n56 Episode: Finished after 55 time steps\n57 Episode: Finished after 67 time steps\n58 Episode: Finished after 23 time steps\n59 Episode: Finished after 82 time steps\n60 Episode: Finished after 86 time steps\n61 Episode: Finished after 129 time steps\n62 Episode: Finished after 187 time steps\n63 Episode: Finished after 83 time steps\n64 Episode: Finished after 15 time steps\n65 Episode: Finished after 10 time steps\n66 Episode: Finished after 124 time steps\n67 Episode: Finished after 12 time steps\n68 Episode: Finished after 11 time steps\n69 Episode: Finished after 27 time steps\n70 Episode: Finished after 114 time steps\n71 Episode: Finished after 89 time steps\n72 Episode: Finished after 63 time steps\n73 Episode: Finished after 43 time steps\n74 Episode: Finished after 75 time steps\n75 Episode: Finished after 108 time steps\n76 Episode: Finished after 144 time steps\n77 Episode: Finished after 44 time steps\n78 Episode: Finished after 200 time steps\n79 Episode: Finished after 134 time steps\n80 Episode: Finished after 119 time steps\n81 Episode: Finished after 62 time steps\n82 Episode: Finished after 99 time steps\n83 Episode: Finished after 108 time steps\n84 Episode: Finished after 168 time steps\n85 Episode: Finished after 64 time steps\n86 Episode: Finished after 84 time steps\n87 Episode: Finished after 70 time steps\n88 Episode: Finished after 58 time steps\n89 Episode: Finished after 149 time steps\n90 Episode: Finished after 200 time steps\n91 Episode: Finished after 200 time steps\n92 Episode: Finished after 85 time steps\n93 Episode: Finished after 200 time steps\n94 Episode: Finished 
after 93 time steps\n95 Episode: Finished after 200 time steps\n96 Episode: Finished after 200 time steps\n97 Episode: Finished after 153 time steps\n98 Episode: Finished after 86 time steps\n99 Episode: Finished after 133 time steps\n100 Episode: Finished after 113 time steps\n101 Episode: Finished after 109 time steps\n102 Episode: Finished after 200 time steps\n103 Episode: Finished after 184 time steps\n104 Episode: Finished after 200 time steps\n105 Episode: Finished after 200 time steps\n106 Episode: Finished after 45 time steps\n107 Episode: Finished after 41 time steps\n108 Episode: Finished after 110 time steps\n109 Episode: Finished after 200 time steps\n110 Episode: Finished after 200 time steps\n111 Episode: Finished after 141 time steps\n112 Episode: Finished after 200 time steps\n113 Episode: Finished after 200 time steps\n114 Episode: Finished after 200 time steps\n115 Episode: Finished after 200 time steps\n116 Episode: Finished after 200 time steps\n117 Episode: Finished after 144 time steps\n118 Episode: Finished after 32 time steps\n119 Episode: Finished after 116 time steps\n120 Episode: Finished after 200 time steps\n121 Episode: Finished after 97 time steps\n122 Episode: Finished after 200 time steps\n123 Episode: Finished after 84 time steps\n124 Episode: Finished after 200 time steps\n125 Episode: Finished after 86 time steps\n126 Episode: Finished after 200 time steps\n127 Episode: Finished after 91 time steps\n128 Episode: Finished after 111 time steps\n129 Episode: Finished after 200 time steps\n130 Episode: Finished after 161 time steps\n131 Episode: Finished after 200 time steps\n132 Episode: Finished after 200 time steps\n133 Episode: Finished after 172 time steps\n134 Episode: Finished after 200 time steps\n135 Episode: Finished after 200 time steps\n136 Episode: Finished after 200 time steps\n137 Episode: Finished after 135 time steps\n138 Episode: Finished after 200 time steps\n139 Episode: Finished after 185 time steps\n140 
Episode: Finished after 200 time steps\n141 Episode: Finished after 183 time steps\n142 Episode: Finished after 200 time steps\n143 Episode: Finished after 200 time steps\n144 Episode: Finished after 200 time steps\n145 Episode: Finished after 200 time steps\n146 Episode: Finished after 159 time steps\n147 Episode: Finished after 123 time steps\n148 Episode: Finished after 114 time steps\n149 Episode: Finished after 200 time steps\n150 Episode: Finished after 54 time steps\n151 Episode: Finished after 200 time steps\n152 Episode: Finished after 115 time steps\n153 Episode: Finished after 197 time steps\n154 Episode: Finished after 138 time steps\n155 Episode: Finished after 91 time steps\n156 Episode: Finished after 200 time steps\n157 Episode: Finished after 200 time steps\n158 Episode: Finished after 200 time steps\n159 Episode: Finished after 159 time steps\n160 Episode: Finished after 153 time steps\n161 Episode: Finished after 130 time steps\n162 Episode: Finished after 59 time steps\n163 Episode: Finished after 148 time steps\n164 Episode: Finished after 200 time steps\n165 Episode: Finished after 200 time steps\n166 Episode: Finished after 200 time steps\n167 Episode: Finished after 200 time steps\n168 Episode: Finished after 200 time steps\n169 Episode: Finished after 200 time steps\n170 Episode: Finished after 200 time steps\n171 Episode: Finished after 116 time steps\n172 Episode: Finished after 32 time steps\n173 Episode: Finished after 146 time steps\n174 Episode: Finished after 171 time steps\n175 Episode: Finished after 200 time steps\n176 Episode: Finished after 164 time steps\n177 Episode: Finished after 200 time steps\n178 Episode: Finished after 200 time steps\n179 Episode: Finished after 200 time steps\n180 Episode: Finished after 200 time steps\n181 Episode: Finished after 200 time steps\n182 Episode: Finished after 200 time steps\n183 Episode: Finished after 200 time steps\n184 Episode: Finished after 200 time steps\n185 Episode: Finished after 
200 time steps\n186 Episode: Finished after 200 time steps\n连续成功 10 次\n187 Episode: Finished after 200 time steps\n"
    },
    {
     "output_type": "display_data",
     "data": {
      "text/plain": "<Figure size 600x400 with 1 Axes>",
      "image/svg+xml": "<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"no\"?>\r\n<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\"\r\n  \"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\r\n<!-- Created with matplotlib (https://matplotlib.org/) -->\r\n<svg height=\"316.4pt\" version=\"1.1\" viewBox=\"0 0 467.4 316.4\" width=\"467.4pt\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\r\n <metadata>\r\n  <rdf:RDF xmlns:cc=\"http://creativecommons.org/ns#\" xmlns:dc=\"http://purl.org/dc/elements/1.1/\" xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">\r\n   <cc:Work>\r\n    <dc:type rdf:resource=\"http://purl.org/dc/dcmitype/StillImage\"/>\r\n    <dc:date>2020-08-24T19:29:21.878260</dc:date>\r\n    <dc:format>image/svg+xml</dc:format>\r\n    <dc:creator>\r\n     <cc:Agent>\r\n      <dc:title>Matplotlib v3.3.1, https://matplotlib.org/</dc:title>\r\n     </cc:Agent>\r\n    </dc:creator>\r\n   </cc:Work>\r\n  </rdf:RDF>\r\n </metadata>\r\n <defs>\r\n  <style type=\"text/css\">*{stroke-linecap:butt;stroke-linejoin:round;}</style>\r\n </defs>\r\n <g id=\"figure_1\">\r\n  <g id=\"patch_1\">\r\n   <path d=\"M 0 316.4 \r\nL 467.4 316.4 \r\nL 467.4 0 \r\nL 0 0 \r\nz\r\n\" style=\"fill:none;\"/>\r\n  </g>\r\n  <g id=\"axes_1\">\r\n   <g clip-path=\"url(#pf2adcf5587)\">\r\n    <image height=\"302\" id=\"image0373589854\" transform=\"scale(1 -1)translate(0 -302)\" width=\"453\" x=\"7.2\" 
xlink:href=\"data:image/png;base64,\r\niVBORw0KGgoAAAANSUhEUgAAAcUAAAEuCAYAAAD/QgnFAAAGqklEQVR4nO3cv2vcdRzH8ff3ck1Ka2sw9AdBCBRqBB3axXYxZFDQ7m4igoN/gGO3zg4W/4KOTsVFl4KCQgRRqw4pFMSKDWJb+kPT2CaXr8uLom2DEmu+uevjseU+3PEaju/zuLtc07ZtWwBA9boeAADbhSgCQIgiAIQoAkCIIgCEKAJAiCIAhCgCQIgiAIQoAkCIIgCEKAJAiCIAhCgCQIgiAIQoAkCIIgCEKAJAiCIAhCgCQIgiAIQoAkCIIgCEKAJAiCIAhCgCQIgiAIQoAkCIIgCEKAJAiCIAhCgCQIgiAIQoAkCIIgCEKAJAiCIAhCgCQIgiAIQoAkCIIgCEKAJAiCIAhCgCQIgiAIQoAkCIIgCEKAJAiCIAhCgCQIgiAIQoAkCIIgCEKAJAiCIAhCgCQIgiAIQoAkCIIgCEKAJAiCIAhCgCQIgiAES/6wHA4+XkyZN19uzZrmc84Ny5czU9Pd31DDomisCWWlpaqsXFxa5nPGB1dbXrCWwD3j4FgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIDonzp1qusNwGPk/PnzXU94qNOnT9fk5GTXM+hYf25urusNwGNkYWFh0/ftNU31es29vweD9WofxaiqOnbsWB04cOARPRrDqj8/P9/1BuAxcubMmU3f960TR+uNV47UzdV9VVX17gcLdfHnazXRW66dY8u1eOlKtZus5PHjx2tmZmbT2xgNfhAcGBpjY726svZcfX/rxWqrV/OvvlbzVTU1vlTP7/mkTrzzfq3cXet6JkPMF22AIdLUT7efrfa+S9e1u9P11fWXauB1Pv+RKAJD4+LvR+tG3jq93621qWrb5qFn8G+JIjA01tteuWzxf/LsAobG07su1hP9613PYISJIjA0do/drL39q1X3/SPGeG+lXnjq4+o3q90MY2T4VBoYKkcmP62J31b+9vnhvonLNbXjcoerGBWiCAyVXtPWc3u/eOD2tUEHYxg5oghsqYMHD9bs7Oym7js1NbXhWdM0dfiZw3VndX1Tj93vuxxS1bTtZn//AWBrXf7yw/rlm48eftj06sib79XYjomtHcVI8UUbAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIbC8q8/1tULn2943hvbsYVrGFWiCAyFweoftbZya8PzQy+/Xb3++BYuYhSJIjASmqZXTdN0PYMhJ4oAEKIIACGKABCiCAAhigAQoggAIYoAEKIIACGKABCiCAAhigAQoggAIYoAEKIIACGKABCiCAAhigAQoggAIYoAEKIIACGKABCiCAAhigAQoggAIYoAEKIIACGKABCiCAAhigAQoggAIYoAEKIIACGKABCiCGx764O1unrhsw3P90zP1sST+7dwEaNKFIFtr10f1PUfvt7wfPf+QzWxZ2oLFzGqRBEAQhQBIEQRAEIUASBEEQBCFAEgRBEAQhQBIEQRAEIUASBEEQBCFAEgRBEAQhQBIEQRAEIUASBEEQBCFAEgRBEAQhQBIEQRAEIUASBEEQBCFAEgRBEAQhQBIPpdDwD4J72xfs3MvV7Vtvduu3Hpu7p56dsOVzGKmrb9y7MMYEis3VmuwZ3bVVU1Nr6r+jt3d7yIUSCKABA+UwSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEg
BBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUACFEEgBBFAAhRBIAQRQAIUQSAEEUAiD8BicCivkF2l2MAAAAASUVORK5CYII=\" y=\"-7.2\"/>\r\n   </g>\r\n  </g>\r\n </g>\r\n <defs>\r\n  <clipPath id=\"pf2adcf5587\">\r\n   <rect height=\"302\" width=\"453\" x=\"7.2\" y=\"7.2\"/>\r\n  </clipPath>\r\n </defs>\r\n</svg>\r\n",
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAdMAAAE8CAYAAACb7Fv6AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAAsTAAALEwEAmpwYAAAHYUlEQVR4nO3cz4uUBRzH8e/szvqzTFEjFFJCSS9RChVR4NVbQf+Af4b/Q/9Ah8hDl07eIihB9pJ20YKobcEfYIsYG5qtuTozTwfJInc05uPOODOv123ny/PwPQy8efZ55mk1TVMAwOBmRr0AAIw7MQWAkJgCQEhMASAkpgAQElMACLWfMPe7GQB4oNVv4MoUAEJiCgAhMQWAkJgCQEhMASAkpgAQElMACIkpAITEFABCYgoAITEFgJCYAkBITAEgJKYAEBJTAAiJKQCExBQAQmIKACExBYCQmAJASEwBICSmABASUwAIiSkAhMQUAEJiCgAhMQWAkJgCQEhMASAkpgAQElMACIkpAITEFABCYgoAITEFgJCYAkBITAEgJKYAEBJTAAiJKQCExBQAQmIKACExBYCQmAJASEwBICSmABASUwAIiSkAhMQUAEJiCgAhMQWAkJgCQEhMASAkpgAQElMACIkpAITEFABCYgoAITEFgJCYAkBITAEgJKYAEBJTAAiJKQCExBQAQmIKACExBYCQmAJASEwBICSmABASUwAIiSkAhMQUAEJiCgAhMQWAkJgCQEhMASAkpgAQElMACIkpAITEFABCYgoAITEFgJCYAkBITAEgJKYAEBJTAAiJKQCExBQAQmIKACExBYCQmAJASEwBICSmABASUwAIiSkAhMQUAEJiCgAhMQWAkJgCQEhMASAkpgAQElMACIkpAITEFABCYgoAITEFgJCYAkBITAEgJKYAEBJTAAiJKQCExBQAQmIKACExBYCQmAJASEwBICSmABASUwAIiSkAhMQUAEJiCgAhMQWAkJgCQEhMASAkpgAQElMACIkpAITao14A4GnrrK5Ud/VOVVXNbthS7U1bR7wRk05MgbHW9Lq1vHiuqmkefnbz6vd16+p3VVX10uvHa++b749oO6aFmAJjrdft1NX5z6qa3qhXYYq5ZwoAITEFgJCYAkBITAEgJKYAEBJTAAiJKQCExBQAQmIKACExBYCQmAJASEwBICSmABASUwAIiSkAhMQUAEJiCgAhMQWAkJgCQEhMASAkpgAQElMACIkpAITEFABCYgoAITEFgJCYAmOtNTNbO1450ne+cuNSrd5eHuJGTCMxBcbazGy7dh16r+/89tJCrd66McSNmEZiCgAhMQWAkJgCQEhMASAkpgAQElMACIkpAITEFABCYgoAITEFgJCYAkBITAEgJKYAEBJTAAiJKQCExBQAQmIKACExBYCQmAJASEwBICSmABASUwAIiSkAhMQUAEJiCgAhMQWAkJgCQEhMASAkpgAQElMACIkpAITEFABCYgoAITEFJl7T9KppmlGvwQQTU2Dszc5tqvbmbX3nl776uHqde0PciGkjpsDY2/ri/tp16N2+8173/hC3YRqJKQCExBQAQmIKACExBYCQmAJASEwBICSmABASUwAIiSkAhMQUAEJiCgAhMQWAkJgCQEhMASAkpgAQElMACIkpAITEFABCYgoAofaoFwD428mTJ+v06dMDHfvhO/vrg7f3rTnrdrt15MgbtXq/N9C5z5w5U3v37h3oWKaDmALPjOvXr9fCwsJAxy6/+nxVrR3Tpmlq8efF+vNeZ6BzdzqDHcf0EFNgYvSaVv14+61qmtbDz3Zv/KV2zl0Z3VJMBTEFJsbFm8dq6e6Bqvonpkt3D9Rr2+6PbimmggeQgImw0n2hfu/sqn+HtKrqXm9zffvb8eo0c6NZjKkgpsBEuHbnYP3R2THqNZhSYgpMhJlWr6oGe1oXUmIKTISDz12o7XO/rjnb1l6uVqsZ8kZMEzEF
JkRTL2/5qVr/uTrduWGpju74umbLz1tYP57mBSZCt9ur3e0f6ui2G1VV9dHn39TiteXaOLNSm2ZX6u59MWX9PDamZ8+eHdIaAA9e2jCoT764UJ9+efHh391ur57WP3bPnTtXly9ffkpnY1wdO3as7+yxMZ2fn3/auwD0lcS01zTV667PfdHz58/X9u3b1+XcjI/HxbTVNI/98rljDwzNiRMn6tSpU6Ne4xFXrlypffvWflUhU6XVb+ABJAAIiSkAhMQUAEJiCgAhMQWAkJgCQEhMASAkpgAQElMACHnRPfDM2LNnTx0+fHjUazxibm5u1CvwjPM6QQD4f7xOEADWi5gCQEhMASAkpgAQElMACIkpAITEFABCYgoAITEFgJCYAkBITAEgJKYAEBJTAAiJKQCExBQAQmIKACExBYCQmAJASEwBICSmABASUwAIiSkAhMQUAEJiCgAhMQWAkJgCQEhMASAkpgAQElMACIkpAITEFABCYgoAITEFgJCYAkBITAEgJKYAEBJTAAiJKQCExBQAQmIKACExBYCQmAJASEwBICSmABASUwAIiSkAhMQUAEJiCgAhMQWAkJgCQEhMASAkpgAQElMACIkpAITEFABCYgoAITEFgJCYAkBITAEgJKYAEBJTAAiJKQCExBQAQu0nzFtD2QIAxpgrUwAIiSkAhMQUAEJiCgAhMQWAkJgCQOgvx5rEaGoSUygAAAAASUVORK5CYII=\n"
     },
     "metadata": {
      "needs_background": "light"
     }
    }
   ],
   "source": [
    "# main\n",
    "cartpole_env = Environment()\n",
    "cartpole_env.run()"
   ]
  }
 ]
}