{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import gym\n",
    "import time\n",
    "from stable_baselines3.common.env_checker import check_env"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "class GoLeftEnv(gym.Env):\n",
    "    \"\"\"A simple 1-D corridor environment for stable-baselines3.\n",
    "\n",
    "    The agent starts at position 9 on a number line clamped to [0, 10]\n",
    "    and must walk left to position 0, where it receives reward 1 and the\n",
    "    episode ends. Uses the classic gym API (reset() -> obs, 4-tuple step).\n",
    "    \"\"\"\n",
    "\n",
    "    metadata = {\"render.modes\": [\"console\"]}\n",
    "\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "\n",
    "        # Starting position on the number line.\n",
    "        self.pos = 9\n",
    "\n",
    "        # Action space: only two actions in this environment, 0 = left, 1 = right.\n",
    "        self.action_space = gym.spaces.Discrete(2)\n",
    "\n",
    "        # Observation space: the agent's position on a 1-D axis in [0, 10].\n",
    "        self.observation_space = gym.spaces.Box(\n",
    "            low=0, high=10, shape=(1,), dtype=np.float32\n",
    "        )\n",
    "\n",
    "    def reset(self):\n",
    "        \"\"\"Reset the agent to the start position and return the initial observation.\"\"\"\n",
    "        self.pos = 9\n",
    "        return np.array([self.pos], dtype=np.float32)\n",
    "\n",
    "    def step(self, action):\n",
    "        \"\"\"Apply an action and return (observation, reward, done, info).\"\"\"\n",
    "        if action == 0:\n",
    "            self.pos -= 1\n",
    "\n",
    "        if action == 1:\n",
    "            self.pos += 1\n",
    "\n",
    "        # Clamp to the playing field: values below 0 become 0, above 10 become 10.\n",
    "        self.pos = np.clip(self.pos, 0, 10)\n",
    "\n",
    "        # The episode ends (with reward 1) only when the left end is reached.\n",
    "        done = self.pos == 0\n",
    "        reward = 1 if self.pos == 0 else 0\n",
    "\n",
    "        return np.array([self.pos], dtype=np.float32), reward, bool(done), {}\n",
    "\n",
    "    def render(self, mode='console'):\n",
    "        \"\"\"Print the current position; only the 'console' mode is supported.\n",
    "\n",
    "        NOTE: the gym API names this parameter ``mode`` (the original had a\n",
    "        typo ``model``), matching the 'render.modes' metadata key above.\n",
    "        \"\"\"\n",
    "        if mode != 'console':\n",
    "            raise NotImplementedError()\n",
    "        print(self.pos)\n",
    "\n",
    "    def close(self):\n",
    "        pass"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity-check the custom environment against the gym interface that\n",
    "# stable-baselines3 expects (spaces, reset/step signatures, dtypes).\n",
    "env = GoLeftEnv()\n",
    "check_env(env, warn=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using cuda device\n"
     ]
    }
   ],
   "source": [
    "from stable_baselines3 import PPO\n",
    "from stable_baselines3.common.env_util import make_vec_env  # adds Monitor + vectorization\n",
    "\n",
    "# Wrap the environment: make_vec_env builds 2 monitored, vectorized copies.\n",
    "train_env = make_vec_env(lambda: GoLeftEnv(), n_envs=2)\n",
    "\n",
    "# Define the PPO model with a plain MLP policy.\n",
    "model = PPO('MlpPolicy', train_env, verbose=1)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Query the (so far untrained) policy for a single action at the start state.\n",
    "model.predict(env.reset())[0].item()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第0步:state:[9.]    action:0     reward:0\n",
      "第1步:state:[8.]    action:1     reward:0\n",
      "第2步:state:[9.]    action:0     reward:0\n",
      "第3步:state:[8.]    action:0     reward:0\n",
      "第4步:state:[7.]    action:0     reward:0\n",
      "第5步:state:[6.]    action:1     reward:0\n",
      "第6步:state:[7.]    action:0     reward:0\n",
      "第7步:state:[6.]    action:0     reward:0\n",
      "第8步:state:[5.]    action:1     reward:0\n",
      "第9步:state:[6.]    action:1     reward:0\n",
      "第10步:state:[7.]    action:0     reward:0\n",
      "第11步:state:[6.]    action:0     reward:0\n",
      "第12步:state:[5.]    action:1     reward:0\n",
      "第13步:state:[6.]    action:1     reward:0\n",
      "第14步:state:[7.]    action:1     reward:0\n",
      "第15步:state:[8.]    action:0     reward:0\n",
      "第16步:state:[7.]    action:1     reward:0\n",
      "第17步:state:[8.]    action:0     reward:0\n",
      "第18步:state:[7.]    action:1     reward:0\n",
      "第19步:state:[8.]    action:1     reward:0\n",
      "第20步:state:[9.]    action:1     reward:0\n",
      "第21步:state:[10.]    action:1     reward:0\n",
      "第22步:state:[10.]    action:0     reward:0\n",
      "第23步:state:[9.]    action:1     reward:0\n",
      "第24步:state:[10.]    action:1     reward:0\n",
      "第25步:state:[10.]    action:1     reward:0\n",
      "第26步:state:[10.]    action:0     reward:0\n",
      "第27步:state:[9.]    action:1     reward:0\n",
      "第28步:state:[10.]    action:1     reward:0\n",
      "第29步:state:[10.]    action:1     reward:0\n",
      "第30步:state:[10.]    action:1     reward:0\n",
      "第31步:state:[10.]    action:0     reward:0\n",
      "第32步:state:[9.]    action:1     reward:0\n",
      "第33步:state:[10.]    action:1     reward:0\n",
      "第34步:state:[10.]    action:1     reward:0\n",
      "第35步:state:[10.]    action:0     reward:0\n",
      "第36步:state:[9.]    action:1     reward:0\n",
      "第37步:state:[10.]    action:1     reward:0\n",
      "第38步:state:[10.]    action:0     reward:0\n",
      "第39步:state:[9.]    action:0     reward:0\n",
      "第40步:state:[8.]    action:1     reward:0\n",
      "第41步:state:[9.]    action:1     reward:0\n",
      "第42步:state:[10.]    action:0     reward:0\n",
      "第43步:state:[9.]    action:0     reward:0\n",
      "第44步:state:[8.]    action:1     reward:0\n",
      "第45步:state:[9.]    action:1     reward:0\n",
      "第46步:state:[10.]    action:0     reward:0\n",
      "第47步:state:[9.]    action:1     reward:0\n",
      "第48步:state:[10.]    action:1     reward:0\n",
      "第49步:state:[10.]    action:0     reward:0\n",
      "第50步:state:[9.]    action:0     reward:0\n",
      "第51步:state:[8.]    action:0     reward:0\n",
      "第52步:state:[7.]    action:1     reward:0\n",
      "第53步:state:[8.]    action:1     reward:0\n",
      "第54步:state:[9.]    action:1     reward:0\n",
      "第55步:state:[10.]    action:0     reward:0\n",
      "第56步:state:[9.]    action:0     reward:0\n",
      "第57步:state:[8.]    action:0     reward:0\n",
      "第58步:state:[7.]    action:1     reward:0\n",
      "第59步:state:[8.]    action:0     reward:0\n",
      "第60步:state:[7.]    action:1     reward:0\n",
      "第61步:state:[8.]    action:0     reward:0\n",
      "第62步:state:[7.]    action:1     reward:0\n",
      "第63步:state:[8.]    action:1     reward:0\n",
      "第64步:state:[9.]    action:0     reward:0\n",
      "第65步:state:[8.]    action:1     reward:0\n",
      "第66步:state:[9.]    action:0     reward:0\n",
      "第67步:state:[8.]    action:0     reward:0\n",
      "第68步:state:[7.]    action:0     reward:0\n",
      "第69步:state:[6.]    action:0     reward:0\n",
      "第70步:state:[5.]    action:1     reward:0\n",
      "第71步:state:[6.]    action:1     reward:0\n",
      "第72步:state:[7.]    action:0     reward:0\n",
      "第73步:state:[6.]    action:1     reward:0\n",
      "第74步:state:[7.]    action:0     reward:0\n",
      "第75步:state:[6.]    action:1     reward:0\n",
      "第76步:state:[7.]    action:0     reward:0\n",
      "第77步:state:[6.]    action:0     reward:0\n",
      "第78步:state:[5.]    action:0     reward:0\n",
      "第79步:state:[4.]    action:1     reward:0\n",
      "第80步:state:[5.]    action:0     reward:0\n",
      "第81步:state:[4.]    action:0     reward:0\n",
      "第82步:state:[3.]    action:0     reward:0\n",
      "第83步:state:[2.]    action:0     reward:0\n",
      "第84步:state:[1.]    action:1     reward:0\n",
      "第85步:state:[2.]    action:1     reward:0\n",
      "第86步:state:[3.]    action:1     reward:0\n",
      "第87步:state:[4.]    action:0     reward:0\n",
      "第88步:state:[3.]    action:1     reward:0\n",
      "第89步:state:[4.]    action:1     reward:0\n",
      "第90步:state:[5.]    action:0     reward:0\n",
      "第91步:state:[4.]    action:0     reward:0\n",
      "第92步:state:[3.]    action:1     reward:0\n",
      "第93步:state:[4.]    action:1     reward:0\n",
      "第94步:state:[5.]    action:1     reward:0\n",
      "第95步:state:[6.]    action:1     reward:0\n",
      "第96步:state:[7.]    action:0     reward:0\n",
      "第97步:state:[6.]    action:1     reward:0\n",
      "第98步:state:[7.]    action:0     reward:0\n",
      "第99步:state:[6.]    action:1     reward:0\n"
     ]
    }
   ],
   "source": [
    "def test(model, env):\n",
    "    \"\"\"Roll out the policy in ``env`` for up to 100 steps, printing each transition.\n",
    "\n",
    "    Stops early as soon as the environment reports the episode is over.\n",
    "    \"\"\"\n",
    "    state = env.reset()\n",
    "\n",
    "    # The loop index doubles as the step counter (the original kept a\n",
    "    # redundant manual ``step`` variable alongside an unused index ``i``).\n",
    "    for step in range(100):\n",
    "        action = model.predict(state)[0]\n",
    "        next_state, reward, over, _ = env.step(action)\n",
    "\n",
    "        print(f'第{step}步:state:{state}    action:{action}     reward:{reward}')\n",
    "        state = next_state\n",
    "\n",
    "        if over:\n",
    "            break\n",
    "\n",
    "test(model, env)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------\n",
      "| rollout/           |          |\n",
      "|    ep_len_mean     | 20.8     |\n",
      "|    ep_rew_mean     | 1        |\n",
      "| time/              |          |\n",
      "|    fps             | 655      |\n",
      "|    iterations      | 1        |\n",
      "|    time_elapsed    | 6        |\n",
      "|    total_timesteps | 4096     |\n",
      "---------------------------------\n",
      "----------------------------------------\n",
      "| rollout/                |            |\n",
      "|    ep_len_mean          | 15.3       |\n",
      "|    ep_rew_mean          | 1          |\n",
      "| time/                   |            |\n",
      "|    fps                  | 458        |\n",
      "|    iterations           | 2          |\n",
      "|    time_elapsed         | 17         |\n",
      "|    total_timesteps      | 8192       |\n",
      "| train/                  |            |\n",
      "|    approx_kl            | 0.03546957 |\n",
      "|    clip_fraction        | 0.368      |\n",
      "|    clip_range           | 0.2        |\n",
      "|    entropy_loss         | -0.494     |\n",
      "|    explained_variance   | 0.201      |\n",
      "|    learning_rate        | 0.0003     |\n",
      "|    loss                 | -0.0698    |\n",
      "|    n_updates            | 30         |\n",
      "|    policy_gradient_loss | -0.0465    |\n",
      "|    value_loss           | 0.0154     |\n",
      "----------------------------------------\n",
      "第0步:state:[9.]    action:0     reward:0\n",
      "第1步:state:[8.]    action:0     reward:0\n",
      "第2步:state:[7.]    action:0     reward:0\n",
      "第3步:state:[6.]    action:0     reward:0\n",
      "第4步:state:[5.]    action:0     reward:0\n",
      "第5步:state:[4.]    action:0     reward:0\n",
      "第6步:state:[3.]    action:0     reward:0\n",
      "第7步:state:[2.]    action:1     reward:0\n",
      "第8步:state:[3.]    action:0     reward:0\n",
      "第9步:state:[2.]    action:0     reward:0\n",
      "第10步:state:[1.]    action:0     reward:1\n"
     ]
    }
   ],
   "source": [
    "# Train for 5,000 timesteps across the 2 vectorized environments.\n",
    "model.learn(5000)\n",
    "# Test the trained policy (it should now walk straight left to 0).\n",
    "test(model,env)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Gym",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.16"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
