{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gym\n",
    "\n",
    "\n",
    "# Custom Wrapper: adapts Pendulum-v1's new-style gym API\n",
    "# (reset -> (obs, info), step -> 5-tuple) back to the old 4-tuple API.\n",
    "class Pendulum(gym.Wrapper):\n",
    "\n",
    "    def __init__(self):\n",
    "        env = gym.make('Pendulum-v1')\n",
    "        super().__init__(env)\n",
    "        self.env = env\n",
    "\n",
    "    def reset(self):\n",
    "        # Drop the info dict returned by the new gym reset API.\n",
    "        state, _ = self.env.reset()\n",
    "        return state\n",
    "\n",
    "    def step(self, action):\n",
    "        # Discard the truncated flag; return the classic 4-tuple.\n",
    "        state, reward, done, _, info = self.env.step(action)\n",
    "        return state, reward, done, info\n",
    "\n",
    "\n",
    "env = Pendulum()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test an environment.\n",
    "# Runs a random-action rollout, printing state/action/reward\n",
    "# every 20 steps, for at most ~200 steps.\n",
    "def test(env, wrap_action_in_list=False):\n",
    "    print(env)\n",
    "    state = env.reset()\n",
    "    over = False\n",
    "    step = 0\n",
    "\n",
    "    while not over:\n",
    "        action = env.action_space.sample()\n",
    "\n",
    "        # Vectorized envs (e.g. DummyVecEnv) expect a batch of actions.\n",
    "        if wrap_action_in_list:\n",
    "            action = [action]\n",
    "\n",
    "        next_state, reward, over, _ = env.step(action)\n",
    "\n",
    "        if step % 20 == 0:\n",
    "            print(f'步数：{step}, 状态：{state}, 动作：{action}, 奖励：{reward}')\n",
    "\n",
    "        if step > 200:\n",
    "            break\n",
    "\n",
    "        state = next_state\n",
    "        step += 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<Pendulum<TimeLimit<OrderEnforcing<PassiveEnvChecker<PendulumEnv<Pendulum-v1>>>>>>\n",
      "步数：0, 状态：[-0.9485196 -0.3167184 -0.7127596], 动作：[1.654349], 奖励：-8.002132276987856\n",
      "步数：20, 状态：[-0.98445374  0.17564416  0.9585144 ], 动作：[-0.95357794], 奖励：-8.884202600985835\n",
      "步数：40, 状态：[-0.99971926  0.0236936  -0.5106344 ], 动作：[-1.1745076], 奖励：-9.748734883979335\n",
      "步数：60, 状态：[-0.9704831  -0.24116904 -0.08722568], 动作：[0.9827593], 奖励：-8.400260412378952\n",
      "步数：80, 状态：[-0.9866331   0.16295727  1.319678  ], 动作：[0.06438919], 奖励：-9.042079771335903\n",
      "步数：100, 状态：[-0.99616694 -0.08747236 -1.1547161 ], 动作：[1.9286486], 奖励：-9.464023720650783\n",
      "步数：120, 状态：[-0.9715424  -0.23686576  1.0292535 ], 动作：[0.0436966], 奖励：-8.530178149881364\n",
      "步数：140, 状态：[-0.9600606   0.27979225 -0.96577847], 动作：[-1.1835487], 奖励：-8.262923007043444\n",
      "步数：160, 状态：[-0.84107107 -0.54092467  0.9636099 ], 动作：[1.0250952], 奖励：-6.699095821068759\n",
      "步数：180, 状态：[-0.91165006  0.41096738 -0.24980234], 动作：[0.34125254], 奖励：-7.3943030224980175\n",
      "步数：200, 状态：[-0.93776625 -0.3472671  -1.5725731 ], 动作：[0.226016], 奖励：-8.014369768542762\n"
     ]
    }
   ],
   "source": [
    "test(env)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<StepLimitWrapper<Pendulum<TimeLimit<OrderEnforcing<PassiveEnvChecker<PendulumEnv<Pendulum-v1>>>>>>>\n",
      "步数：0, 状态：[ 0.98243934  0.1865822  -0.07060298], 动作：[0.9132554], 奖励：-0.0365570786218706\n",
      "步数：20, 状态：[-0.7194758 -0.6945175  7.5498214], 动作：[0.08287454], 奖励：-11.335120843412106\n",
      "步数：40, 状态：[-0.4555774  -0.89019614 -5.733361  ], 动作：[0.69246155], 奖励：-7.464810524996308\n",
      "步数：60, 状态：[0.356259   0.93438727 2.803752  ], 动作：[-1.4670881], 奖励：-2.2439821305590306\n",
      "步数：80, 状态：[ 0.02561292 -0.99967194 -2.0167444 ], 动作：[0.92998976], 奖励：-2.79517372007571\n"
     ]
    }
   ],
   "source": [
    "# Change the maximum number of steps per episode.\n",
    "from typing import Tuple\n",
    "from gym.core import Env\n",
    "\n",
    "\n",
    "class StepLimitWrapper(gym.Wrapper):\n",
    "    def __init__(self, env: Env):\n",
    "        super().__init__(env)\n",
    "        # Step counter for the current episode.\n",
    "        self.current_step = 0\n",
    "\n",
    "    def reset(self):\n",
    "        self.current_step = 0\n",
    "        return self.env.reset()\n",
    "\n",
    "    def step(self, action):\n",
    "        self.current_step += 1\n",
    "        state, reward, done, info = self.env.step(action)\n",
    "\n",
    "        # Override the done flag: force termination after 100 steps.\n",
    "        if self.current_step >= 100:\n",
    "            done = True\n",
    "\n",
    "        return state, reward, done, info\n",
    "\n",
    "test(StepLimitWrapper(env))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<NormalizeActionWrapper<Pendulum<TimeLimit<OrderEnforcing<PassiveEnvChecker<PendulumEnv<Pendulum-v1>>>>>>>\n",
      "步数：0, 状态：[ 0.18524757 -0.9826919  -0.7371216 ], 动作：[0.67945147], 奖励：-1.9715606958873264\n",
      "步数：20, 状态：[0.31355086 0.94957143 0.29341298], 动作：[-0.09412747], 奖励：-1.575786838808649\n",
      "步数：40, 状态：[ 0.6616328  -0.74982804  1.2496188 ], 动作：[-0.2623521], 奖励：-0.8749919233617067\n",
      "步数：60, 状态：[ 0.0783648   0.99692476 -4.180228  ], 动作：[-0.3174111], 奖励：-3.974643538408751\n",
      "步数：80, 状态：[-0.69145614 -0.7224184   5.8226213 ], 动作：[-0.3266237], 奖励：-8.8393507976012\n",
      "步数：100, 状态：[-0.8366796   0.54769266 -5.9735026 ], 动作：[-0.8915447], 奖励：-10.132853810331078\n",
      "步数：120, 状态：[-0.9950617  -0.09925841  6.8076344 ], 动作：[0.2273723], 奖励：-13.889241777450307\n",
      "步数：140, 状态：[-0.3607267  -0.93267155 -4.7865205 ], 动作：[-0.9853524], 奖励：-6.055040753296405\n",
      "步数：160, 状态：[0.6190994  0.78531265 1.6553317 ], 动作：[0.9635093], 奖励：-1.0907125996132323\n",
      "步数：180, 状态：[ 0.64910537 -0.7606985   0.8077902 ], 动作：[-0.3719924], 奖励：-0.8125584400336769\n",
      "步数：200, 状态：[ 0.30227983  0.95321923 -3.1930358 ], 动作：[-0.40807655], 奖励：-2.616684516595537\n"
     ]
    }
   ],
   "source": [
    "# Rescale the action space.\n",
    "import numpy as np\n",
    "\n",
    "class NormalizeActionWrapper(gym.Wrapper):\n",
    "    \"\"\"Expose a [-1, 1] action space; rescale actions to [-2, 2] on step.\"\"\"\n",
    "\n",
    "    def __init__(self, env):\n",
    "        # Grab the wrapped action space; it must be continuous (Box).\n",
    "        action_space = env.action_space\n",
    "        assert isinstance(action_space, gym.spaces.Box)\n",
    "\n",
    "        # Redefine the action space as continuous values in [-1, 1].\n",
    "        # This only affects what env.action_space.sample() returns.\n",
    "        env.action_space = gym.spaces.Box(low=-1, high=1, shape=action_space.shape, dtype=np.float32)\n",
    "\n",
    "        super().__init__(env)\n",
    "\n",
    "    # BUGFIX: reset/step used to be nested inside __init__, so they never\n",
    "    # overrode the wrapper's methods and the rescaling was dead code.\n",
    "    def reset(self):\n",
    "        return self.env.reset()\n",
    "\n",
    "    def step(self, action):\n",
    "        # Scale the action back to the wrapped env's [-2, 2] range,\n",
    "        # clamping both ends (the old lower clamp wrongly set +2.0).\n",
    "        action = np.clip(np.asarray(action) * 2.0, -2.0, 2.0)\n",
    "        return self.env.step(action)\n",
    "\n",
    "\n",
    "test(NormalizeActionWrapper(env))\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using cuda device\n",
      "------------------------------------\n",
      "| time/                 |          |\n",
      "|    fps                | 127      |\n",
      "|    iterations         | 100      |\n",
      "|    time_elapsed       | 3        |\n",
      "|    total_timesteps    | 500      |\n",
      "| train/                |          |\n",
      "|    entropy_loss       | -1.42    |\n",
      "|    explained_variance | -0.1     |\n",
      "|    learning_rate      | 0.0007   |\n",
      "|    n_updates          | 99       |\n",
      "|    policy_loss        | -20.8    |\n",
      "|    std                | 1        |\n",
      "|    value_loss         | 363      |\n",
      "------------------------------------\n",
      "------------------------------------\n",
      "| time/                 |          |\n",
      "|    fps                | 168      |\n",
      "|    iterations         | 200      |\n",
      "|    time_elapsed       | 5        |\n",
      "|    total_timesteps    | 1000     |\n",
      "| train/                |          |\n",
      "|    entropy_loss       | -1.44    |\n",
      "|    explained_variance | -0.186   |\n",
      "|    learning_rate      | 0.0007   |\n",
      "|    n_updates          | 199      |\n",
      "|    policy_loss        | -6.65    |\n",
      "|    std                | 1.02     |\n",
      "|    value_loss         | 32.8     |\n",
      "------------------------------------\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<stable_baselines3.a2c.a2c.A2C at 0x240dfc00460>"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from stable_baselines3 import A2C\n",
    "from stable_baselines3.common.monitor import Monitor\n",
    "from stable_baselines3.common.vec_env import DummyVecEnv\n",
    "\n",
    "# The Monitor wrapper logs rollout/ep_len_mean and rollout/ep_rew_mean\n",
    "# during training, i.e. it just adds some extra logging.\n",
    "\n",
    "env = DummyVecEnv([lambda: Monitor(Pendulum())])\n",
    "\n",
    "A2C('MlpPolicy', env, verbose=1).learn(1000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<stable_baselines3.common.vec_env.vec_normalize.VecNormalize object at 0x000002408BE23070>\n",
      "步数：0, 状态：[[-0.00676237 -0.00368545  0.00577242]], 动作：[array([1.1523112], dtype=float32)], 奖励：[-10.]\n",
      "步数：20, 状态：[[2.2647939  1.5783232  0.44640988]], 动作：[array([0.01591118], dtype=float32)], 奖励：[-0.12272195]\n",
      "步数：40, 状态：[[ 1.499792   -1.3989912  -0.42802542]], 动作：[array([-1.8438398], dtype=float32)], 奖励：[-0.07508214]\n",
      "步数：60, 状态：[[0.23525646 0.9727486  1.2676628 ]], 动作：[array([-1.7104793], dtype=float32)], 奖励：[-0.07027234]\n",
      "步数：80, 状态：[[-1.0602244  -0.44084117 -1.189618  ]], 动作：[array([-0.8350897], dtype=float32)], 奖励：[-0.07657163]\n",
      "步数：100, 状态：[[-1.1648761  -0.32230386  1.1165456 ]], 动作：[array([-1.0691822], dtype=float32)], 奖励：[-0.07095935]\n",
      "步数：120, 状态：[[-0.49567068  0.77412    -0.37112594]], 动作：[array([-0.62574637], dtype=float32)], 奖励：[-0.05085297]\n",
      "步数：140, 状态：[[ 0.6726645  -1.2823495   0.35681337]], 动作：[array([0.17599098], dtype=float32)], 奖励：[-0.03872017]\n",
      "步数：160, 状态：[[0.78195626 1.2575212  0.5672693 ]], 动作：[array([-0.89151704], dtype=float32)], 奖励：[-0.03604311]\n",
      "步数：180, 状态：[[-0.9755468  -0.41693377 -1.1616725 ]], 动作：[array([-1.8085713], dtype=float32)], 奖励：[-0.04910452]\n",
      "步数：200, 状态：[[-0.89013785 -0.5076129   0.75817585]], 动作：[array([0.8475342], dtype=float32)], 奖励：[-0.04563002]\n"
     ]
    }
   ],
   "source": [
    "from stable_baselines3.common.vec_env import VecNormalize,VecFrameStack\n",
    "\n",
    "# VecNormalize normalizes both the state and the reward.\n",
    "\n",
    "env = DummyVecEnv([Pendulum])\n",
    "env = VecNormalize(env)\n",
    "test(env,wrap_action_in_list=True)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Gym",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.16"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
