{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "f7003205-df1d-44fc-a9f3-77da364ca7a2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m[12-21 15:37:14 MainThread @utils.py:73]\u001b[0m paddlepaddle version: 2.2.1.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/patchy/miniconda3/envs/paddle/lib/python3.7/site-packages/gym/envs/registration.py:250: DeprecationWarning: SelectableGroups dict interface is deprecated. Use select.\n",
      "  for plugin in metadata.entry_points().get(entry_point, []):\n"
     ]
    }
   ],
   "source": [
    "import parl\n",
    "from parl.utils import logger\n",
    "import paddle\n",
    "import copy\n",
    "import numpy as np\n",
    "import os\n",
    "import gym\n",
    "import random\n",
    "import collections"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "788b0f51-cf2f-47dd-bffb-a432e2c205e2",
   "metadata": {},
   "outputs": [],
   "source": [
    "learn_freq = 3 # training frequency: learn after accumulating a few new experiences instead of every step, for efficiency\n",
    "memory_warmup_size = 50  # number of episodes to pre-store in the episode replay memory before training starts\n",
    "batch_size = 8   # number of episodes randomly sampled from the replay memory per agent.learn call\n",
    "lr = 6e-4 # learning rate\n",
    "gamma = 0.99 # reward discount factor, usually between 0.9 and 0.999\n",
    "num_step=10  # length (in time steps) of each sub-sequence sampled from an episode for the LSTM\n",
    "episode_size=500    # capacity of the episode replay memory (dataset size); larger uses more RAM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "7104967c-c1af-4370-a631-fb9205642c2d",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Model(paddle.nn.Layer):\n",
    "    \"\"\"DRQN Q-network: FC -> LSTM -> FC -> FC, mapping observation sequences to per-action Q-values.\"\"\"\n",
    "    def __init__(self, obs_dim,act_dim):\n",
    "        super(Model,self).__init__()\n",
    "        self.hidden_size=64  # LSTM hidden state size\n",
    "        self.first=False  # when True, the next forward() consumes the stored (h, c) as the initial LSTM state\n",
    "        self.act_dim=act_dim\n",
    "        # 3 fully connected layers around a single-layer LSTM\n",
    "        self.fc1 =  paddle.nn.Sequential(\n",
    "                                        paddle.nn.Linear(obs_dim,128),\n",
    "                                        paddle.nn.ReLU())\n",
    "\n",
    "        self.fc2 = paddle.nn.Sequential(\n",
    "                                        paddle.nn.Linear(self.hidden_size,128),\n",
    "                                        paddle.nn.ReLU())\n",
    "        self.fc3 = paddle.nn.Linear(128,act_dim)\n",
    "        self.lstm=paddle.nn.LSTM(128,self.hidden_size,1)      #[input_size,hidden_size,num_layers]\n",
    "\n",
    "    def init_lstm_state(self,batch_size):\n",
    "        \"\"\"Reset the LSTM initial state to zeros for a batch; the next forward() call uses it once.\"\"\"\n",
    "        self.h=paddle.zeros(shape=[1,batch_size,self.hidden_size],dtype='float32')\n",
    "        self.c=paddle.zeros(shape=[1,batch_size,self.hidden_size],dtype='float32')\n",
    "        self.first=True\n",
    "\n",
    "    def forward(self, obs):\n",
    "        \"\"\"Given a state sequence, return Q-values for all actions: [Q(s,a1), Q(s,a2), ...].\n",
    "\n",
    "        obs is expected as [batch_size, num_steps, obs_dim] (callers reshape to this);\n",
    "        the LSTM outputs are flattened, so the result is [batch_size*num_steps, act_dim].\n",
    "        \"\"\"\n",
    "        obs = self.fc1(obs)\n",
    "        # use the freshly reset (h, c) exactly once, right after init_lstm_state()\n",
    "        if (self.first):\n",
    "            x,(h,c) = self.lstm(obs,(self.h,self.c))  #obs:[batch_size,num_steps,input_size]\n",
    "            self.first=False\n",
    "        else:\n",
    "            x,(h,c) = self.lstm(obs)  #obs:[batch_size,num_steps,input_size]\n",
    "        x=paddle.reshape(x,shape=[-1,self.hidden_size])\n",
    "        h2 = self.fc2(x)\n",
    "        Q = self.fc3(h2)\n",
    "        return Q"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "cba9a7c9-3c6d-4c8a-8bad-ba47f9a19c71",
   "metadata": {},
   "outputs": [],
   "source": [
    "class DRQN(parl.Algorithm):\n",
    "    \"\"\"DQN-style algorithm over a recurrent Q-network (DRQN) with a fixed target network.\"\"\"\n",
    "    def __init__(self, model, act_dim=None, gamma=None, lr=None):\n",
    "        self.model = model\n",
    "        self.target_model = copy.deepcopy(model)    # copy of the online network: fixed-Q-target\n",
    "\n",
    "        # validate argument types\n",
    "        assert isinstance(act_dim, int)\n",
    "        assert isinstance(gamma, float)\n",
    "        assert isinstance(lr, float)\n",
    "\n",
    "        self.act_dim = act_dim\n",
    "        self.gamma = gamma\n",
    "        self.lr = lr\n",
    "        self.optimizer=paddle.optimizer.Adam(learning_rate=self.lr,parameters=self.model.parameters())    # Adam optimizer\n",
    "    \n",
    "    def predict(self, obs):\n",
    "        \"\"\"Return Q-values for obs from the online network.\"\"\"\n",
    "        return self.model.forward(obs)\n",
    "        \n",
    "    def learn(self, obs, action, reward, next_obs, terminal):\n",
    "        \"\"\"Run one gradient step on the MSE between predicted Q(s,a) and the TD target.\"\"\"\n",
    "        # flatten the batch/time dimensions\n",
    "        action=paddle.reshape(action,shape=[-1])\n",
    "        reward=paddle.reshape(reward,shape=[-1])\n",
    "        terminal=paddle.reshape(terminal,shape=[-1])\n",
    "\n",
    "        # max Q' from the target network, used to build the TD target\n",
    "        next_predict_Q = self.target_model.forward(next_obs)\n",
    "        best_v = paddle.max(next_predict_Q, axis=-1)  # row-wise max; each row corresponds to one state\n",
    "        best_v.stop_gradient = True                 # freeze the target-network contribution\n",
    "        terminal = paddle.cast(terminal, dtype='float32')    # ensure float32 for the arithmetic below\n",
    "        target = reward + (1.0 - terminal) * self.gamma * best_v  # TD target; terminal states keep only the reward\n",
    "\n",
    "        predict_Q = self.model.forward(obs)  # predicted Q-values\n",
    "\n",
    "        # select Q(s,a) for the taken action via a one-hot mask,\n",
    "        # e.g. action 3 => [0,0,0,1,0]\n",
    "        action_onehot = paddle.nn.functional.one_hot(action, self.act_dim)\n",
    "        action_onehot = paddle.cast(action_onehot, dtype='float32')\n",
    "        predict_action_Q = paddle.sum(\n",
    "                                      paddle.multiply(action_onehot, predict_Q)   # elementwise product keeps only the chosen action's Q\n",
    "                                      , axis=1)  # e.g. pred [[2.3,5.7,1.2,3.9,1.4]], onehot [[0,0,0,1,0]] ==> [3.9]\n",
    "\n",
    "        # regression toward the TD target, so use mean squared error\n",
    "        loss=paddle.nn.functional.square_error_cost(predict_action_Q, target)\n",
    "        cost = paddle.mean(loss)\n",
    "        cost.backward()   # backpropagation\n",
    "        self.optimizer.step()  # update parameters\n",
    "        self.optimizer.clear_grad()  # clear gradients\n",
    "\n",
    "    def sync_target(self):\n",
    "        \"\"\"Copy the online network into the target network (fixed-Q-target).\n",
    "\n",
    "        Bug fix: this previously deep-copied the module-level global `model`\n",
    "        instead of `self.model`; they happen to be the same object in this\n",
    "        notebook, but the algorithm must not depend on a global.\n",
    "        \"\"\"\n",
    "        self.target_model = copy.deepcopy(self.model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "ae80c406-15c1-4701-9a70-63d348d4943e",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Agent(parl.Agent):\n",
    "    \"\"\"Epsilon-greedy agent that wraps the DRQN algorithm.\"\"\"\n",
    "    def __init__(self,\n",
    "                 algorithm,\n",
    "                 act_dim,\n",
    "                 e_greed=0.1,  \n",
    "                 e_greed_decrement=0 ):\n",
    "\n",
    "        # validate that act_dim is an int\n",
    "        assert isinstance(act_dim, int)\n",
    "\n",
    "        self.act_dim = act_dim\n",
    "        \n",
    "        # hand the algorithm to the parl.Agent base class so its members are reachable via self.alg\n",
    "        super(Agent, self).__init__(algorithm)\n",
    "\n",
    "        self.global_step = 0          # total number of learn() calls so far\n",
    "        self.update_target_steps = 200  # copy model params to target_model every 200 training steps\n",
    "\n",
    "        self.e_greed = e_greed  # probability of picking a random (exploratory) action\n",
    "        self.e_greed_decrement = e_greed_decrement  # exploration decays gradually as training converges\n",
    "\n",
    "    # obs here is a single observation, unlike the batched arguments of learn()\n",
    "    def sample(self, obs):\n",
    "        \"\"\"Epsilon-greedy action selection for one observation.\"\"\"\n",
    "        sample = np.random.rand()  # uniform float in [0, 1)\n",
    "        if sample < self.e_greed:\n",
    "            act = np.random.randint(self.act_dim)  # explore: every action can be chosen\n",
    "        else:\n",
    "            act = self.predict(obs)  # exploit: pick the best-known action\n",
    "        self.e_greed = max(\n",
    "            0.01, self.e_greed - self.e_greed_decrement)  # decay exploration, floored at 0.01\n",
    "        return act        \n",
    "\n",
    "    # run the network to obtain the greedy action\n",
    "    def predict(self, obs):  # pick the best action\n",
    "        \"\"\"Return the action with the largest predicted Q-value for obs.\"\"\"\n",
    "        obs=paddle.to_tensor(obs,dtype='float32')  # convert the array to a tensor\n",
    "        predict_Q=self.alg.predict(obs).numpy()    # convert the resulting tensor back to an array\n",
    "        act = np.argmax(predict_Q)  # index of the max Q-value, i.e. the action\n",
    "        return act\n",
    "\n",
    "    # learn() does two things: 1. periodically sync the target model  2. update the model.\n",
    "    # Both are delegated to functions of the underlying algorithm.\n",
    "    # Note: all arguments here are batched arrays.\n",
    "    def learn(self, obs, act, reward, next_obs, terminal):\n",
    "        \"\"\"Run one training update; syncs the target network every update_target_steps calls.\"\"\"\n",
    "        # sync model and target_model parameters every 200 training steps\n",
    "        if self.global_step % self.update_target_steps == 0:\n",
    "            self.alg.sync_target()\n",
    "        self.global_step += 1      # count this learn() call\n",
    "\n",
    "        # convert inputs to tensors\n",
    "        obs=paddle.to_tensor(obs,dtype='float32')\n",
    "        act=paddle.to_tensor(act,dtype='int32')\n",
    "        reward=paddle.to_tensor(reward,dtype='float32')\n",
    "        next_obs=paddle.to_tensor(next_obs,dtype='float32')\n",
    "        terminal=paddle.to_tensor(terminal,dtype='float32')\n",
    "        \n",
    "        # delegate the actual update to the algorithm\n",
    "        self.alg.learn(obs, act, reward, next_obs, terminal)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "da5fcdd1-c042-4163-bbed-f4a7cb376e9b",
   "metadata": {},
   "source": [
    "因为DRQN需要的数据是从一整个episode中采样，所以数据集中的每一个数据都要是一个episode。因此对经验池类进行了改写，原类收集每一step的功能不变，同时每一step都判断是不是一个episode的最后一步，即done是否为True。再新建一个episodemomery类，将所有的episode输入进去，随机挑选一个时间步进行处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "3efd6806-397b-44e7-86f1-a805c6ad23d6",
   "metadata": {},
   "outputs": [],
   "source": [
    "class EpisodeMemory(object):\n",
    "    def __init__(self,episode_size,num_step):\n",
    "        self.buffer = collections.deque(maxlen=episode_size)\n",
    "        self.num_step=num_step   #时间步长\n",
    "\n",
    "    def put(self,episode):\n",
    "        self.buffer.append(episode)\n",
    "        \n",
    "    def sample(self,batch_size):\n",
    "        mini_batch = random.sample(self.buffer, batch_size)  #返回值是个列表\n",
    "        obs_batch, action_batch, reward_batch, next_obs_batch, done_batch = [], [], [], [], []\n",
    "\n",
    "        for experience in mini_batch:\n",
    "            self.num_step = min(self.num_step, len(experience)) #防止序列长度小于预定义长度\n",
    "\n",
    "        for experience in mini_batch:\n",
    "            idx = np.random.randint(0, len(experience)-self.num_step+1)  #随机选取一个时间步的id\n",
    "            s, a, r, s_p, done = [],[],[],[],[]\n",
    "            for i in range(idx,idx+self.num_step):\n",
    "                e1,e2,e3,e4,e5=experience[i][0]\n",
    "                s.append(e1[0][0]),a.append(e2),r.append(e3),s_p.append(e4),done.append(e5)       \n",
    "            obs_batch.append(s)\n",
    "            action_batch.append(a)\n",
    "            reward_batch.append(r)\n",
    "            next_obs_batch.append(s_p)\n",
    "            done_batch.append(done)\n",
    "\n",
    "        #转换数据格式\n",
    "        obs_batch=np.array(obs_batch).astype('float32')\n",
    "        action_batch=np.array(action_batch).astype('float32')\n",
    "        reward_batch=np.array(reward_batch).astype('float32')\n",
    "        next_obs_batch=np.array(next_obs_batch).astype('float32')\n",
    "        done_batch=np.array(done_batch).astype('float32')\n",
    "\n",
    "        #将列表转换为数组并转换数据类型\n",
    "        return obs_batch,action_batch,reward_batch,next_obs_batch,done_batch    \n",
    "\n",
    "    #输出队列的长度\n",
    "    def __len__(self):\n",
    "        return len(self.buffer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "1ca1c130-b890-47bb-962b-9655ebdd4153",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ReplayMemory(object):\n",
    "    def __init__(self,e_rpm):\n",
    "        #创建一个固定长度的队列作为缓冲区域，当队列满时，会自动删除最老的一条信息\n",
    "        self.e_rpm=e_rpm\n",
    "        self.buff=[]\n",
    "    # 增加一条经验到经验池中\n",
    "    def append(self,exp,done):\n",
    "        self.buff.append([exp])\n",
    "        #将一整个episode添加进经验池\n",
    "        if(done):\n",
    "            self.e_rpm.put(self.buff)\n",
    "            self.buff=[]\n",
    "    #输出队列的长度\n",
    "    def __len__(self):\n",
    "        return len(self.buff)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ff8f0e9a-802e-4605-a7fe-707ec97429b4",
   "metadata": {},
   "source": [
    "设定每间隔一定episode就训练一次同时跳出循环。每次训练之前重新初始化LSTM的隐藏层参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "982da780-fcd7-4e0a-8a69-7d85dee9d0ee",
   "metadata": {},
   "outputs": [],
   "source": [
    "# run learn_freq training episodes, then (once warmed up) do one learning step\n",
    "def run_episode(env, agent, rpm, e_rpm, obs_shape):   # rpm is the per-step replay buffer\n",
    "    \"\"\"Collect learn_freq complete episodes into the replay memories, then train once.\n",
    "\n",
    "    Relies on the module-level constants learn_freq, memory_warmup_size and batch_size.\n",
    "    \"\"\"\n",
    "    for step in range(1,learn_freq+1):\n",
    "        # reset the environment\n",
    "        obs = env.reset()\n",
    "        while True:\n",
    "            obs=obs.reshape(1,1,obs_shape)  # [batch=1, time=1, obs_dim] for the recurrent model\n",
    "            action = agent.sample(obs)  # sample an action; every action has a chance to be tried\n",
    "            next_obs, reward, done, _ = env.step(action)\n",
    "            rpm.append((obs, action, reward, next_obs, done),done)   # collect data\n",
    "            obs = next_obs\n",
    "            if done:\n",
    "                break\n",
    "\n",
    "    # after storing enough experience, train at this interval\n",
    "    if (len(e_rpm) > memory_warmup_size):\n",
    "        # reset the LSTM state before each learning step.\n",
    "        # Bug fix: use the agent's own model rather than the module-level global `model`\n",
    "        # (they are the same object in this notebook, but the function must not depend on a global).\n",
    "        agent.alg.model.init_lstm_state(batch_size)\n",
    "        (batch_obs, batch_action, batch_reward, batch_next_obs,batch_done) = e_rpm.sample(batch_size)\n",
    "        agent.learn(batch_obs, batch_action, batch_reward,batch_next_obs,batch_done)  # s,a,r,s',done\n",
    "\n",
    "# evaluate the agent: run 5 episodes and average the total rewards\n",
    "def evaluate(env, agent, obs_shape,render=False):\n",
    "    \"\"\"Run 5 greedy episodes and return the mean episode reward.\"\"\"\n",
    "    rewards = []  # one total reward per evaluation episode\n",
    "    for _ in range(5):\n",
    "        observation = env.reset()\n",
    "        total = 0\n",
    "        done = False\n",
    "        while not done:\n",
    "            observation = observation.reshape(1, 1, obs_shape)\n",
    "            chosen = agent.predict(observation)  # greedy action only\n",
    "            observation, step_reward, done, _ = env.step(chosen)\n",
    "            total += step_reward\n",
    "            if render:\n",
    "                env.render()\n",
    "        rewards.append(total)\n",
    "    return np.mean(rewards)  # average over the 5 episodes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e5844d9f-fcf7-4442-97bc-1521137167ea",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m[12-20 16:02:01 MainThread @machine_info.py:90]\u001b[0m Cannot find available GPU devices, using CPU or other devices now. (Please check whether you can execute `nvidia-smi` command.)\n"
     ]
    }
   ],
   "source": [
    "env = gym.make('CartPole-v1')  \n",
    "action_dim = env.action_space.n  \n",
    "obs_shape = env.observation_space.shape  \n",
    "\n",
    "save_path = './dqn_model.ckpt'\n",
    "\n",
    "e_rpm=EpisodeMemory(episode_size,num_step)\n",
    "rpm = ReplayMemory(e_rpm)  # per-step replay buffer that flushes completed episodes into e_rpm\n",
    "# build the agent with the parl framework\n",
    "model = Model(obs_dim=obs_shape[0],act_dim=action_dim)\n",
    "algorithm = DRQN(model, act_dim=action_dim, gamma=gamma, lr=lr)\n",
    "agent = Agent(\n",
    "    algorithm,\n",
    "    act_dim=action_dim,\n",
    "    e_greed=0.1,  # probability of taking a random (exploratory) action\n",
    "    e_greed_decrement=8e-7)  # exploration decays gradually as training converges\n",
    "\n",
    "# pre-fill the replay memory so early training has enough sample diversity\n",
    "while len(e_rpm) < memory_warmup_size:\n",
    "    run_episode(env, agent, rpm,e_rpm,obs_shape[0])\n",
    "\n",
    "# training budget\n",
    "max_train_num = 2000\n",
    "best_acc=377.0  # only save checkpoints whose evaluation reward beats this value\n",
    "\n",
    "# agent.restore(save_path)\n",
    "\n",
    "# start training\n",
    "train_num = 0\n",
    "while train_num < max_train_num:  # run max_train_num training episodes; test episodes are not counted\n",
    "    # train part\n",
    "    # the for loop evaluates once every 50 training episodes\n",
    "    for i in range(0, 50):\n",
    "        run_episode(env, agent,rpm, e_rpm,obs_shape[0])\n",
    "        train_num += 1\n",
    "    # test part\n",
    "    eval_reward = evaluate(env, agent,obs_shape[0], render=False)  # render=True to visualize\n",
    "\n",
    "    if eval_reward>best_acc:\n",
    "        best_acc=eval_reward\n",
    "        agent.save(save_path)\n",
    "\n",
    "    # write progress to the log\n",
    "    logger.info('train_num:{}    e_greed:{}   test_reward:{}'.format(\n",
    "        train_num, agent.e_greed, eval_reward))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "152e9fff-77b8-4ffb-9035-54f9e2144900",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "80297a49353e01fd30519dbbd85f9d444d6210a697f633f8361ee088cad82092"
  },
  "kernelspec": {
   "display_name": "paddle:Python",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
