{
 "cells": [
  {
   "cell_type": "markdown",
   "source": [
    "# TD3 (Twin Delayed DDPG) on BipedalWalker-v3"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "68f5f6f042d43b51"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "import time\n",
    "import gym\n",
    "import torch\n",
    "from torch.utils.tensorboard import SummaryWriter\n",
    "import numpy as np\n",
    "from collections import deque\n",
    "import random\n",
    "import copy\n",
    "import os\n",
    "import shutil"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "ff9d7dc7ed45a9b8"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "class CriticNet(torch.nn.Module):\n",
    "    \"\"\"Twin Q-network for TD3: two independent critics over (state, action).\n",
    "\n",
    "    TD3 bootstraps from the minimum of the two Q-estimates to curb the\n",
    "    overestimation bias of a single critic.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, env):\n",
    "        super(CriticNet, self).__init__()\n",
    "        in_dim = env.observation_space.shape[0] + env.action_space.shape[0]\n",
    "\n",
    "        # critic 1\n",
    "        self.fc1 = torch.nn.Linear(in_dim, 128)\n",
    "        self.relu1 = torch.nn.ReLU()\n",
    "        self.fc2 = torch.nn.Linear(128, 128)\n",
    "        self.relu2 = torch.nn.ReLU()\n",
    "        self.fc3 = torch.nn.Linear(128, 1)\n",
    "\n",
    "        # critic 2\n",
    "        self.fc4 = torch.nn.Linear(in_dim, 128)\n",
    "        self.relu4 = torch.nn.ReLU()\n",
    "        self.fc5 = torch.nn.Linear(128, 128)\n",
    "        self.relu5 = torch.nn.ReLU()\n",
    "        self.fc6 = torch.nn.Linear(128, 1)\n",
    "\n",
    "    def forward(self, observation, action):\n",
    "        \"\"\"Return both critics' Q-value estimates, each of shape (batch, 1).\"\"\"\n",
    "        # critic 1 shares its forward pass with Q1 (was duplicated verbatim)\n",
    "        x1 = self.Q1(observation, action)\n",
    "\n",
    "        # critic 2\n",
    "        cat_x = torch.cat([observation, action], dim=1)\n",
    "        x2 = self.relu4(self.fc4(cat_x))\n",
    "        x2 = self.relu5(self.fc5(x2))\n",
    "        x2 = self.fc6(x2)\n",
    "\n",
    "        return x1, x2\n",
    "\n",
    "    def Q1(self, observation, action):\n",
    "        \"\"\"Return only the first critic's Q-value (used for the actor loss).\"\"\"\n",
    "        cat_x = torch.cat([observation, action], dim=1)\n",
    "        x1 = self.relu1(self.fc1(cat_x))\n",
    "        x1 = self.relu2(self.fc2(x1))\n",
    "        x1 = self.fc3(x1)\n",
    "        return x1"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "137104b826319789"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "class ActorNet(torch.nn.Module):\n",
    "    \"\"\"Deterministic policy network mapping a state to a bounded action.\n",
    "\n",
    "    The tanh output in [-1, 1] is scaled by ``max_action`` to match the\n",
    "    environment's action range.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, env, max_action):\n",
    "        super(ActorNet, self).__init__()\n",
    "        self.fc1 = torch.nn.Linear(env.observation_space.shape[0], 128)\n",
    "        self.relu1 = torch.nn.ReLU()\n",
    "        self.fc2 = torch.nn.Linear(128, 128)\n",
    "        self.relu2 = torch.nn.ReLU()\n",
    "        self.fc3 = torch.nn.Linear(128, env.action_space.shape[0])\n",
    "        self.tanh = torch.nn.Tanh()\n",
    "        self.max_action = max_action\n",
    "\n",
    "    def forward(self, x):\n",
    "        hidden = self.relu1(self.fc1(x))\n",
    "        hidden = self.relu2(self.fc2(hidden))\n",
    "        squashed = self.tanh(self.fc3(hidden))\n",
    "        return self.max_action * squashed"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "510c3a5c5c3d006f"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "class TD3:\n",
    "    \"\"\"Twin Delayed DDPG (TD3) agent.\n",
    "\n",
    "    Implements clipped double-Q learning, target-policy smoothing and\n",
    "    delayed actor/target updates (Fujimoto et al., 2018).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, env, batch_size=64):\n",
    "        self.critic = CriticNet(env)\n",
    "        self.critic_target = copy.deepcopy(self.critic)\n",
    "        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=1e-4)\n",
    "\n",
    "        self.actor = ActorNet(env, max_action=env.action_space.high[0])\n",
    "        self.actor_target = copy.deepcopy(self.actor)\n",
    "        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=1e-4)\n",
    "\n",
    "        self.max_action = env.action_space.high[0]\n",
    "        self.mse = torch.nn.MSELoss()\n",
    "        self.buffer = deque(maxlen=(10 ** 6))  # replay buffer of [s, a, [r], s', [done]]\n",
    "        self.batch_size = batch_size\n",
    "        self.gamma = 0.99  # discount factor\n",
    "        self.tau = 0.005  # Polyak soft-update coefficient\n",
    "        self.noise_clip = 0.5 * self.max_action  # clip for target-policy smoothing noise\n",
    "        self.policy_noise = 0.2 * self.max_action  # std of target-policy smoothing noise\n",
    "        self.iter = 0  # learn() call counter, drives the delayed updates\n",
    "        self.policy_freq = 2  # update actor/targets every N critic updates\n",
    "        self.env = env\n",
    "\n",
    "    def choose_action(self, state):\n",
    "        \"\"\"Return the deterministic policy action for one state as a numpy array.\"\"\"\n",
    "        state = torch.tensor(state, dtype=torch.float32).unsqueeze(0)\n",
    "        with torch.no_grad():\n",
    "            action = self.actor(state).squeeze(0).numpy()\n",
    "        return action\n",
    "\n",
    "    def update_target(self):\n",
    "        \"\"\"Polyak-average online parameters into the target networks.\"\"\"\n",
    "        for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):\n",
    "            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\n",
    "\n",
    "        for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):\n",
    "            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\n",
    "\n",
    "    def learn(self):\n",
    "        \"\"\"Run one TD3 update from a random minibatch of the replay buffer.\"\"\"\n",
    "        self.iter += 1\n",
    "        batch_samples = random.sample(self.buffer, self.batch_size)\n",
    "        state_lst, action_lst, reward_lst, new_state_lst, done_lst = zip(*batch_samples)\n",
    "        # Go through np.array first: building a tensor from a list of\n",
    "        # ndarrays is much slower and warns in recent torch versions.\n",
    "        state_lst = torch.FloatTensor(np.array(state_lst))\n",
    "        action_lst = torch.FloatTensor(np.array(action_lst))\n",
    "        reward_lst = torch.FloatTensor(np.array(reward_lst))\n",
    "        new_state_lst = torch.FloatTensor(np.array(new_state_lst))\n",
    "        done_lst = torch.FloatTensor(np.array(done_lst))\n",
    "\n",
    "        # Critic update. The whole TD target is constant w.r.t. the online\n",
    "        # critic, so compute it under no_grad (previously gradients were\n",
    "        # needlessly back-propagated into the target networks).\n",
    "        with torch.no_grad():\n",
    "            noise = torch.clip(torch.randn_like(action_lst) * self.policy_noise, -self.noise_clip, self.noise_clip)\n",
    "            new_action = torch.clip(self.actor_target(new_state_lst) + noise, -self.max_action, self.max_action)\n",
    "            q_target1, q_target2 = self.critic_target(new_state_lst, new_action)\n",
    "            # clipped double-Q: bootstrap from the smaller target estimate\n",
    "            q_target = reward_lst + self.gamma * torch.min(q_target1, q_target2) * (1 - done_lst)\n",
    "\n",
    "        q_value1, q_value2 = self.critic(state_lst, action_lst)\n",
    "        td_error = self.mse(q_target, q_value1) + self.mse(q_target, q_value2)\n",
    "        self.critic_optimizer.zero_grad()\n",
    "        td_error.backward()\n",
    "        self.critic_optimizer.step()\n",
    "\n",
    "        # Delayed actor and target-network update\n",
    "        if self.iter % self.policy_freq == 0:\n",
    "            action = self.actor(state_lst)\n",
    "            q_value1 = self.critic.Q1(state_lst, action)\n",
    "            # Minimizing -Q maximizes the expected Q-value of the policy.\n",
    "            loss_actor = -torch.mean(q_value1)\n",
    "            self.actor_optimizer.zero_grad()\n",
    "            loss_actor.backward()\n",
    "            self.actor_optimizer.step()\n",
    "            self.update_target()\n",
    "\n",
    "    def model_save(self, path):\n",
    "        \"\"\"Save all networks and optimizers into a single checkpoint file.\"\"\"\n",
    "        torch.save({\n",
    "            'actor_model_state_dict': self.actor.state_dict(),\n",
    "            'actor_target_model_state_dict': self.actor_target.state_dict(),\n",
    "            'critic_model_state_dict': self.critic.state_dict(),\n",
    "            'critic_target_model_state_dict': self.critic_target.state_dict(),\n",
    "            'actor_optimizer_state_dict': self.actor_optimizer.state_dict(),\n",
    "            'critic_optimizer_state_dict': self.critic_optimizer.state_dict(),\n",
    "        }, path)\n",
    "\n",
    "    def model_load(self, path):\n",
    "        \"\"\"Restore networks and optimizers from a checkpoint written by model_save.\"\"\"\n",
    "        checkpoint = torch.load(path)\n",
    "        self.actor.load_state_dict(checkpoint['actor_model_state_dict'])\n",
    "        self.actor_target.load_state_dict(checkpoint['actor_target_model_state_dict'])\n",
    "        self.critic.load_state_dict(checkpoint['critic_model_state_dict'])\n",
    "        # fixed: key was misspelled 'ctitic_...', so every load raised KeyError\n",
    "        self.critic_target.load_state_dict(checkpoint['critic_target_model_state_dict'])\n",
    "        self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer_state_dict'])\n",
    "        self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer_state_dict'])"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "8d3e0d34c8f77f2d"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "log_dir = './runs'\n",
    "# Start every run with an empty TensorBoard directory. The previous logic\n",
    "# used os.rmdir (which fails on non-empty folders) and never recreated the\n",
    "# folder after deleting it; shutil.rmtree handles non-empty trees.\n",
    "if os.path.exists(log_dir):\n",
    "    shutil.rmtree(log_dir)\n",
    "    print(f'文件夹 {log_dir} 已成功删除。')\n",
    "os.makedirs(log_dir)\n",
    "print(f'已创建文件夹 {log_dir}。')"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "33c86c0c416b78b",
   "execution_count": null
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "model_save_dir = \"./model_save\"\n",
    "# Start with an empty checkpoint directory. The previous logic used\n",
    "# os.rmdir (fails on non-empty folders) and never recreated the folder\n",
    "# after deleting it; shutil.rmtree handles non-empty trees.\n",
    "if os.path.exists(model_save_dir):\n",
    "    shutil.rmtree(model_save_dir)\n",
    "    print(f'文件夹 {model_save_dir} 已成功删除。')\n",
    "os.makedirs(model_save_dir)\n",
    "print(f'已创建文件夹 {model_save_dir}。')"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "279cc1d04ffd03ba"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# Train TD3 on BipedalWalker-v3, logging episode returns to TensorBoard.\n",
    "logwriter = SummaryWriter(log_dir=log_dir)\n",
    "env = gym.make(\"BipedalWalker-v3\")\n",
    "batch_size = 256\n",
    "td3 = TD3(env, batch_size)\n",
    "episode = 2000  # total training episodes\n",
    "steps = 3000  # hard per-episode step cap\n",
    "all_reward = []\n",
    "now_epoch = 0\n",
    "expl_noise = 0.25  # exploration-noise scale, as a fraction of max_action\n",
    "for epoch in range(episode):\n",
    "    start_time = time.time()\n",
    "    state, _ = env.reset()\n",
    "    step = 0\n",
    "    episode_rewards = 0\n",
    "    done = False\n",
    "    expl_noise *= 0.999  # decay exploration noise every episode\n",
    "    while not done:\n",
    "        # Deterministic policy action plus clipped Gaussian exploration noise.\n",
    "        action = (td3.choose_action(state) + np.random.normal(0, td3.max_action * expl_noise,\n",
    "                                                              size=env.action_space.shape[0])).clip(-td3.max_action,\n",
    "                                                                                                    td3.max_action)\n",
    "\n",
    "        new_state, reward, done, _, _ = env.step(action)\n",
    "        # Reward shaping: a reward <= -100 (the fall penalty) is stored as -1,\n",
    "        # with done=True, to keep the TD targets in a small range.\n",
    "        if reward <= -100:\n",
    "            reward = -1\n",
    "            td3.buffer.append([state, action, [reward], new_state, [True]])\n",
    "        else:\n",
    "            td3.buffer.append([state, action, [reward], new_state, [False]])\n",
    "        state = new_state\n",
    "        episode_rewards += reward\n",
    "        # print(\"epoch: {}, step: {}, episode reward: {}\".format(epoch, step, episode_rewards))\n",
    "        step += 1\n",
    "        # if done or step == each_episode:\n",
    "        #     for _ in range(100):\n",
    "        #         td3.learn()\n",
    "        #     break\n",
    "        if step > steps:\n",
    "            break\n",
    "\n",
    "        # Start learning once the buffer holds enough transitions (warm-up).\n",
    "        if len(td3.buffer) > 2000:\n",
    "            td3.learn()\n",
    "    # Checkpoint every 10 episodes.\n",
    "    if epoch % 10 == 0:\n",
    "        if not os.path.exists(\"./model_save/\"):\n",
    "            os.makedirs(\"./model_save/\")\n",
    "        td3.model_save('./model_save/{}.pth'.format(epoch))\n",
    "    now_epoch = epoch\n",
    "    all_reward.append(episode_rewards)\n",
    "    logwriter.add_scalar('episode_rewards', episode_rewards, epoch)\n",
    "    print(\"Epoch/Episode: {}/{},reward: {}\".format(epoch + 1, episode, episode_rewards))"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "dbcb91b0b5622bc0"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# Roll out the trained policy with on-screen rendering.\n",
    "env = gym.make(\"BipedalWalker-v3\", render_mode='human')\n",
    "episode_rewards = 0\n",
    "for _ in range(50):\n",
    "    start_time = time.time()\n",
    "    state, _ = env.reset()\n",
    "    step = 0\n",
    "    while True:\n",
    "        # choose_action builds the tensor itself; pass the raw numpy state\n",
    "        # (the old torch.tensor(state) wrapper triggered a copy-construct\n",
    "        # warning).\n",
    "        a = td3.choose_action(state)\n",
    "        new_state, reward, done, truncated, _ = env.step(a)\n",
    "        step += 1\n",
    "        state = new_state\n",
    "        # Also stop on time-limit truncation; the old loop only checked\n",
    "        # `done`, so a truncated episode never terminated.\n",
    "        if done or truncated:\n",
    "            end_time = time.time()\n",
    "            print(end_time - start_time)\n",
    "            break"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "1a8505b2d8e772f5"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# Release the environment's resources (closes the human-render window).\n",
    "env.close()"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "a4467a7db8e39694"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# Save a final checkpoint named after the last completed training episode.\n",
    "td3.model_save('./model_save/{}.pth'.format(now_epoch))"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "4dd2ccdd3505d81b"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
