{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "initial_id",
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import copy\n",
    "import time\n",
    "import gym\n",
    "import pickle\n",
    "import numpy as np\n",
    "import os\n",
    "import numpy\n",
    "import torch\n",
    "from torch.utils.tensorboard import SummaryWriter\n",
    "from collections import deque\n",
    "import random\n",
    "import shutil"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "class Critic(torch.nn.Module):\n",
    "    \"\"\"Twin Q-network (SAC/TD3 style): two independent critics evaluated in one forward pass.\n",
    "\n",
    "    input_dims is expected to be state_dim + action_dim, since forward()\n",
    "    concatenates state and action before the first layer.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, input_dims, hidden_dims, output_dims=1):\n",
    "        super(Critic, self).__init__()\n",
    "\n",
    "        # Q1 architecture (original comment said \"Actor1\", but this is a critic)\n",
    "        self.head1 = torch.nn.Linear(input_dims, hidden_dims[0])\n",
    "        self.relu_head1 = torch.nn.ReLU()\n",
    "        self.hidden1 = self.build_layers(hidden_dims)\n",
    "        self.out1 = torch.nn.Linear(hidden_dims[-1], output_dims)\n",
    "\n",
    "        # Q2 architecture\n",
    "        self.head2 = torch.nn.Linear(input_dims, hidden_dims[0])\n",
    "        self.relu_head2 = torch.nn.ReLU()\n",
    "        self.hidden2 = self.build_layers(hidden_dims)\n",
    "        self.out2 = torch.nn.Linear(hidden_dims[-1], output_dims)\n",
    "\n",
    "        # Xavier-uniform init for every linear layer, zero biases\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, torch.nn.Linear):\n",
    "                torch.nn.init.xavier_uniform_(m.weight, gain=1)\n",
    "                torch.nn.init.constant_(m.bias, 0)\n",
    "\n",
    "    @staticmethod\n",
    "    def build_layers(hidden_dims: list):\n",
    "        \"\"\"Build a Sequential of Linear+ReLU blocks connecting consecutive entries of hidden_dims.\"\"\"\n",
    "        layers = []\n",
    "        for i in range(len(hidden_dims) - 1):\n",
    "            layers.extend([torch.nn.Linear(hidden_dims[i], hidden_dims[i + 1]), torch.nn.ReLU()])\n",
    "        hidden_layer = torch.nn.Sequential(*layers)\n",
    "        return hidden_layer\n",
    "\n",
    "    def forward(self, state, action):\n",
    "        \"\"\"Return (Q1, Q2) estimates for the given state-action batch.\"\"\"\n",
    "        # Both critics act on the concatenated state-action vector\n",
    "        x = torch.cat([state, action], dim=1)\n",
    "\n",
    "        x1 = self.relu_head1(self.head1(x))\n",
    "        x1 = self.hidden1(x1)\n",
    "        x1 = self.out1(x1)\n",
    "\n",
    "        x2 = self.relu_head2(self.head2(x))\n",
    "        x2 = self.hidden2(x2)\n",
    "        x2 = self.out2(x2)\n",
    "        return x1, x2"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "fda7c960d0a9ba95"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "class GaussianActor(torch.nn.Module):\n",
    "    \"\"\"Squashed-Gaussian policy network for SAC.\n",
    "\n",
    "    forward() returns a tanh-squashed action scaled by the environment's\n",
    "    action bound and, optionally, the log-probability of that action.\n",
    "    act_limit is the environment's action_space (see SoftActorCritic.__init__);\n",
    "    its .high[0] is used as the symmetric action bound.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, input_dims, hidden_dims, output_dims, act_limit):\n",
    "        super(GaussianActor, self).__init__()\n",
    "        self.head = torch.nn.Linear(input_dims, hidden_dims[0])\n",
    "        self.relu_head = torch.nn.ReLU()\n",
    "        self.hidden = self.build_layers(hidden_dims)\n",
    "        self.out_mean = torch.nn.Linear(hidden_dims[-1], output_dims)\n",
    "        self.out_log_std = torch.nn.Linear(hidden_dims[-1], output_dims)\n",
    "        self.max_action = 1   # kept for compatibility; unused\n",
    "        self.min_action = -1  # kept for compatibility; unused\n",
    "        # Clamp bounds keep the std in a numerically safe range\n",
    "        self.LOG_SIG_MAX = 2\n",
    "        self.LOG_SIG_MIN = -20\n",
    "        self.epsilon = 1e-6   # kept for compatibility; unused\n",
    "        self.act_limit = act_limit\n",
    "\n",
    "        # Xavier-uniform init for every linear layer, zero biases\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, torch.nn.Linear):\n",
    "                torch.nn.init.xavier_uniform_(m.weight, gain=1)\n",
    "                torch.nn.init.constant_(m.bias, 0)\n",
    "\n",
    "    @staticmethod\n",
    "    def build_layers(hidden_dims: list):\n",
    "        \"\"\"Build a Sequential of Linear+ReLU blocks connecting consecutive entries of hidden_dims.\"\"\"\n",
    "        layers = []\n",
    "        for i in range(len(hidden_dims) - 1):\n",
    "            layers.extend([torch.nn.Linear(hidden_dims[i], hidden_dims[i + 1]), torch.nn.ReLU()])\n",
    "        hidden_layer = torch.nn.Sequential(*layers)\n",
    "        return hidden_layer\n",
    "\n",
    "    def forward(self, observation, deterministic=False, with_logprob=True):\n",
    "        \"\"\"Return (action, log_pi) for a batch of observations.\n",
    "\n",
    "        deterministic=True returns the distribution mean (used for evaluation);\n",
    "        with_logprob=False skips the log-probability computation (log_pi is None).\n",
    "        \"\"\"\n",
    "        x = self.relu_head(self.head(observation))\n",
    "        x = self.hidden(x)\n",
    "        mu = self.out_mean(x)\n",
    "        log_std = torch.clamp(self.out_log_std(x), self.LOG_SIG_MIN, self.LOG_SIG_MAX)\n",
    "        std = torch.exp(log_std)\n",
    "        pi_distribution = torch.distributions.Normal(mu, std)\n",
    "        if deterministic:\n",
    "            action = mu\n",
    "        else:\n",
    "            action = pi_distribution.rsample()  # reparameterised: mean + std * N(0,1).sample()\n",
    "        if with_logprob:\n",
    "            # Gaussian log-prob followed by the tanh-squashing correction.\n",
    "            # This is a numerically stable equivalent of Eq. 21 in appendix C\n",
    "            # of the original SAC paper (arXiv 1801.01290).\n",
    "            log_pi = pi_distribution.log_prob(action).sum(dim=1, keepdim=True)\n",
    "            log_pi -= (2 * (np.log(2) - action - torch.nn.functional.softplus(-2 * action))).sum(dim=1, keepdim=True)\n",
    "        else:\n",
    "            log_pi = None\n",
    "        # Scale by the env's action bound via self.act_limit instead of the\n",
    "        # module-level `env` the original referenced (hidden global dependency;\n",
    "        # crashed if `env` was not defined and silently ignored act_limit).\n",
    "        action = torch.FloatTensor([self.act_limit.high[0]]) * torch.tanh(action)\n",
    "\n",
    "        return action.squeeze(0), log_pi"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "8398281de93f8154"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "class SoftActorCritic:\n",
    "    \"\"\"SAC agent: twin critics, squashed-Gaussian actor, auto-tuned temperature alpha.\"\"\"\n",
    "\n",
    "    def __init__(self, env):\n",
    "        # Critic input is the concatenated (state, action) vector\n",
    "        self.critic = Critic(env.observation_space.shape[0] + env.action_space.shape[0], [128, 128, 128], 1)\n",
    "        self.critic_target = copy.deepcopy(self.critic)\n",
    "        self.hard_update_target()  # redundant right after deepcopy, but makes the sync explicit\n",
    "        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=1e-4)\n",
    "\n",
    "        self.actor = GaussianActor(env.observation_space.shape[0], [128, 128, 128], env.action_space.shape[0],\n",
    "                                   env.action_space)\n",
    "        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=1e-4)\n",
    "\n",
    "        self.buffer_memory = deque(maxlen=1000000)  # replay buffer of [s, a, [r], s', [done]]\n",
    "        self.batch_size = 256\n",
    "        self.gamma = 0.99      # discount factor\n",
    "        self.tau = 0.001       # Polyak averaging rate for the target critic\n",
    "        self.iter = 0\n",
    "        self.policy_freq = 2   # target-network update period, in learn() calls\n",
    "        self.env = env\n",
    "\n",
    "        # Whether to automatically learn the temperature alpha\n",
    "        self.adaptive_alpha = True\n",
    "        if self.adaptive_alpha:\n",
    "            # Target Entropy = -dim(A) (e.g. -6 for HalfCheetah-v2) as given in the paper\n",
    "            self.target_entropy = -env.action_space.shape[0]\n",
    "            # We learn log_alpha instead of alpha to ensure alpha = exp(log_alpha) > 0\n",
    "            self.log_alpha = torch.zeros(1, requires_grad=True)\n",
    "            # Detach: alpha is a constant inside the actor/critic losses; without the\n",
    "            # detach, actor_loss.backward() propagates through log_alpha's exp graph.\n",
    "            self.alpha = self.log_alpha.exp().detach()\n",
    "            self.alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=1e-4)\n",
    "        else:\n",
    "            self.alpha = 0.2\n",
    "\n",
    "    def choose_action(self, state, deterministic=False):\n",
    "        \"\"\"Sample (or take the mean of) an action for one state; returns (numpy_action, log_pi).\"\"\"\n",
    "        state = torch.tensor(state, dtype=torch.float32).unsqueeze(0)\n",
    "        action, log_pi = self.actor(state, deterministic)\n",
    "        return action.squeeze(0).detach().numpy(), log_pi\n",
    "\n",
    "    def soft_update_target(self):\n",
    "        \"\"\"Polyak-average the online critic into the target critic.\"\"\"\n",
    "        for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):\n",
    "            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\n",
    "\n",
    "    def hard_update_target(self):\n",
    "        \"\"\"Copy the online critic's weights into the target critic verbatim.\"\"\"\n",
    "        for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):\n",
    "            target_param.data.copy_(param.data)\n",
    "\n",
    "    def learn(self):\n",
    "        \"\"\"One SAC gradient step: critic update, actor update, alpha update, target sync.\"\"\"\n",
    "        self.iter += 1\n",
    "        batch_samples = random.sample(self.buffer_memory, self.batch_size)\n",
    "        batch_state, batch_action, batch_reward, batch_new_state, batch_done = zip(*batch_samples)\n",
    "        batch_state = torch.FloatTensor(np.array(batch_state))\n",
    "        batch_action = torch.FloatTensor(np.array(batch_action))\n",
    "        batch_reward = torch.FloatTensor(np.array(batch_reward))\n",
    "        batch_new_state = torch.FloatTensor(np.array(batch_new_state))\n",
    "        batch_done = torch.FloatTensor(np.array(batch_done))\n",
    "\n",
    "        with torch.no_grad():\n",
    "            next_state_action, next_state_log_pi = self.actor(batch_new_state)\n",
    "            # next_state_action is already a tensor; no FloatTensor re-wrap needed\n",
    "            next_q1_target, next_q2_target = self.critic_target(batch_new_state, next_state_action)\n",
    "            # Soft Bellman backup with the clipped double-Q trick\n",
    "            next_q_value = batch_reward + self.gamma * (1 - batch_done) * (\n",
    "                    torch.min(next_q1_target, next_q2_target) - self.alpha * next_state_log_pi)\n",
    "        q1, q2 = self.critic(batch_state, batch_action)\n",
    "        # JQ = E(st,at)~D[0.5 * (Q(st,at) - (r + gamma * E st+1~p[V(st+1)]))^2], summed over both critics\n",
    "        q_loss = torch.nn.MSELoss()(q1, next_q_value) + torch.nn.MSELoss()(q2, next_q_value)\n",
    "        self.critic_optimizer.zero_grad()\n",
    "        q_loss.backward()\n",
    "        self.critic_optimizer.step()\n",
    "\n",
    "        # Freeze critic networks so the actor update doesn't waste effort on critic grads\n",
    "        for params in self.critic.parameters():\n",
    "            params.requires_grad = False\n",
    "\n",
    "        action, log_pi = self.actor(batch_state)\n",
    "        q1_pi, q2_pi = self.critic(batch_state, action)\n",
    "        q_pi = torch.min(q1_pi, q2_pi)\n",
    "        # Jpi = E st~D, eps~N [ alpha * log pi(f(eps;st)|st) - Q(st, f(eps;st)) ]\n",
    "        actor_loss = (self.alpha * log_pi - q_pi).mean()\n",
    "        self.actor_optimizer.zero_grad()\n",
    "        actor_loss.backward()\n",
    "        self.actor_optimizer.step()\n",
    "\n",
    "        # Unfreeze critic networks\n",
    "        for params in self.critic.parameters():\n",
    "            params.requires_grad = True\n",
    "\n",
    "        if self.adaptive_alpha:\n",
    "            # We learn log_alpha instead of alpha to ensure alpha = exp(log_alpha) > 0\n",
    "            alpha_loss = -(self.log_alpha.exp() * (log_pi + self.target_entropy).detach()).mean()\n",
    "            self.alpha_optimizer.zero_grad()\n",
    "            alpha_loss.backward()\n",
    "            self.alpha_optimizer.step()\n",
    "            # Detach so later losses treat the updated alpha as a constant\n",
    "            self.alpha = self.log_alpha.exp().detach()\n",
    "\n",
    "        if self.iter % self.policy_freq == 0:\n",
    "            self.soft_update_target()\n",
    "\n",
    "    def model_save(self, epoch, model_folder='./model_save', max_models=5):\n",
    "        \"\"\"Checkpoint networks/optimizers and the replay buffer; keep only the newest max_models files.\"\"\"\n",
    "        if not os.path.exists(model_folder):\n",
    "            os.makedirs(model_folder)\n",
    "        buffer_memory_path = os.path.join(model_folder, 'buffer.pkl')\n",
    "        model_path = os.path.join(model_folder, f'epoch_{epoch}.pth')\n",
    "        with open(buffer_memory_path, 'wb') as file:\n",
    "            pickle.dump(self.buffer_memory, file)\n",
    "        torch.save({\n",
    "            'actor_model_state_dict': self.actor.state_dict(),\n",
    "            'critic_model_state_dict': self.critic.state_dict(),\n",
    "            'critic_target_model_state_dict': self.critic_target.state_dict(),\n",
    "            'actor_optimizer_state_dict': self.actor_optimizer.state_dict(),\n",
    "            'critic_optimizer_state_dict': self.critic_optimizer.state_dict(),\n",
    "        }, model_path)\n",
    "        # Prune the oldest checkpoints so at most max_models remain\n",
    "        model_files = [f for f in os.listdir(model_folder) if f.startswith('epoch_') and f.endswith('.pth')]\n",
    "        model_files = sorted(model_files, key=lambda x: int(x.split('_')[-1].split('.')[0]))\n",
    "        while len(model_files) > max_models:\n",
    "            old_model_path = os.path.join(model_folder, model_files[0])\n",
    "            os.remove(old_model_path)\n",
    "            model_files.pop(0)\n",
    "\n",
    "    def model_load(self, model_path, buffer_path):\n",
    "        \"\"\"Restore networks, optimizers and the replay buffer from a checkpoint.\"\"\"\n",
    "        # NOTE: pickle.load on untrusted files can execute arbitrary code -- only load your own checkpoints.\n",
    "        with open(buffer_path, 'rb') as file:\n",
    "            self.buffer_memory = pickle.load(file)\n",
    "\n",
    "        checkpoint = torch.load(model_path)\n",
    "        self.actor.load_state_dict(checkpoint['actor_model_state_dict'])\n",
    "        self.critic.load_state_dict(checkpoint['critic_model_state_dict'])\n",
    "        self.critic_target.load_state_dict(checkpoint['critic_target_model_state_dict'])\n",
    "        self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer_state_dict'])\n",
    "        self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer_state_dict'])"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "deee8ae78c6297d4"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "log_dir = './runs'\n",
    "# Remove stale TensorBoard logs, then guarantee an empty log dir exists.\n",
    "# (The original deleted the folder but never recreated it on that branch.)\n",
    "if os.path.exists(log_dir):\n",
    "    try:\n",
    "        shutil.rmtree(log_dir)\n",
    "        print(f'文件夹 {log_dir} 已成功删除。')\n",
    "    except OSError as error:\n",
    "        print(f'删除文件夹 {log_dir} 失败: {error}')\n",
    "else:\n",
    "    print(f'文件夹 {log_dir} 不存在，已创建文件夹 {log_dir}。')\n",
    "os.makedirs(log_dir, exist_ok=True)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "24f936dbcfc8a569"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "model_save_dir = \"./model_save\"\n",
    "# Remove stale checkpoints, then guarantee an empty checkpoint dir exists.\n",
    "# (The original deleted the folder but never recreated it on that branch.)\n",
    "if os.path.exists(model_save_dir):\n",
    "    try:\n",
    "        shutil.rmtree(model_save_dir)\n",
    "        print(f'文件夹 {model_save_dir} 已成功删除。')\n",
    "    except OSError as error:\n",
    "        print(f'删除文件夹 {model_save_dir} 失败: {error}')\n",
    "else:\n",
    "    print(f'文件夹 {model_save_dir} 不存在，已创建文件夹 {model_save_dir}。')\n",
    "os.makedirs(model_save_dir, exist_ok=True)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "1c79b5278ffff06c"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "summary_writer = SummaryWriter(log_dir=log_dir)\n",
    "env = gym.make(\"BipedalWalkerHardcore-v3\")\n",
    "sac = SoftActorCritic(env)\n",
    "epochs = 10000\n",
    "steps = 3000        # hard per-episode step cap\n",
    "all_reward = []\n",
    "total_steps = 0\n",
    "start_step = 15000  # act randomly for this many env steps to seed the replay buffer\n",
    "for epoch in range(epochs):\n",
    "    start_time = time.time()\n",
    "    state, _ = env.reset()\n",
    "    step = 0\n",
    "    episode_rewards = 0\n",
    "    done = False\n",
    "    while not done:\n",
    "        if total_steps < start_step:\n",
    "            action = env.action_space.sample()\n",
    "        else:\n",
    "            action, _ = sac.choose_action(state)\n",
    "        new_state, reward, terminated, truncated, _ = env.step(action)\n",
    "        fell = reward <= -100  # BipedalWalker returns -100 when the hull hits the ground\n",
    "        if fell:\n",
    "            # Clip the fall penalty to -1 (stabilises critic targets) and store as terminal\n",
    "            reward = -1\n",
    "            sac.buffer_memory.append([state, action, [reward], new_state, [True]])\n",
    "        else:\n",
    "            # Other transitions stored as non-terminal so the critic keeps bootstrapping\n",
    "            sac.buffer_memory.append([state, action, [reward], new_state, [False]])\n",
    "        # End the episode on a fall OR when the env terminates/truncates.\n",
    "        # (The original overwrote `done` and kept stepping a finished env.)\n",
    "        done = fell or terminated or truncated\n",
    "        state = new_state\n",
    "        episode_rewards += reward\n",
    "        step += 1\n",
    "        total_steps += 1\n",
    "        if len(sac.buffer_memory) > 2000:\n",
    "            sac.learn()\n",
    "        if step > steps:\n",
    "            break\n",
    "    if epoch % 10 == 0:\n",
    "        sac.model_save(epoch, model_save_dir)\n",
    "    end_time = time.time()\n",
    "    all_reward.append(episode_rewards)\n",
    "    summary_writer.add_scalar('episode_rewards', episode_rewards, epoch)\n",
    "    print(\"Epoch/Epochs/Total_Step: {}/{}/{}, Reward: {}, Spent_Time: {}\".format(epoch + 1,\n",
    "                                                                                 epochs,\n",
    "                                                                                 total_steps,\n",
    "                                                                                 episode_rewards,\n",
    "                                                                                 end_time - start_time))"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "280c599e42fd35ad"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "env = gym.make(\"BipedalWalkerHardcore-v3\", render_mode='human')\n",
    "for _ in range(50):\n",
    "    start_time = time.time()\n",
    "    state, _ = env.reset()\n",
    "    step = 0\n",
    "    while True:\n",
    "        # choose_action returns (action, log_pi); the original passed the whole\n",
    "        # tuple to env.step, which crashes. Use the mean action for evaluation.\n",
    "        action, _ = sac.choose_action(state, deterministic=True)\n",
    "        new_state, reward, terminated, truncated, _ = env.step(action)\n",
    "        step += 1\n",
    "        state = new_state\n",
    "        # Exit on truncation too, otherwise the time-limit case loops forever\n",
    "        if terminated or truncated:\n",
    "            end_time = time.time()\n",
    "            print(end_time - start_time)\n",
    "            break"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "81da0e30d8966281"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# Release the environment's resources (closes the human-render window)\n",
    "env.close()"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "e0a0f3fe51a74a"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
