{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch import optim\n",
    "import numpy as np\n",
    "# BUG FIX: was `import matplotlib as np`, which rebound the name `np` to the\n",
    "# matplotlib package and silently shadowed numpy; import pyplot under its\n",
    "# conventional alias instead.\n",
    "import matplotlib.pyplot as plt\n",
    "import gym\n",
    "from torch.nn import functional as F\n",
    "import torch.multiprocessing as mp\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ActorCritic(nn.Module):\n",
    "    \"\"\"Two-headed actor-critic network for CartPole (4-dim state, 2 actions).\n",
    "\n",
    "    forward(x) returns (actor, critic):\n",
    "      actor  -- log-probabilities over the 2 actions (log_softmax output)\n",
    "      critic -- state-value estimate squashed into (-1, 1) by tanh\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super(ActorCritic,self).__init__()\n",
    "        self.l1 = nn.Linear(4,25)\n",
    "        self.l2 = nn.Linear(25,50)\n",
    "\n",
    "        self.actor_ln1 = nn.Linear(50,2)\n",
    "\n",
    "        self.l3 = nn.Linear(50,25)\n",
    "        self.critic_ln1 = nn.Linear(25,1)\n",
    "\n",
    "    def forward(self,x):\n",
    "        # BUG FIX: F.normalize defaults to dim=1, which raises\n",
    "        # `IndexError: Dimension out of range (expected to be in range of\n",
    "        # [-1, 0], but got 1)` for the 1-D CartPole state vector -- the exact\n",
    "        # crash captured in this notebook's training-cell traceback.\n",
    "        # Normalizing over the last dimension works for both 1-D and batched\n",
    "        # inputs.\n",
    "        x = F.normalize(x,dim=-1)\n",
    "        y = F.relu(self.l1(x))\n",
    "        y = F.relu(self.l2(y))\n",
    "        # dim=0 matches the unbatched 1-D state used by run_episode\n",
    "        actor = F.log_softmax(self.actor_ln1(y),dim=0)\n",
    "        # detach() keeps critic-loss gradients out of the shared trunk l1/l2\n",
    "        c = F.relu(self.l3(y.detach()))\n",
    "        critic = torch.tanh(self.critic_ln1(c))\n",
    "        return actor,critic"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyper-parameters shared by every worker process.\n",
    "params ={\n",
    "    \"epochs\":1000,\n",
    "    \"n_workers\":7\n",
    "}\n",
    "# Shared integer ('i') counter of episodes completed across all workers.\n",
    "counter = mp.Value('i',0)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_episode(worker_env,worker_model):\n",
    "    \"\"\"Roll out one full episode of worker_env under worker_model.\n",
    "\n",
    "    Returns (values, logprobs, rewards), one entry per step, in\n",
    "    chronological order (update_params flips them before use).\n",
    "    \"\"\"\n",
    "    # NOTE(review): reads the env's internal `.env.state` and unpacks step()\n",
    "    # as a 4-tuple below -- this assumes the classic (pre-0.26) gym API;\n",
    "    # confirm against the installed gym version.\n",
    "    state  = torch.from_numpy(worker_env.env.state).float()\n",
    "    values,logprobs,rewards = [],[],[]\n",
    "    done  = False # whether the episode has finished\n",
    "    j = 0\n",
    "\n",
    "    while (done == False):\n",
    "        j +=1\n",
    "        policy,value = worker_model(state) # policy: log-probability per action; value: critic's estimate of this state's value, i.e. a score of how good the actor's policy is here\n",
    "        values.append(value)\n",
    "\n",
    "        logits = policy.view(-1)\n",
    "\n",
    "        # Categorical accepts unnormalized log-probs, so feeding the\n",
    "        # log_softmax output directly is valid.\n",
    "        action_dist = torch.distributions.Categorical(logits=logits)\n",
    "\n",
    "        action  =  action_dist.sample()\n",
    "        # policy already holds log-probabilities, so indexing yields log pi(a|s)\n",
    "        logprob_ = policy.view(-1)[action]\n",
    "        logprobs.append(logprob_)\n",
    "        state_,_,done,info = worker_env.step(action.detach().numpy())\n",
    "\n",
    "        state =  torch.from_numpy(state_).float()\n",
    "\n",
    "        if done:\n",
    "            reward = -10  # penalty when the episode terminates (pole fell / cart out of bounds)\n",
    "            worker_env.reset()\n",
    "        else:\n",
    "            reward = 1.0  # reward for each surviving step\n",
    "        rewards.append(reward)\n",
    "    return values , logprobs,rewards"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def update_params(worker_opt,values,logprobs,rewards,clc=0.1,gamma=0.95):\n",
    "    \"\"\"Compute discounted returns and apply one actor-critic gradient step.\n",
    "\n",
    "    Args:\n",
    "        worker_opt: optimizer over the shared model's parameters.\n",
    "        values, logprobs, rewards: per-step lists from run_episode\n",
    "            (chronological order).\n",
    "        clc: weight of the critic loss relative to the actor loss.\n",
    "        gamma: discount factor for the returns.\n",
    "\n",
    "    Returns (actor_loss, critic_loss, episode_length).\n",
    "    \"\"\"\n",
    "    # Flip to reverse-chronological order so returns can be accumulated\n",
    "    # backwards from the end of the episode.\n",
    "    rewards = torch.tensor(rewards).flip(dims=(0,)).view(-1)\n",
    "    logprobs = torch.stack(logprobs).flip(dims=(0,)).view(-1)\n",
    "    values = torch.stack(values).flip(dims =(0,)).view(-1)\n",
    "\n",
    "    Returns = []\n",
    "    # BUG FIX: ret_ was read before ever being assigned, raising NameError\n",
    "    # on the first loop iteration; seed the running return with zero.\n",
    "    ret_ = torch.tensor([0.0])\n",
    "\n",
    "    for r in range(rewards.shape[0]):\n",
    "        ret_ = rewards[r]+gamma*ret_\n",
    "        Returns.append(ret_)\n",
    "    Returns = torch.stack(Returns).view(-1)\n",
    "    Returns = F.normalize(Returns,dim=0)\n",
    "    # Advantage actor-critic losses; values.detach() keeps the advantage\n",
    "    # baseline out of the actor's gradient path.\n",
    "    actor_loss = -1*logprobs*(Returns-values.detach())\n",
    "    critic_loss = torch.pow(values-Returns,2)\n",
    "    # BUG FIX: clc previously defaulted to 0, which multiplied the critic\n",
    "    # loss away entirely so the value head was never trained; 0.1 restores\n",
    "    # the conventional weighting.\n",
    "    loss =actor_loss.sum()+clc*critic_loss.sum()\n",
    "    loss.backward()\n",
    "    worker_opt.step()\n",
    "    return actor_loss,critic_loss,len(rewards)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def worker(t,worker_model,counter,params):\n",
    "    \"\"\"Training loop executed inside each spawned process.\n",
    "\n",
    "    Args:\n",
    "        t: worker index (identifies the process).\n",
    "        worker_model: shared ActorCritic (parent called share_memory()).\n",
    "        counter: mp.Value('i') counting episodes across all workers.\n",
    "        params: dict with 'epochs' = episodes to run per worker.\n",
    "    \"\"\"\n",
    "    worker_env = gym.make(\"CartPole-v1\")\n",
    "    worker_env.reset()\n",
    "    # Each worker owns its optimizer; gradients flow into the shared model.\n",
    "    worker_opt = optim.Adam(lr=1e-4,params=worker_model.parameters())\n",
    "    for i in range(params['epochs']):\n",
    "        worker_opt.zero_grad()\n",
    "        values,logprobs,rewards = run_episode(worker_env,worker_model)\n",
    "        actor_loss ,critic_loss ,eplen = update_params(worker_opt,values,logprobs,rewards)\n",
    "        # BUG FIX: `counter.value = counter.value + 1` is a non-atomic\n",
    "        # read-modify-write across processes and can lose increments; hold\n",
    "        # the Value's lock for the update.\n",
    "        with counter.get_lock():\n",
    "            counter.value += 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Process Process-1:\n",
      "Traceback (most recent call last):\n",
      "Process Process-2:\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/multiprocessing/process.py\", line 315, in _bootstrap\n",
      "    self.run()\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/multiprocessing/process.py\", line 108, in run\n",
      "    self._target(*self._args, **self._kwargs)\n",
      "  File \"/tmp/ipykernel_879731/488527452.py\", line 8, in worker\n",
      "    values,logprobs,rewards = run_episode(worker_env,worker_model)\n",
      "Traceback (most recent call last):\n",
      "  File \"/tmp/ipykernel_879731/3828825263.py\", line 9, in run_episode\n",
      "    policy,value = worker_model(state)\n",
      "Process Process-3:\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1501, in _call_impl\n",
      "    return forward_call(*args, **kwargs)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/multiprocessing/process.py\", line 315, in _bootstrap\n",
      "    self.run()\n",
      "  File \"/tmp/ipykernel_879731/2757626989.py\", line 13, in forward\n",
      "    x = F.normalize(x)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/multiprocessing/process.py\", line 108, in run\n",
      "    self._target(*self._args, **self._kwargs)\n",
      "Traceback (most recent call last):\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/nn/functional.py\", line 4660, in normalize\n",
      "    denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input)\n",
      "  File \"/tmp/ipykernel_879731/488527452.py\", line 8, in worker\n",
      "    values,logprobs,rewards = run_episode(worker_env,worker_model)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/multiprocessing/process.py\", line 315, in _bootstrap\n",
      "    self.run()\n",
      "Process Process-4:\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/_tensor.py\", line 647, in norm\n",
      "    return torch.norm(self, p, dim, keepdim, dtype=dtype)\n",
      "  File \"/tmp/ipykernel_879731/3828825263.py\", line 9, in run_episode\n",
      "    policy,value = worker_model(state)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/multiprocessing/process.py\", line 108, in run\n",
      "    self._target(*self._args, **self._kwargs)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1501, in _call_impl\n",
      "    return forward_call(*args, **kwargs)\n",
      "Traceback (most recent call last):\n",
      "  File \"/tmp/ipykernel_879731/488527452.py\", line 8, in worker\n",
      "    values,logprobs,rewards = run_episode(worker_env,worker_model)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/functional.py\", line 1517, in norm\n",
      "    return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype)\n",
      "  File \"/tmp/ipykernel_879731/2757626989.py\", line 13, in forward\n",
      "    x = F.normalize(x)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/multiprocessing/process.py\", line 315, in _bootstrap\n",
      "    self.run()\n",
      "IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)\n",
      "  File \"/tmp/ipykernel_879731/3828825263.py\", line 9, in run_episode\n",
      "    policy,value = worker_model(state)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/nn/functional.py\", line 4660, in normalize\n",
      "    denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1501, in _call_impl\n",
      "    return forward_call(*args, **kwargs)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/_tensor.py\", line 647, in norm\n",
      "    return torch.norm(self, p, dim, keepdim, dtype=dtype)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/multiprocessing/process.py\", line 108, in run\n",
      "    self._target(*self._args, **self._kwargs)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/functional.py\", line 1517, in norm\n",
      "    return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype)\n",
      "  File \"/tmp/ipykernel_879731/488527452.py\", line 8, in worker\n",
      "    values,logprobs,rewards = run_episode(worker_env,worker_model)\n",
      "  File \"/tmp/ipykernel_879731/2757626989.py\", line 13, in forward\n",
      "    x = F.normalize(x)\n",
      "IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)\n",
      "  File \"/tmp/ipykernel_879731/3828825263.py\", line 9, in run_episode\n",
      "    policy,value = worker_model(state)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/nn/functional.py\", line 4660, in normalize\n",
      "    denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1501, in _call_impl\n",
      "    return forward_call(*args, **kwargs)\n",
      "Process Process-5:\n",
      "  File \"/tmp/ipykernel_879731/2757626989.py\", line 13, in forward\n",
      "    x = F.normalize(x)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/_tensor.py\", line 647, in norm\n",
      "    return torch.norm(self, p, dim, keepdim, dtype=dtype)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/nn/functional.py\", line 4660, in normalize\n",
      "    denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/functional.py\", line 1517, in norm\n",
      "    return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/_tensor.py\", line 647, in norm\n",
      "    return torch.norm(self, p, dim, keepdim, dtype=dtype)\n",
      "IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/functional.py\", line 1517, in norm\n",
      "    return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype)\n",
      "Traceback (most recent call last):\n",
      "IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/multiprocessing/process.py\", line 315, in _bootstrap\n",
      "    self.run()\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/multiprocessing/process.py\", line 108, in run\n",
      "    self._target(*self._args, **self._kwargs)\n",
      "  File \"/tmp/ipykernel_879731/488527452.py\", line 8, in worker\n",
      "    values,logprobs,rewards = run_episode(worker_env,worker_model)\n",
      "  File \"/tmp/ipykernel_879731/3828825263.py\", line 9, in run_episode\n",
      "    policy,value = worker_model(state)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1501, in _call_impl\n",
      "    return forward_call(*args, **kwargs)\n",
      "  File \"/tmp/ipykernel_879731/2757626989.py\", line 13, in forward\n",
      "    x = F.normalize(x)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/nn/functional.py\", line 4660, in normalize\n",
      "    denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input)\n",
      "Process Process-6:\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/_tensor.py\", line 647, in norm\n",
      "    return torch.norm(self, p, dim, keepdim, dtype=dtype)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/functional.py\", line 1517, in norm\n",
      "    return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype)\n",
      "Traceback (most recent call last):\n",
      "IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/multiprocessing/process.py\", line 315, in _bootstrap\n",
      "    self.run()\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/multiprocessing/process.py\", line 108, in run\n",
      "    self._target(*self._args, **self._kwargs)\n",
      "  File \"/tmp/ipykernel_879731/488527452.py\", line 8, in worker\n",
      "    values,logprobs,rewards = run_episode(worker_env,worker_model)\n",
      "  File \"/tmp/ipykernel_879731/3828825263.py\", line 9, in run_episode\n",
      "    policy,value = worker_model(state)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1501, in _call_impl\n",
      "    return forward_call(*args, **kwargs)\n",
      "  File \"/tmp/ipykernel_879731/2757626989.py\", line 13, in forward\n",
      "    x = F.normalize(x)\n",
      "Process Process-7:\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/nn/functional.py\", line 4660, in normalize\n",
      "    denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/_tensor.py\", line 647, in norm\n",
      "    return torch.norm(self, p, dim, keepdim, dtype=dtype)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/functional.py\", line 1517, in norm\n",
      "    return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype)\n",
      "Traceback (most recent call last):\n",
      "IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/multiprocessing/process.py\", line 315, in _bootstrap\n",
      "    self.run()\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/multiprocessing/process.py\", line 108, in run\n",
      "    self._target(*self._args, **self._kwargs)\n",
      "  File \"/tmp/ipykernel_879731/488527452.py\", line 8, in worker\n",
      "    values,logprobs,rewards = run_episode(worker_env,worker_model)\n",
      "  File \"/tmp/ipykernel_879731/3828825263.py\", line 9, in run_episode\n",
      "    policy,value = worker_model(state)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/nn/modules/module.py\", line 1501, in _call_impl\n",
      "    return forward_call(*args, **kwargs)\n",
      "  File \"/tmp/ipykernel_879731/2757626989.py\", line 13, in forward\n",
      "    x = F.normalize(x)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/nn/functional.py\", line 4660, in normalize\n",
      "    denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/_tensor.py\", line 647, in norm\n",
      "    return torch.norm(self, p, dim, keepdim, dtype=dtype)\n",
      "  File \"/home/fd_chen/anaconda3/envs/procthor-rl/lib/python3.9/site-packages/torch/functional.py\", line 1517, in norm\n",
      "    return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype)\n",
      "IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 1\n"
     ]
    }
   ],
   "source": [
    "# Spawn n_workers processes that all train the one shared MasterNode model.\n",
    "MasterNode = ActorCritic()\n",
    "MasterNode.share_memory()  # place parameters in shared memory so every worker updates the same copy\n",
    "\n",
    "processes = []\n",
    "\n",
    "for i in range(params['n_workers']):\n",
    "    p = mp.Process(target=worker,args=(i,MasterNode,counter,params))\n",
    "    p.start()\n",
    "    processes.append(p)\n",
    "\n",
    "# Wait for every worker to finish, then make sure none is left running.\n",
    "for p in processes:\n",
    "    p.join()\n",
    "\n",
    "for p in processes:\n",
    "    p.terminate()\n",
    "\n",
    "print(counter.value,processes[1].exitcode)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "procthor-rl",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.20"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
