{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Chapter 10 - Relational Deep Reinforcement Learning\n",
    "### Deep Reinforcement Learning *in Action*"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### Listing 10.1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from matplotlib import pyplot as plt\n",
    "import torch\n",
    "from torch import nn\n",
    "import torchvision as TV\n",
    "\n",
    "mnist_data = TV.datasets.MNIST(\"MNIST/\", train=True, transform=None,\\\n",
    "                                        target_transform=None, download=True) #A\n",
    "mnist_test = TV.datasets.MNIST(\"MNIST/\", train=False, transform=None,\\\n",
    "                                        target_transform=None, download=True) #B\n",
    "\n",
    "\n",
    "def add_spots(x,m=20,std=5,val=1): #C\n",
    "    \"\"\"Write `val` into N randomly chosen pixels of image tensor x,\n",
    "    where N ~ m + std*|gauss|, then clamp the result into [0,1].\n",
    "    Flat indices may repeat, so the effective spot count can be < N.\n",
    "    \"\"\"\n",
    "    mask = torch.zeros(x.shape)\n",
    "    N = int(m + std * np.abs(np.random.randn())) # stochastic spot count\n",
    "    ids = np.random.randint(np.prod(x.shape),size=N) # flat pixel indices\n",
    "    mask.view(-1)[ids] = val\n",
    "    return torch.clamp(x + mask,0,1)\n",
    "\n",
    "def prepare_images(xt,maxtrans=6,rot=5,noise=10): #D\n",
    "    \"\"\"Randomly rotate, translate and speckle a batch of MNIST digits.\n",
    "\n",
    "    xt: (batch,H,W) image tensor; maxtrans: max pixel shift per axis;\n",
    "    rot: max rotation in degrees; noise: mean spot count for add_spots.\n",
    "    Returns a float tensor of the same shape, each image rescaled to [0,1].\n",
    "    \"\"\"\n",
    "    out = torch.zeros(xt.shape)\n",
    "    for i in range(xt.shape[0]):\n",
    "        img = xt[i].unsqueeze(dim=0)\n",
    "        img = TV.transforms.functional.to_pil_image(img)\n",
    "        # affine() expects plain numbers; randint with size=1 returns a\n",
    "        # 1-element ndarray, which newer torchvision rejects, so draw a\n",
    "        # scalar and cast to int (same applies to the translation pair).\n",
    "        rand_rot = int(np.random.randint(-1*rot,rot)) if rot > 0 else 0\n",
    "        xtrans,ytrans = np.random.randint(-maxtrans,maxtrans,2)\n",
    "        img = TV.transforms.functional.affine(img, rand_rot, (int(xtrans),int(ytrans)),1,0)\n",
    "        img = TV.transforms.functional.to_tensor(img).squeeze()\n",
    "        if noise > 0:\n",
    "            img = add_spots(img,m=noise)\n",
    "        maxval = img.view(-1).max()\n",
    "        if maxval > 0:  # avoid division by zero on an all-black image\n",
    "            img = img.float() / maxval\n",
    "        else:\n",
    "            img = img.float()\n",
    "        out[i] = img\n",
    "    return out"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### Listing 10.2/10.3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class RelationalModule(torch.nn.Module):\n",
    "    \"\"\"Single-head relational (self-attention) network for MNIST.\n",
    "\n",
    "    Four conv layers produce a 16x16 grid of feature 'nodes'; each node is\n",
    "    tagged with its normalized (x,y) position, projected to key/query/value\n",
    "    vectors, and mixed with scaled dot-product attention before a max-pool\n",
    "    over nodes and a log-softmax classification head.\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super(RelationalModule, self).__init__()\n",
    "        self.ch_in = 1\n",
    "        self.conv1_ch = 16 #A\n",
    "        self.conv2_ch = 20\n",
    "        self.conv3_ch = 24\n",
    "        self.conv4_ch = 30\n",
    "        self.H = 28 #B\n",
    "        self.W = 28\n",
    "        self.node_size = 36 #C\n",
    "        self.lin_hid = 100\n",
    "        self.out_dim = 10\n",
    "        self.sp_coord_dim = 2\n",
    "        self.N = int(16**2) #D 28px shrinks by 3 per 4x4 conv: 16x16 = 256 nodes\n",
    "\n",
    "        self.conv1 = nn.Conv2d(self.ch_in,self.conv1_ch,kernel_size=(4,4))\n",
    "        self.conv2 = nn.Conv2d(self.conv1_ch,self.conv2_ch,kernel_size=(4,4))\n",
    "        self.conv3 = nn.Conv2d(self.conv2_ch,self.conv3_ch,kernel_size=(4,4))\n",
    "        self.conv4 = nn.Conv2d(self.conv3_ch,self.conv4_ch,kernel_size=(4,4))\n",
    "\n",
    "        self.proj_shape = (self.conv4_ch+self.sp_coord_dim,self.node_size) #E\n",
    "        self.k_proj = nn.Linear(*self.proj_shape)\n",
    "        self.q_proj = nn.Linear(*self.proj_shape)\n",
    "        self.v_proj = nn.Linear(*self.proj_shape)\n",
    "\n",
    "        self.norm_shape = (self.N,self.node_size)\n",
    "        self.k_norm = nn.LayerNorm(self.norm_shape, elementwise_affine=True) #F\n",
    "        self.q_norm = nn.LayerNorm(self.norm_shape, elementwise_affine=True)\n",
    "        self.v_norm = nn.LayerNorm(self.norm_shape, elementwise_affine=True)\n",
    "\n",
    "        self.linear1 = nn.Linear(self.node_size, self.node_size)\n",
    "        self.norm1 = nn.LayerNorm([self.N,self.node_size], elementwise_affine=False)\n",
    "        self.linear2 = nn.Linear(self.node_size, self.out_dim)\n",
    "\n",
    "    def forward(self,x):\n",
    "        N, Cin, H, W = x.shape\n",
    "        x = self.conv1(x)\n",
    "        x = torch.relu(x)\n",
    "        x = self.conv2(x)\n",
    "        # NOTE: the original had a stray x.squeeze() here; it was a no-op for\n",
    "        # batches > 1 but dropped the batch dimension for a single image,\n",
    "        # crashing conv3 -- removed.\n",
    "        x = torch.relu(x)\n",
    "        x = self.conv3(x)\n",
    "        x = torch.relu(x)\n",
    "        x = self.conv4(x)\n",
    "        x = torch.relu(x)\n",
    "\n",
    "        # Append normalized (x,y) coordinate planes so each node carries its\n",
    "        # spatial position into the attention computation.\n",
    "        _,_,cH,cW = x.shape\n",
    "        xcoords = torch.arange(cW).repeat(cH,1).float() / cW #G\n",
    "        ycoords = torch.arange(cH).repeat(cW,1).transpose(1,0).float() / cH\n",
    "        spatial_coords = torch.stack([xcoords,ycoords],dim=0)\n",
    "        spatial_coords = spatial_coords.unsqueeze(dim=0)\n",
    "        spatial_coords = spatial_coords.repeat(N,1,1,1).to(x.device)\n",
    "        x = torch.cat([x,spatial_coords],dim=1)\n",
    "        x = x.permute(0,2,3,1)\n",
    "        x = x.flatten(1,2)  # (batch, cH*cW, channels+2): one row per node\n",
    "\n",
    "        K = self.k_proj(x) #H\n",
    "        K = self.k_norm(K)\n",
    "\n",
    "        Q = self.q_proj(x)\n",
    "        Q = self.q_norm(Q)\n",
    "\n",
    "        V = self.v_proj(x)\n",
    "        V = self.v_norm(V)\n",
    "        # Scaled dot-product attention over all node pairs.\n",
    "        A = torch.einsum('bfe,bge->bfg',Q,K) #I\n",
    "        A = A / np.sqrt(self.node_size)\n",
    "        A = torch.nn.functional.softmax(A,dim=2)\n",
    "        with torch.no_grad():\n",
    "            self.att_map = A.clone()  # cached for later visualization\n",
    "        E = torch.einsum('bfc,bcd->bfd',A,V) #J\n",
    "        E = self.linear1(E)\n",
    "        E = torch.relu(E)\n",
    "        E = self.norm1(E)\n",
    "        E = E.max(dim=1)[0]  # pool over nodes\n",
    "        y = self.linear2(E)\n",
    "        y = torch.nn.functional.log_softmax(y,dim=1)\n",
    "        return y"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### Listing 10.4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Supervised training of the relational module on perturbed MNIST.\n",
    "agent = RelationalModule() #A\n",
    "epochs = 1000\n",
    "batch_size=300\n",
    "lr = 1e-3\n",
    "opt = torch.optim.Adam(params=agent.parameters(),lr=lr)\n",
    "lossfn = nn.NLLLoss()\n",
    "for i in range(epochs):\n",
    "    opt.zero_grad()\n",
    "    batch_ids = np.random.randint(0,60000,size=batch_size) #B\n",
    "    # .data / .targets replace the deprecated .train_data / .train_labels\n",
    "    xt = mnist_data.data[batch_ids].detach()\n",
    "    xt = prepare_images(xt,rot=30).unsqueeze(dim=1) #C\n",
    "    yt = mnist_data.targets[batch_ids].detach()\n",
    "    pred = agent(xt)\n",
    "    pred_labels = torch.argmax(pred,dim=1) #D\n",
    "    acc_ = 100.0 * (pred_labels == yt).sum() / batch_size #E\n",
    "    loss = lossfn(pred,yt)\n",
    "    loss.backward()\n",
    "    opt.step()\n",
    "    if i % 100 == 0: # periodic progress readout\n",
    "        print(\"epoch %d: loss %.4f, acc %.1f%%\" % (i,loss.item(),acc_.item()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### Listing 10.5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_acc(model,batch_size=500):\n",
    "    \"\"\"Evaluate `model` on a random batch of perturbed test digits.\n",
    "\n",
    "    Returns (accuracy, inputs, labels) so the batch can be inspected.\n",
    "    \"\"\"\n",
    "    acc = 0.\n",
    "    batch_ids = np.random.randint(0,10000,size=batch_size)\n",
    "    # .data / .targets replace the deprecated .test_data / .test_labels\n",
    "    xt = mnist_test.data[batch_ids].detach()\n",
    "    xt = prepare_images(xt,maxtrans=6,rot=30,noise=10).unsqueeze(dim=1)\n",
    "    yt = mnist_test.targets[batch_ids].detach()\n",
    "    preds = model(xt)\n",
    "    pred_ind = torch.argmax(preds.detach(),dim=1)\n",
    "    acc = (pred_ind == yt).sum().float() / batch_size\n",
    "    return acc, xt, yt\n",
    "\n",
    "acc2, xt2, yt2 = test_acc(agent)\n",
    "print(acc2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Max over attention rows -> per-node saliency, reshaped to the 16x16 node grid\n",
    "plt.imshow(agent.att_map[0].max(dim=0)[0].view(16,16))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### Listing 10.6"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class CNN(torch.nn.Module):\n",
    "    \"\"\"Plain convolutional baseline for the perturbed-MNIST comparison.\"\"\"\n",
    "    def __init__(self):\n",
    "        super(CNN, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(1,10,kernel_size=(4,4)) #A\n",
    "        self.conv2 = nn.Conv2d(10,16,kernel_size=(4,4))\n",
    "        self.conv3 = nn.Conv2d(16,24,kernel_size=(4,4))\n",
    "        self.conv4 = nn.Conv2d(24,32,kernel_size=(4,4))\n",
    "        self.maxpool1 = nn.MaxPool2d(kernel_size=(2,2)) #B\n",
    "        self.conv5 = nn.Conv2d(32,64,kernel_size=(4,4))\n",
    "        self.lin1 = nn.Linear(256,128)\n",
    "        self.out = nn.Linear(128,10) #C\n",
    "\n",
    "    def forward(self,x):\n",
    "        # Two convs, one spatial downsample, three more convs, then classify.\n",
    "        relu = nn.functional.relu\n",
    "        x = relu(self.conv1(x))\n",
    "        x = relu(self.conv2(x))\n",
    "        x = self.maxpool1(x)\n",
    "        x = relu(self.conv3(x))\n",
    "        x = relu(self.conv4(x))\n",
    "        x = relu(self.conv5(x))\n",
    "        x = x.flatten(start_dim=1)  # collapse feature maps to a vector\n",
    "        x = relu(self.lin1(x))\n",
    "        return nn.functional.log_softmax(self.out(x),dim=1) #D"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from einops import rearrange\n",
    "# einops demo: rearrange permutes axes by name (here NHWC -> NCHW)\n",
    "x = torch.randn(5,7,7,3)\n",
    "rearrange(x, \"batch h w c -> batch c h w\").shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### Listing 10.7"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MultiHeadRelationalModule(torch.nn.Module):\n",
    "    \"\"\"Multi-head relational (self-attention) module used as a Q-network.\n",
    "\n",
    "    Two 1x1 convs featurize each cell of the grid observation, normalized\n",
    "    (x,y) coordinates are appended, and n_heads additive-attention heads mix\n",
    "    the 7x7 = 49 nodes. Output is out_dim Q-values (one per action).\n",
    "    conv_map and att_map are cached on each forward pass for visualization.\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super(MultiHeadRelationalModule, self).__init__()\n",
    "        self.conv1_ch = 16 \n",
    "        self.conv2_ch = 20\n",
    "        self.conv3_ch = 24\n",
    "        self.conv4_ch = 30\n",
    "        self.H = 28\n",
    "        self.W = 28\n",
    "        self.node_size = 64\n",
    "        self.lin_hid = 100\n",
    "        self.out_dim = 5\n",
    "        self.ch_in = 3\n",
    "        self.sp_coord_dim = 2\n",
    "        self.N = int(7**2)\n",
    "        self.n_heads = 3\n",
    "        \n",
    "        self.conv1 = nn.Conv2d(self.ch_in,self.conv1_ch,kernel_size=(1,1),padding=0) #A\n",
    "        self.conv2 = nn.Conv2d(self.conv1_ch,self.conv2_ch,kernel_size=(1,1),padding=0)\n",
    "        self.proj_shape = (self.conv2_ch+self.sp_coord_dim,self.n_heads * self.node_size)\n",
    "        self.k_proj = nn.Linear(*self.proj_shape)\n",
    "        self.q_proj = nn.Linear(*self.proj_shape)\n",
    "        self.v_proj = nn.Linear(*self.proj_shape)\n",
    "\n",
    "        # Linear maps used by the additive attention score (node_size -> N)\n",
    "        self.k_lin = nn.Linear(self.node_size,self.N) #B\n",
    "        self.q_lin = nn.Linear(self.node_size,self.N)\n",
    "        self.a_lin = nn.Linear(self.N,self.N)\n",
    "        \n",
    "        self.node_shape = (self.n_heads, self.N,self.node_size)\n",
    "        self.k_norm = nn.LayerNorm(self.node_shape, elementwise_affine=True)\n",
    "        self.q_norm = nn.LayerNorm(self.node_shape, elementwise_affine=True)\n",
    "        self.v_norm = nn.LayerNorm(self.node_shape, elementwise_affine=True)\n",
    "        \n",
    "        self.linear1 = nn.Linear(self.n_heads * self.node_size, self.node_size)\n",
    "        self.norm1 = nn.LayerNorm([self.N,self.node_size], elementwise_affine=False)\n",
    "        self.linear2 = nn.Linear(self.node_size, self.out_dim)\n",
    "    \n",
    "    def forward(self,x):\n",
    "        N, Cin, H, W = x.shape\n",
    "        x = self.conv1(x) \n",
    "        x = torch.relu(x)\n",
    "        x = self.conv2(x) \n",
    "        x = torch.relu(x) \n",
    "        with torch.no_grad(): \n",
    "            self.conv_map = x.clone() #C\n",
    "        # Append normalized (x,y) coordinate planes so each node knows where it is\n",
    "        _,_,cH,cW = x.shape\n",
    "        xcoords = torch.arange(cW).repeat(cH,1).float() / cW\n",
    "        ycoords = torch.arange(cH).repeat(cW,1).transpose(1,0).float() / cH\n",
    "        spatial_coords = torch.stack([xcoords,ycoords],dim=0)\n",
    "        spatial_coords = spatial_coords.unsqueeze(dim=0)\n",
    "        spatial_coords = spatial_coords.repeat(N,1,1,1)\n",
    "        x = torch.cat([x,spatial_coords],dim=1)\n",
    "        x = x.permute(0,2,3,1)\n",
    "        x = x.flatten(1,2)\n",
    "        \n",
    "        # Split the joint projection into per-head tensors: (batch, head, node, d)\n",
    "        K = rearrange(self.k_proj(x), \"b n (head d) -> b head n d\", head=self.n_heads)\n",
    "        K = self.k_norm(K) \n",
    "        \n",
    "        Q = rearrange(self.q_proj(x), \"b n (head d) -> b head n d\", head=self.n_heads)\n",
    "        Q = self.q_norm(Q) \n",
    "        \n",
    "        V = rearrange(self.v_proj(x), \"b n (head d) -> b head n d\", head=self.n_heads)\n",
    "        V = self.v_norm(V) \n",
    "        # Additive attention: compatibilities from learned linear maps + elu,\n",
    "        # rather than the dot-product form used in Listing 10.2\n",
    "        A = torch.nn.functional.elu(self.q_lin(Q) + self.k_lin(K)) #D\n",
    "        A = self.a_lin(A)\n",
    "        A = torch.nn.functional.softmax(A,dim=3) \n",
    "        with torch.no_grad():\n",
    "            self.att_map = A.clone() #E cached for visualization\n",
    "        E = torch.einsum('bhfc,bhcd->bhfd',A,V) #F\n",
    "        E = rearrange(E, 'b head n d -> b n (head d)')\n",
    "        E = self.linear1(E)\n",
    "        E = torch.relu(E)\n",
    "        E = self.norm1(E)\n",
    "        E = E.max(dim=1)[0]\n",
    "        y = self.linear2(E)\n",
    "        y = torch.nn.functional.elu(y)\n",
    "        return y"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### Listing 10.8"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gym\n",
    "from gym_minigrid.minigrid import *\n",
    "from gym_minigrid.wrappers import FullyObsWrapper, ImgObsWrapper\n",
    "from skimage.transform import resize\n",
    "\n",
    "def prepare_state(x): #A\n",
    "    \"\"\"Convert an HxWxC numpy observation to a normalized (1,C,H,W) tensor.\n",
    "    NOTE(review): normalizes by the max pixel value; assumes the observation\n",
    "    is never all zeros -- confirm for this environment.\n",
    "    \"\"\"\n",
    "    ns = torch.from_numpy(x).float().permute(2,0,1).unsqueeze(dim=0)#\n",
    "    maxv = ns.flatten().max()\n",
    "    ns = ns / maxv\n",
    "    return ns\n",
    "\n",
    "def get_minibatch(replay,size): #B\n",
    "    \"\"\"Sample `size` experiences (with replacement) from the replay deque and\n",
    "    stack them into batched tensors:\n",
    "    (states, actions, rewards, next_states, dones).\n",
    "    \"\"\"\n",
    "    batch_ids = np.random.randint(0,len(replay),size)\n",
    "    batch = [replay[x] for x in batch_ids] #list of tuples\n",
    "    state_batch = torch.cat([s for (s,a,r,s2,d) in batch],)\n",
    "    action_batch = torch.Tensor([a for (s,a,r,s2,d) in batch]).long()\n",
    "    reward_batch = torch.Tensor([r for (s,a,r,s2,d) in batch])\n",
    "    state2_batch = torch.cat([s2 for (s,a,r,s2,d) in batch],dim=0)\n",
    "    done_batch = torch.Tensor([d for (s,a,r,s2,d) in batch])\n",
    "    return state_batch,action_batch,reward_batch,state2_batch, done_batch\n",
    "\n",
    "def get_qtarget_ddqn(qvals,r,df,done): #C\n",
    "    \"\"\"Bootstrapped TD target r + df*qvals, zeroed at terminal states.\"\"\"\n",
    "    targets = r + (1-done) * df * qvals\n",
    "    return targets"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### Listing 10.9"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def lossfn(pred,targets,actions): #A\n",
    "    \"\"\"Mean squared TD error: gathers each taken action's Q-value from\n",
    "    `pred` (shape (B,n_actions)) and compares it to `targets` (shape (B,)).\n",
    "    Note: this shadows the NLLLoss `lossfn` defined in Listing 10.4.\n",
    "    \"\"\"\n",
    "    loss = torch.mean(torch.pow(\\\n",
    "                                targets.detach() -\\\n",
    "                                pred.gather(dim=1,index=actions.unsqueeze(dim=1)).squeeze()\\\n",
    "                                ,2),dim=0)\n",
    "    return loss\n",
    "  \n",
    "def update_replay(replay,exp,replay_size): #B\n",
    "    \"\"\"Append experience tuple `exp` to the deque `replay`. Positive-reward\n",
    "    experiences are duplicated 50x to offset their rarity. `replay_size` is\n",
    "    unused here; the deque's maxlen handles eviction.\n",
    "    \"\"\"\n",
    "    r = exp[2]\n",
    "    N = 1\n",
    "    if r > 0:\n",
    "        N = 50\n",
    "    for i in range(N):\n",
    "        replay.append(exp)\n",
    "    return replay\n",
    "\n",
    "action_map = { #C maps the net's 5 outputs onto MiniGrid action ids\n",
    "    0:0, \n",
    "    1:1,\n",
    "    2:2,\n",
    "    3:3,\n",
    "    4:5,\n",
    "}\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### Listing 10.10"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from collections import deque\n",
    "# Double-DQN training loop for MiniGrid-DoorKey using the relational net.\n",
    "env = ImgObsWrapper(gym.make('MiniGrid-DoorKey-5x5-v0')) #A\n",
    "state = prepare_state(env.reset()) \n",
    "GWagent = MultiHeadRelationalModule() #B online Q-network\n",
    "Tnet = MultiHeadRelationalModule() #C target network\n",
    "maxsteps = 400 #D\n",
    "env.max_steps = maxsteps\n",
    "env.env.max_steps = maxsteps\n",
    "\n",
    "epochs = 50000\n",
    "replay_size = 9000\n",
    "batch_size = 50\n",
    "lr = 0.0005\n",
    "gamma = 0.99\n",
    "replay = deque(maxlen=replay_size) #E\n",
    "opt = torch.optim.Adam(params=GWagent.parameters(),lr=lr)\n",
    "eps = 0.5 # exploration rate (held fixed; no decay schedule)\n",
    "update_freq = 100\n",
    "for i in range(epochs):\n",
    "    pred = GWagent(state)\n",
    "    action = int(torch.argmax(pred).detach().numpy())\n",
    "    if np.random.rand() < eps: #F\n",
    "        action = int(torch.randint(0,5,size=(1,)).squeeze())\n",
    "    action_d = action_map[action]\n",
    "    state2, reward, done, info = env.step(action_d)\n",
    "    reward = -0.01 if reward == 0 else reward #G small per-step penalty\n",
    "    state2 = prepare_state(state2)\n",
    "    exp = (state,action,reward,state2,done)\n",
    "    \n",
    "    replay = update_replay(replay,exp,replay_size)\n",
    "    if done:\n",
    "        state = prepare_state(env.reset())\n",
    "    else:\n",
    "        state = state2\n",
    "    if len(replay) > batch_size:\n",
    "        \n",
    "        opt.zero_grad()\n",
    "        \n",
    "        state_batch,action_batch,reward_batch,state2_batch,done_batch = get_minibatch(replay,batch_size)\n",
    "        \n",
    "        q_pred = GWagent(state_batch).cpu()\n",
    "        # Double DQN: online net selects the next action, target net evaluates it\n",
    "        astar = torch.argmax(q_pred,dim=1)\n",
    "        qs = Tnet(state2_batch).gather(dim=1,index=astar.unsqueeze(dim=1)).squeeze()\n",
    "        \n",
    "        targets = get_qtarget_ddqn(qs.detach(),reward_batch.detach(),gamma,done_batch)\n",
    "        \n",
    "        loss = lossfn(q_pred,targets.detach(),action_batch)\n",
    "        loss.backward()\n",
    "        torch.nn.utils.clip_grad_norm_(GWagent.parameters(), max_norm=1.0) #H\n",
    "        opt.step()\n",
    "    if i % update_freq == 0: #I sync target network with online network\n",
    "        Tnet.load_state_dict(GWagent.state_dict())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect attention: run one forward pass, then plot one head's weights\n",
    "# for a chosen node over the 7x7 node grid.\n",
    "state_ = env.reset()\n",
    "state = prepare_state(state_)\n",
    "GWagent(state) # forward pass populates GWagent.att_map\n",
    "plt.imshow(env.render('rgb_array'))\n",
    "plt.imshow(state[0].permute(1,2,0).detach().numpy())\n",
    "head, node = 2, 26\n",
    "plt.imshow(GWagent.att_map[0][head][node].view(7,7))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
