{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "\n",
    "from torch import nn\n",
    "\n",
    "\n",
    "class PolicyNetAtt(nn.Module):\n",
    "    \"\"\"Policy network that attends over per-landmark embeddings.\n",
    "\n",
    "    Expected observation layout (length input_dim = pos_dim + 4 * num_landmark):\n",
    "      [0 : pos_dim]                          agent pose (pos_dim = 7)\n",
    "      [pos_dim : pos_dim + 3*num_landmark]   flattened 3-D landmark positions\n",
    "      [-num_landmark :]                      per-landmark mask (0 = attention masked out)\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self,\n",
    "                 input_dim: int,\n",
    "                 policy_dim: int = 2):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            input_dim: total observation length; must equal pos_dim + 4 * num_landmark.\n",
    "            policy_dim: number of action components produced by the head (default 2).\n",
    "        \"\"\"\n",
    "\n",
    "        super(PolicyNetAtt, self).__init__()\n",
    "        \n",
    "        # length of the agent-pose prefix of the observation\n",
    "        self.pos_dim=7\n",
    "\n",
    "        # each landmark contributes 3 position values + 1 mask value\n",
    "        self.num_landmark = int((input_dim - self.pos_dim) / 4)\n",
    "\n",
    "        # agent-pose embedding MLP: pos_dim -> 32 -> 32 (acts as the attention query)\n",
    "        self.agent_pos_fc1_pi = nn.Linear(self.pos_dim, 32)\n",
    "        self.agent_pos_fc2_pi = nn.Linear(32, 32)\n",
    "        # per-landmark embedding MLP: 3 -> 64 -> 32 (acts as attention keys/values)\n",
    "        self.landmark_fc1_pi = nn.Linear(3, 64)\n",
    "        self.landmark_fc2_pi = nn.Linear(64, 32)\n",
    "        # fuses pose embedding (32) with attended landmark embedding (32)\n",
    "        self.info_fc1_pi = nn.Linear(64, 64)\n",
    "        # two-layer action head ending in policy_dim outputs\n",
    "        self.action_fc1_pi = nn.Linear(64, 64)\n",
    "        self.action_fc2_pi = nn.Linear(64, policy_dim)\n",
    "\n",
    "        self.relu = nn.ReLU()\n",
    "        self.tanh = nn.Tanh()\n",
    "        # softmax over the landmark axis of the (batch, 1, num_landmark) score tensor\n",
    "        self.softmax = nn.Softmax(dim=2)\n",
    "        \n",
    "    def forward(self, observation: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"Compute the policy output for one observation or a batch.\n",
    "\n",
    "        Args:\n",
    "            observation: tensor of shape (input_dim,) or (batch, input_dim).\n",
    "\n",
    "        Returns:\n",
    "            Tensor of shape (policy_dim,) when the batch size is 1, otherwise\n",
    "            (batch, policy_dim); components lie in (-1, 1) (tanh output).\n",
    "        \"\"\"\n",
    "        # promote a single observation vector to a batch of one\n",
    "        if len(observation.size()) == 1:\n",
    "            observation = observation[None, :]\n",
    "\n",
    "        # compute the policy\n",
    "        # embedding of the agent's pose -> (batch, 32) attention query\n",
    "        agent_pos_embedding = self.relu(self.agent_pos_fc1_pi(observation[:, :self.pos_dim]))\n",
    "        agent_pos_embedding = self.relu(self.agent_pos_fc2_pi(agent_pos_embedding))\n",
    "\n",
    "        # embeddings of landmarks: slice out the flattened 3-D positions\n",
    "        estimated_landmark_pos = observation[:, self.pos_dim: self.pos_dim + 3 * self.num_landmark]\n",
    "        \n",
    "        #landmark_info = torch.cat((estimated_landmark_pos.reshape(observation.size()[0], self.num_landmark, 2),\n",
    "        #                        info_vector.reshape(observation.size()[0], self.num_landmark, 2)), 2)\n",
    "        \n",
    "        # (batch, num_landmark, 3): one row of (x, y, z) per landmark\n",
    "        landmark_reshape=estimated_landmark_pos.reshape(observation.size()[0], self.num_landmark, 3)\n",
    "        #landmark_embedding = self.relu(self.landmark_fc1_pi(landmark_info))\n",
    "        \n",
    "        # per-landmark embedding -> (batch, num_landmark, 32)\n",
    "        landmark_embedding = self.relu(self.landmark_fc1_pi(landmark_reshape))\n",
    "        landmark_embedding = self.relu(self.landmark_fc2_pi(landmark_embedding))\n",
    "\n",
    "        # attention: transpose to (batch, 32, num_landmark) for the query @ keys product\n",
    "        landmark_embedding_tr = torch.transpose(landmark_embedding, 1, 2)\n",
    "\n",
    "        # mask: last num_landmark observation entries, shaped (batch, 1, num_landmark)\n",
    "        mask = observation[:, - self.num_landmark:].unsqueeze(1)\n",
    "        # scaled dot-product scores; NOTE(review): scale 4 != sqrt(32) ~ 5.66 -- confirm intended scale\n",
    "        attention = torch.matmul(agent_pos_embedding.unsqueeze(1), landmark_embedding_tr) / 4\n",
    "        # force masked-out landmarks (mask == 0) to ~zero weight after the softmax\n",
    "        attention = attention.masked_fill(mask == 0, -1e10)\n",
    "\n",
    "        att = self.softmax(attention)\n",
    "        # attention-weighted sum of landmark embeddings -> (batch, 32)\n",
    "        landmark_embedding_att = self.relu((torch.matmul(att, torch.transpose(landmark_embedding_tr, 1, 2)).squeeze(1)))\n",
    "\n",
    "        # fuse pose and attended landmark features, then run the action head\n",
    "        info_embedding = self.relu(self.info_fc1_pi(torch.cat((agent_pos_embedding, landmark_embedding_att), 1)))\n",
    "        action = self.tanh(self.action_fc1_pi(info_embedding))\n",
    "        action = self.tanh(self.action_fc2_pi(action))\n",
    "\n",
    "        # single-sample batches are returned as a flat (policy_dim,) vector\n",
    "        if action.size()[0] == 1:\n",
    "            action = action.flatten()\n",
    "\n",
    "        #scaled_action = torch.hstack(((1 + action[0]) * 2.0, action[1] * torch.pi/3))\n",
    "        # action scaling currently disabled: raw tanh output is returned unchanged\n",
    "        scaled_action=action\n",
    "\n",
    "        return scaled_action"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([37])"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Build a dummy observation matching PolicyNetAtt's expected layout:\n",
    "# [pose (7) | 3 position coords per landmark | 1 mask entry per landmark]\n",
    "# (the original cell had a dangling `padding=` statement -- a SyntaxError --\n",
    "# and only 7 + 30 = 37 values, which cannot feed a 4-per-landmark network)\n",
    "pose=torch.zeros(7)\n",
    "n_landmarks=30\n",
    "landmarks=torch.ones(3 * n_landmarks)\n",
    "mask=torch.ones(n_landmarks)\n",
    "net_input=torch.cat([pose,landmarks,mask])\n",
    "\n",
    "# expected length: 7 + 4 * 30 = 127\n",
    "net_input.shape"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
