{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "****************************************************************************\n",
      "\n",
      "=====================UAV1=====================\n",
      "Reward:  115.52 , Episode: 100, Sum Rate: 8.43 Gbps\n",
      "h1: -103.11dB, h2 = -105.19dB, h3 = -103.25dB\n",
      "a1: 21.59%, a2: 42.49%, a3: 35.92%\n",
      "Sum1: 3.44 Gbps, Sum2: 2.17 Gbps, Sum3: 2.83 Gbps\n",
      "Fairness:  70.7 %\n",
      "Height:  36.76 m\n",
      "=====================UAV2=====================\n",
      "Sum Rate:  5.69 Gbps\n",
      "h1: -99.52dB, h2: -102.13dB, h3: -102.45dB\n",
      "a1: 9.63%, a2: 23.49%, a3: 66.88%\n",
      "Sum1: 2.69 Gbps, Sum2: 1.5 Gbps, Sum3: 1.5 Gbps\n",
      "Fairness:  91.93 %\n",
      "\n",
      "****************************************************************************\n",
      "\n",
      "=====================UAV1=====================\n",
      "Reward:  136.32 , Episode: 200, Sum Rate: 11.67 Gbps\n",
      "h1: -99.13dB, h2 = -102.62dB, h3 = -95.32dB\n",
      "a1: 20.38%, a2: 55.56%, a3: 24.06%\n",
      "Sum1: 3.29 Gbps, Sum2: 3.1 Gbps, Sum3: 5.28 Gbps\n",
      "Fairness:  68.43 %\n",
      "Height:  36.03 m\n",
      "=====================UAV2=====================\n",
      "Sum Rate:  5.69 Gbps\n",
      "h1: -99.52dB, h2: -102.13dB, h3: -102.45dB\n",
      "a1: 9.63%, a2: 23.49%, a3: 66.88%\n",
      "Sum1: 2.69 Gbps, Sum2: 1.5 Gbps, Sum3: 1.5 Gbps\n",
      "Fairness:  91.93 %\n",
      "\n",
      "****************************************************************************\n",
      "\n",
      "=====================UAV1=====================\n",
      "Reward:  198.8 , Episode: 300, Sum Rate: 17.07 Gbps\n",
      "h1: -97.12dB, h2 = -101.47dB, h3 = -87.44dB\n",
      "a1: 24.18%, a2: 67.69%, a3: 8.13%\n",
      "Sum1: 5.23 Gbps, Sum2: 3.6 Gbps, Sum3: 8.25 Gbps\n",
      "Fairness:  71.95 %\n",
      "Height:  17.52 m\n",
      "=====================UAV2=====================\n",
      "Sum Rate:  5.69 Gbps\n",
      "h1: -99.52dB, h2: -102.13dB, h3: -102.45dB\n",
      "a1: 9.63%, a2: 23.49%, a3: 66.88%\n",
      "Sum1: 2.69 Gbps, Sum2: 1.5 Gbps, Sum3: 1.5 Gbps\n",
      "Fairness:  91.93 %\n",
      "\n",
      "****************************************************************************\n",
      "\n",
      "=====================UAV1=====================\n",
      "Reward:  209.91 , Episode: 400, Sum Rate: 17.59 Gbps\n",
      "h1: -98.02dB, h2 = -102.02dB, h3 = -86.54dB\n",
      "a1: 18.47%, a2: 73.1%, a3: 8.43%\n",
      "Sum1: 4.31 Gbps, Sum2: 4.12 Gbps, Sum3: 9.16 Gbps\n",
      "Fairness:  73.06 %\n",
      "Height:  16.87 m\n",
      "=====================UAV2=====================\n",
      "Sum Rate:  5.69 Gbps\n",
      "h1: -99.52dB, h2: -102.13dB, h3: -102.45dB\n",
      "a1: 9.63%, a2: 23.49%, a3: 66.88%\n",
      "Sum1: 2.69 Gbps, Sum2: 1.5 Gbps, Sum3: 1.5 Gbps\n",
      "Fairness:  91.93 %\n",
      "\n",
      "****************************************************************************\n",
      "\n",
      "=====================UAV1=====================\n",
      "Reward:  212.54 , Episode: 500, Sum Rate: 17.89 Gbps\n",
      "h1: -98.05dB, h2 = -102.14dB, h3 = -86.5dB\n",
      "a1: 24.25%, a2: 68.18%, a3: 7.58%\n",
      "Sum1: 4.9 Gbps, Sum2: 3.45 Gbps, Sum3: 9.54 Gbps\n",
      "Fairness:  73.89 %\n",
      "Height:  15.62 m\n",
      "=====================UAV2=====================\n",
      "Sum Rate:  5.69 Gbps\n",
      "h1: -99.52dB, h2: -102.13dB, h3: -102.45dB\n",
      "a1: 9.63%, a2: 23.49%, a3: 66.88%\n",
      "Sum1: 2.69 Gbps, Sum2: 1.5 Gbps, Sum3: 1.5 Gbps\n",
      "Fairness:  91.93 %\n",
      "\n",
      "****************************************************************************\n",
      "\n",
      "=====================UAV1=====================\n",
      "Reward:  214.32 , Episode: 600, Sum Rate: 18.17 Gbps\n",
      "h1: -98.01dB, h2 = -102.08dB, h3 = -86.51dB\n",
      "a1: 23.46%, a2: 69.4%, a3: 7.14%\n",
      "Sum1: 4.77 Gbps, Sum2: 3.57 Gbps, Sum3: 9.83 Gbps\n",
      "Fairness:  74.29 %\n",
      "Height:  14.69 m\n",
      "=====================UAV2=====================\n",
      "Sum Rate:  5.69 Gbps\n",
      "h1: -99.52dB, h2: -102.13dB, h3: -102.45dB\n",
      "a1: 9.63%, a2: 23.49%, a3: 66.88%\n",
      "Sum1: 2.69 Gbps, Sum2: 1.5 Gbps, Sum3: 1.5 Gbps\n",
      "Fairness:  91.93 %\n",
      "\n",
      "****************************************************************************\n",
      "\n",
      "=====================UAV1=====================\n",
      "Reward:  212.61 , Episode: 700, Sum Rate: 17.93 Gbps\n",
      "h1: -97.95dB, h2 = -101.99dB, h3 = -86.65dB\n",
      "a1: 21.27%, a2: 71.83%, a3: 6.91%\n",
      "Sum1: 4.56 Gbps, Sum2: 3.8 Gbps, Sum3: 9.56 Gbps\n",
      "Fairness:  75.15 %\n",
      "Height:  17.43 m\n",
      "=====================UAV2=====================\n",
      "Sum Rate:  5.69 Gbps\n",
      "h1: -99.52dB, h2: -102.13dB, h3: -102.45dB\n",
      "a1: 9.63%, a2: 23.49%, a3: 66.88%\n",
      "Sum1: 2.69 Gbps, Sum2: 1.5 Gbps, Sum3: 1.5 Gbps\n",
      "Fairness:  91.93 %\n",
      "\n",
      "****************************************************************************\n",
      "\n",
      "=====================UAV1=====================\n",
      "Reward:  215.13 , Episode: 800, Sum Rate: 18.08 Gbps\n",
      "h1: -98.08dB, h2 = -102.17dB, h3 = -86.59dB\n",
      "a1: 19.54%, a2: 73.85%, a3: 6.6%\n",
      "Sum1: 4.43 Gbps, Sum2: 3.93 Gbps, Sum3: 9.72 Gbps\n",
      "Fairness:  76.41 %\n",
      "Height:  15.87 m\n",
      "=====================UAV2=====================\n",
      "Sum Rate:  5.69 Gbps\n",
      "h1: -99.52dB, h2: -102.13dB, h3: -102.45dB\n",
      "a1: 9.63%, a2: 23.49%, a3: 66.88%\n",
      "Sum1: 2.69 Gbps, Sum2: 1.5 Gbps, Sum3: 1.5 Gbps\n",
      "Fairness:  91.93 %\n",
      "\n",
      "****************************************************************************\n",
      "\n",
      "=====================UAV1=====================\n",
      "Reward:  217.1 , Episode: 900, Sum Rate: 18.06 Gbps\n",
      "h1: -97.96dB, h2 = -102.08dB, h3 = -86.57dB\n",
      "a1: 20.21%, a2: 74.57%, a3: 5.22%\n",
      "Sum1: 4.85 Gbps, Sum2: 3.92 Gbps, Sum3: 9.29 Gbps\n",
      "Fairness:  78.23 %\n",
      "Height:  15.38 m\n",
      "=====================UAV2=====================\n",
      "Sum Rate:  5.69 Gbps\n",
      "h1: -99.52dB, h2: -102.13dB, h3: -102.45dB\n",
      "a1: 9.63%, a2: 23.49%, a3: 66.88%\n",
      "Sum1: 2.69 Gbps, Sum2: 1.5 Gbps, Sum3: 1.5 Gbps\n",
      "Fairness:  91.93 %\n",
      "\n",
      "****************************************************************************\n",
      "\n",
      "=====================UAV1=====================\n",
      "Reward:  216.88 , Episode: 1000, Sum Rate: 18.03 Gbps\n",
      "h1: -97.99dB, h2 = -102.1dB, h3 = -86.64dB\n",
      "a1: 19.96%, a2: 73.7%, a3: 6.34%\n",
      "Sum1: 4.87 Gbps, Sum2: 3.83 Gbps, Sum3: 9.34 Gbps\n",
      "Fairness:  78.88 %\n",
      "Height:  14.91 m\n",
      "=====================UAV2=====================\n",
      "Sum Rate:  5.69 Gbps\n",
      "h1: -99.52dB, h2: -102.13dB, h3: -102.45dB\n",
      "a1: 9.63%, a2: 23.49%, a3: 66.88%\n",
      "Sum1: 2.69 Gbps, Sum2: 1.5 Gbps, Sum3: 1.5 Gbps\n",
      "Fairness:  91.93 %\n",
      "\n"
     ]
    }
   ],
   "source": [
    "import gym\n",
    "import math\n",
    "import random\n",
    "import numpy as np\n",
    "import matplotlib\n",
    "import matplotlib.pyplot as plt\n",
    "import math\n",
    "from collections import namedtuple\n",
    "from itertools import count\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import torch.nn.functional as F\n",
    "import torchvision.transforms as T\n",
    "import cv2\n",
    "import time\n",
    "import os\n",
    "import pickle\n",
    "from PIL import Image\n",
    "import cv2\n",
    "import warnings\n",
    "\n",
    "warnings.filterwarnings(\"ignore\")  \n",
    "\n",
    "torch.manual_seed(0)\n",
    "np.random.seed(0)\n",
    "\n",
    "class DQN(nn.Module):\n",
    "    def __init__(self, NUMBER_OF_ARGUMENTS_PER_STATE, NUM_OF_LAYERS, NUM_OF_NEURONS_PER_LAYER, NUM_OF_ACTIONS):\n",
    "        super().__init__(),\n",
    "        \n",
    "        self.NUM_OF_LAYERS = NUM_OF_LAYERS\n",
    "        \n",
    "        if self.NUM_OF_LAYERS == 0:\n",
    "            self.fc1 = nn.Linear(in_features=NUMBER_OF_ARGUMENTS_PER_STATE, out_features=32)\n",
    "        elif self.NUM_OF_LAYERS == 1:\n",
    "            self.fc1 = nn.Linear(in_features=NUMBER_OF_ARGUMENTS_PER_STATE, out_features=NUM_OF_NEURONS_PER_LAYER)\n",
    "            self.out_v = nn.Linear(in_features=NUM_OF_NEURONS_PER_LAYER, out_features=1)\n",
    "            self.out_a = nn.Linear(in_features=NUM_OF_NEURONS_PER_LAYER, out_features=32)\n",
    "        elif self.NUM_OF_LAYERS == 2:\n",
    "            self.fc1 = nn.Linear(in_features=NUMBER_OF_ARGUMENTS_PER_STATE, out_features=NUM_OF_NEURONS_PER_LAYER)\n",
    "            self.fc2 = nn.Linear(in_features=NUM_OF_NEURONS_PER_LAYER, out_features=NUM_OF_NEURONS_PER_LAYER)\n",
    "            self.out_v = nn.Linear(in_features=NUM_OF_NEURONS_PER_LAYER, out_features=1)\n",
    "            self.out_a = nn.Linear(in_features=NUM_OF_NEURONS_PER_LAYER, out_features=NUM_OF_ACTIONS)\n",
    "\n",
    "    def forward(self, t):\n",
    "        \n",
    "        t = t.flatten(start_dim=1)\n",
    "        \n",
    "        if self.NUM_OF_LAYERS == 0:\n",
    "            t = self.fc1(t)\n",
    "            q = t\n",
    "            return q\n",
    "\n",
    "        elif self.NUM_OF_LAYERS == 1:\n",
    "            t = F.relu(self.fc1(t))\n",
    "            v = self.out_v(t) #Value Stream\n",
    "            a = self.out_a(t) # Advantage Stream\n",
    "            q = v + a - a.mean()\n",
    "            return q\n",
    "        \n",
    "        elif self.NUM_OF_LAYERS == 2:\n",
    "            t = F.relu(self.fc1(t))\n",
    "            t = F.relu(self.fc2(t))\n",
    "            v = self.out_v(t) #Value Stream\n",
    "            a = self.out_a(t) # Advantage Stream\n",
    "            q = v + a - a.mean()\n",
    "            return q\n",
    "\n",
    "Experience = namedtuple(\n",
    "            'Experience',\n",
    "            ('state', 'action', 'next_state', 'reward')\n",
    "                        )\n",
    "\n",
    "class ReplayMemory():\n",
    "    def __init__(self, capacity):\n",
    "        self.capacity = capacity\n",
    "        self.memory = []\n",
    "        self.push_count = 0\n",
    "\n",
    "    def push(self, experience):\n",
    "        if len(self.memory) < self.capacity:\n",
    "            self.memory.append(experience)\n",
    "        else:\n",
    "            self.memory[self.push_count % self.capacity] = experience\n",
    "        self.push_count += 1\n",
    "\n",
    "    def sample(self, batch_size):\n",
    "        return random.sample(self.memory, batch_size)\n",
    "\n",
    "    def can_provide_sample(self, batch_size):\n",
    "        return len(self.memory) >= batch_size\n",
    "\n",
    "class EpsilonGreedyStrategy():\n",
    "\n",
    "    def __init__(self, start, end, decay):\n",
    "        self.start = start\n",
    "        self.end = end\n",
    "        self.decay = decay\n",
    "\n",
    "    def get_exploration_rate(self, current_step):\n",
    "        return self.end + (self.start - self.end) * \\\n",
    "                            math.exp(-1. * current_step / self.decay)\n",
    "\n",
    "class Agent():\n",
    "    def __init__(self, strategy, num_actions, device):\n",
    "\n",
    "        self.current_step = 0\n",
    "        self.strategy = strategy\n",
    "        self.num_actions = num_actions\n",
    "        self.device = device\n",
    "\n",
    "    def select_action(self, state, policy_net):\n",
    "        rate = self.strategy.get_exploration_rate(self.current_step)\n",
    "        self.current_step += 1\n",
    "\n",
    "        if rate > random.random():\n",
    "            action = random.randrange(self.num_actions)\n",
    "            return torch.tensor([action]).to(self.device) # explore    \n",
    "        else:\n",
    "            with torch.no_grad():\n",
    "                return policy_net(state).argmax(dim=1).to(self.device) # exploit\n",
    "\n",
    "class QValues():\n",
    "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "    @staticmethod\n",
    "    def get_current(policy_net, states, actions):\n",
    "        return policy_net(states).gather(dim=1, index=actions.unsqueeze(-1))\n",
    "    @staticmethod        \n",
    "    def get_next(target_net, next_states):                 \n",
    "        return target_net(next_states).max(dim=1)[0].detach()\n",
    "\n",
    "def calc(episode_reward, SUM1, SUM2, SUM3, a1, a2, a3, h1, h2, h3, h2211, h2222, h2233, a2211, a2222, \\\n",
    "         a2233, SUM221, SUM222, SUM223, Fairness, avg2, Fairness2, moving_avg_period, \\\n",
    "         Hl):\n",
    "    \"\"\"Log smoothed training metrics for both UAVs every 100 episodes and\n",
    "    pickle the final curves on the last episode.\n",
    "\n",
    "    NOTE(review): this function reads the global ``episode`` -- it is not a\n",
    "    parameter, so the training loop must define it before calling calc().\n",
    "    The pickle branch at episode == 999 reuses locals (SUM, moving_avg_*)\n",
    "    that are only bound inside the (episode+1) % 100 == 0 branch; that is\n",
    "    safe at 999 because (999+1) % 100 == 0, but would raise NameError if\n",
    "    the final episode were not one less than a multiple of 100.\n",
    "    \"\"\"\n",
    "    \n",
    "    if (episode+1)%100 == 0:\n",
    "        \n",
    "            # UAV1 (the learning agent): smooth every logged series over the\n",
    "            # moving-average window and convert to display units.\n",
    "            Fairness = [element * 100 for element in Fairness]  # fraction -> percent\n",
    "            moving_avg_fairness = get_moving_average(moving_avg_period, Fairness)\n",
    "            \n",
    "            moving_avg_hl = get_moving_average(moving_avg_period, Hl)\n",
    "            \n",
    "            # Per-user rates are doubled here -- presumably a unit/bandwidth\n",
    "            # conversion; confirm against the rate computation in step().\n",
    "            moving_avg_SUM1 = get_moving_average(moving_avg_period, SUM1)\n",
    "            moving_avg_SUM2 = get_moving_average(moving_avg_period, SUM2)\n",
    "            moving_avg_SUM1 = [element * 2 for element in moving_avg_SUM1]\n",
    "            moving_avg_SUM2 = [element * 2 for element in moving_avg_SUM2]\n",
    "            moving_avg_SUM3 = get_moving_average(moving_avg_period, SUM3)\n",
    "            moving_avg_SUM3 = [element * 2 for element in moving_avg_SUM3]\n",
    "            \n",
    "            # Power-allocation shares, displayed as percentages.\n",
    "            moving_avg_a1 = get_moving_average(moving_avg_period, a1)\n",
    "            moving_avg_a2 = get_moving_average(moving_avg_period, a2)\n",
    "            moving_avg_a3 = get_moving_average(moving_avg_period, a3)\n",
    "            moving_avg_a1 = [element*100 for element in moving_avg_a1]\n",
    "            moving_avg_a2 = [element*100 for element in moving_avg_a2]\n",
    "            moving_avg_a3 = [element*100 for element in moving_avg_a3]\n",
    "            \n",
    "            moving_avg_h1 = get_moving_average(moving_avg_period, h1)\n",
    "            moving_avg_h2 = get_moving_average(moving_avg_period, h2)\n",
    "            moving_avg_h3 = get_moving_average(moving_avg_period, h3)\n",
    "            \n",
    "            moving_avg2 = get_moving_average(moving_avg_period, avg2)\n",
    "            moving_fairnes2 = get_moving_average(moving_avg_period, Fairness2)  # (sic: 'fairnes' typo, used consistently below)\n",
    "            \n",
    "            moving_avg_episode_rewards = get_moving_average(moving_avg_period, episode_reward)\n",
    "            \n",
    "            # Total smoothed sum rate across the three users.\n",
    "            SUM = np.add(moving_avg_SUM1,moving_avg_SUM2)\n",
    "            SUM = np.add(SUM,moving_avg_SUM3)\n",
    "            \n",
    "            # Convert the averaged linear channel gains to dB.\n",
    "            h11 = round(10 * math.log10(moving_avg_h1[-1]), 2)\n",
    "            h22 = round(10 * math.log10(moving_avg_h2[-1]), 2)\n",
    "            h33 = round(10 * math.log10(moving_avg_h3[-1]), 2)\n",
    "            \n",
    "            # UAV2 (baseline) gains arrive as linear scalars; convert likewise.\n",
    "            h2211 = round(10 * math.log10(h2211), 2)\n",
    "            h2222 = round(10 * math.log10(h2222), 2)\n",
    "            h2233 = round(10 * math.log10(h2233), 2)\n",
    "            \n",
    "            # Periodic console report for both UAVs.\n",
    "            print(\"****************************************************************************\\n\")\n",
    "            print(\"=====================UAV1=====================\")\n",
    "            print(f\"Reward: \", round( moving_avg_episode_rewards[-1] ,2) ,f\", Episode: {len(SUM1)}, Sum Rate: {round(SUM[-1],2)} Gbps\")\n",
    "            print(f\"h1: {h11}dB, h2 = {h22}dB, h3 = {h33}dB\")\n",
    "            print(f\"a1: {round(moving_avg_a1[-1],2)}%, a2: {round(moving_avg_a2[-1],2)}%, a3: {round(moving_avg_a3[-1],2)}%\")\n",
    "            print(f\"Sum1: {round(moving_avg_SUM1[-1],2)} Gbps, Sum2: {round(moving_avg_SUM2[-1],2)} Gbps, Sum3: {round(moving_avg_SUM3[-1],2)} Gbps\")\n",
    "            print(f\"Fairness: \", round(moving_avg_fairness[-1],2), \"%\")\n",
    "            print(f\"Height: \", round(moving_avg_hl[-1],2), \"m\")\n",
    "            \n",
    "            print(\"=====================UAV2=====================\")\n",
    "            print(f\"Sum Rate: \", round( moving_avg2[-1] , 2) , \"Gbps\")\n",
    "            print(f\"h1: {h2211}dB, h2: {h2222}dB, h3: {h2233}dB\")\n",
    "            print(f\"a1: {round(100*a2211,2)}%, a2: {round(100*a2222,2)}%, a3: {round(100*a2233,2)}%\")\n",
    "            print(f\"Sum1: {round(SUM221,2)} Gbps, Sum2: {round(SUM222,2)} Gbps, Sum3: {round(SUM223,2)} Gbps\")\n",
    "            print(f\"Fairness: \", round(moving_fairnes2[-1]*100,2), \"%\\n\")\n",
    "                  \n",
    "    else:\n",
    "            print(f\"Episode: {len(SUM1)}\", end='\\r')  # lightweight in-place progress line\n",
    "            \n",
    "    if episode == 999:\n",
    "           # Persist the final smoothed curves for offline plotting.\n",
    "           with open(f\"3-USERS-LOS-SumRate-2GHz.pickle\", \"wb\") as f:\n",
    "                   pickle.dump(SUM, f)\n",
    "           with open(f\"SoA-3-USERS-LOS-SumRate-2GHz.pickle\", \"wb\") as f:\n",
    "                   pickle.dump(moving_avg2, f)\n",
    "           with open(f\"3-USERS-LOS-SumRate1-2GHz.pickle\", \"wb\") as f:\n",
    "                   pickle.dump(moving_avg_SUM1, f)\n",
    "           with open(f\"3-USERS-LOS-SumRate2-2GHz.pickle\", \"wb\") as f:\n",
    "                   pickle.dump(moving_avg_SUM2, f)\n",
    "           with open(f\"3-USERS-LOS-SumRate3-2GHz.pickle\", \"wb\") as f:\n",
    "                   pickle.dump(moving_avg_SUM3, f)\n",
    "           with open(f\"3-USERS-LOS-a1-2GHz.pickle\", \"wb\") as f:\n",
    "                   pickle.dump(moving_avg_a1, f)\n",
    "           with open(f\"3-USERS-LOS-a2-2GHz.pickle\", \"wb\") as f:\n",
    "                   pickle.dump(moving_avg_a2, f)\n",
    "           with open(f\"3-USERS-LOS-a3-2GHz.pickle\", \"wb\") as f:\n",
    "                   pickle.dump(moving_avg_a3, f)\n",
    "            \n",
    "def get_moving_average(period, values):\n",
    "    values = torch.tensor(values, dtype=torch.float)\n",
    "    if len(values) >= period:\n",
    "        moving_avg = values.unfold(dimension=0, size=period, step=1) \\\n",
    "            .mean(dim=1).flatten(start_dim=0)\n",
    "        moving_avg = torch.cat((torch.zeros(period-1), moving_avg))\n",
    "        return moving_avg.numpy()\n",
    "    else:\n",
    "        moving_avg = torch.zeros(len(values))\n",
    "        return moving_avg.numpy()\n",
    "    \n",
    "def mmLineOfSight_Check(D,H):\n",
    "    L = 1\n",
    "    return L\n",
    "    C = 9.6117 # Urban LOS probability parameter \n",
    "    Y = 0.1581 # Urban LOS probability parameter\n",
    "    RAND = random.uniform(0,1)\n",
    "    teta = math.asin(H/D) * 180/math.pi\n",
    "    p1 = 1 / ( 1 + (C * math.exp( -Y * (teta - C ) ) ) )\n",
    "    p2 = 1 - p1\n",
    "    if p1 >= p2:\n",
    "        if RAND >= p2:\n",
    "            L = 1\n",
    "        else:\n",
    "            L = 2\n",
    "    else:\n",
    "        if RAND >= p1:\n",
    "            L = 2\n",
    "        else:\n",
    "            L = 1\n",
    "    return L\n",
    "    \n",
    "def Average(lst): \n",
    "    return sum(lst) / len(lst) \n",
    "\n",
    "def extract_tensors(experiences):\n",
    "    \"\"\"Convert a batch of Experience tuples into batched tensors.\n",
    "\n",
    "    Returns (states, actions, rewards, next_states) -- note the order:\n",
    "    reward comes third, before next_state.\n",
    "    \"\"\"\n",
    "    # Transpose the list of Experiences into one Experience of sequences.\n",
    "    batch = Experience(*zip(*experiences))\n",
    "\n",
    "    states = torch.cat(batch.state)\n",
    "    actions = torch.cat(batch.action)\n",
    "    rewards = torch.cat(batch.reward)\n",
    "    next_states = torch.cat(batch.next_state)\n",
    "\n",
    "    return (states, actions, rewards, next_states)\n",
    "\n",
    "class Blob():\n",
    "    def __init__(self, size, USER1=False, USER2=False, USER3=False, USER4=False):\n",
    "        self.size = size\n",
    "        if USER1:\n",
    "            self.x = 23\n",
    "            self.y = 46\n",
    "        elif USER2:\n",
    "            self.x = 2\n",
    "            self.y = 55\n",
    "        elif USER3:\n",
    "            self.x = 34\n",
    "            self.y = 19\n",
    "        else:\n",
    "            self.x = 50\n",
    "            self.y = 50\n",
    "\n",
    "    def __str__(self):\n",
    "        return f\"Blob({self.x}, {self.y})\"\n",
    "\n",
    "    def __sub__(self, other):\n",
    "        return [(self.x-other.x), (self.y-other.y)]\n",
    "\n",
    "    def __eq__(self, other):\n",
    "        return self.x == other.x and self.y == other.y\n",
    "\n",
    "    def action(self, choice):\n",
    "        \n",
    "\n",
    "        if choice == 0:\n",
    "            self.move(x=1, y=1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 +=0.01\n",
    "            self.H += 1\n",
    "\n",
    "        elif choice == 1:\n",
    "            self.move(x=-1, y=-1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 +=0.01\n",
    "            self.H += 1\n",
    "\n",
    "        elif choice == 2:\n",
    "            self.move(x=-1, y=1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 +=0.01\n",
    "            self.H += 1\n",
    "\n",
    "        elif choice == 3:\n",
    "            self.move(x=1, y=-1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 +=0.01\n",
    "            self.H += 1\n",
    "            \n",
    "        elif choice == 4:\n",
    "            self.move(x=1, y=1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 -=0.01\n",
    "            self.H += 1\n",
    "\n",
    "        elif choice == 5:\n",
    "            self.move(x=-1, y=-1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 -= 0.01\n",
    "            self.H += 1\n",
    "\n",
    "        elif choice == 6:\n",
    "            self.move(x=-1, y=1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 -= 0.01\n",
    "            self.H += 1\n",
    "\n",
    "        elif choice == 7:\n",
    "            self.move(x=1, y=-1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 -= 0.01\n",
    "            self.H += 1\n",
    "            \n",
    "        elif choice == 8:\n",
    "            self.move(x=1, y=1)\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 += 0.01\n",
    "            self.H += 1\n",
    "            \n",
    "        elif choice == 9:\n",
    "            self.move(x=-1, y=-1)\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 += 0.01\n",
    "            self.H += 1\n",
    "\n",
    "        elif choice == 10:\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 += 0.01\n",
    "            self.H += 1\n",
    "\n",
    "        elif choice == 11:\n",
    "            self.move(x=1, y=-1)\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 += 0.01\n",
    "            self.H += 1\n",
    "            \n",
    "        elif choice == 12:\n",
    "            self.move(x=1, y=1)\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 -= 0.01\n",
    "            self.H += 1\n",
    "\n",
    "        elif choice == 13:\n",
    "            self.move(x=-1, y=-1)\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 -= 0.01\n",
    "            self.H += 1\n",
    "\n",
    "        elif choice == 14:\n",
    "            self.move(x=-1, y=1)\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 -= 0.01\n",
    "            self.H += 1\n",
    "\n",
    "        elif choice == 15:\n",
    "            self.move(x=1, y=-1)\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 -= 0.01\n",
    "            self.H += 1\n",
    "            \n",
    "        if choice == 16:\n",
    "            self.move(x=1, y=1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 +=0.01\n",
    "            self.H -= 1\n",
    "\n",
    "        elif choice == 17:\n",
    "            self.move(x=-1, y=-1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 +=0.01\n",
    "            self.H -= 1\n",
    "\n",
    "        elif choice == 18:\n",
    "            self.move(x=-1, y=1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 +=0.01\n",
    "            self.H -= 1\n",
    "\n",
    "        elif choice == 19:\n",
    "            self.move(x=1, y=-1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 +=0.01\n",
    "            self.H -= 1\n",
    "            \n",
    "        elif choice == 20:\n",
    "            self.move(x=1, y=1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 -=0.01\n",
    "            self.H -= 1\n",
    "\n",
    "        elif choice == 21:\n",
    "            self.move(x=-1, y=-1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 -= 0.01\n",
    "            self.H -= 1\n",
    "\n",
    "        elif choice == 22:\n",
    "            self.move(x=-1, y=1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 -= 0.01\n",
    "            self.H -= 1\n",
    "\n",
    "        elif choice == 23:\n",
    "            self.move(x=1, y=-1)\n",
    "            self.a1 += 0.01\n",
    "            self.a3 -= 0.01\n",
    "            self.H -= 1\n",
    "            \n",
    "        elif choice == 24:\n",
    "            self.move(x=1, y=1)\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 += 0.01\n",
    "            self.H -= 1\n",
    "            \n",
    "        elif choice == 25:\n",
    "            self.move(x=-1, y=-1)\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 += 0.01\n",
    "            self.H -= 1\n",
    "\n",
    "        elif choice == 26:\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 += 0.01\n",
    "            self.H -= 1\n",
    "\n",
    "        elif choice == 27:\n",
    "            self.move(x=1, y=-1)\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 += 0.01\n",
    "            self.H -= 1\n",
    "            \n",
    "        elif choice == 28:\n",
    "            self.move(x=1, y=1)\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 -= 0.01\n",
    "            self.H -= 1\n",
    "\n",
    "        elif choice == 29:\n",
    "            self.move(x=-1, y=-1)\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 -= 0.01\n",
    "            self.H -= 1\n",
    "\n",
    "        elif choice == 30:\n",
    "            self.move(x=-1, y=1)\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 -= 0.01\n",
    "            self.H -= 1\n",
    "\n",
    "        elif choice == 31:\n",
    "            self.move(x=1, y=-1)\n",
    "            self.a1 -= 0.01\n",
    "            self.a3 -= 0.01\n",
    "            self.H -= 1\n",
    "            \n",
    "        if self.a1 > 1:\n",
    "            self.a1 = 1\n",
    "            self.a3 = 0\n",
    "        elif self.a1 < 0:\n",
    "            self.a1 = 0\n",
    "        if self.a3 > 1:\n",
    "            self.a3 = 1\n",
    "            self.a1 = 0\n",
    "        elif self.a3 < 0:\n",
    "            self.a3 = 0\n",
    "\n",
    "        if self.a1+self.a3 > 1:\n",
    "            \n",
    "            a2 = 2 - self.a1 - self.a3\n",
    "            a = [self.a1, a2, self.a3]\n",
    "            self.a1 /= np.sum(a)\n",
    "            self.a3 /= np.sum(a)\n",
    "\n",
    "        if self.H <= 10:\n",
    "            self.H =10\n",
    "        \n",
    "\n",
    "    def move(self, x=False, y=False):\n",
    "\n",
    "        if not x:\n",
    "            self.x += np.random.randint(-1, 2)\n",
    "        else:\n",
    "            self.x += x\n",
    "\n",
    "        if not y:\n",
    "            self.y += np.random.randint(-1, 2)\n",
    "        else:\n",
    "            self.y += y\n",
    "\n",
    "        if self.x < 0:\n",
    "            self.x = 0\n",
    "        elif self.x > self.size-1:\n",
    "            self.x = self.size-1\n",
    "        if self.y < 0:\n",
    "            self.y = 0\n",
    "        elif self.y > self.size-1:\n",
    "            self.y = self.size-1\n",
    "\n",
    "class BlobEnv():\n",
    "    \"\"\"Grid-world environment: one learning UAV serving three fixed users,\n",
    "    plus a baseline UAV2 parked at the users' centroid (see reset()).\"\"\"\n",
    "    SIZE = 100  # the world is a SIZE x SIZE grid\n",
    "    MOVE_PENALTY = 1  # movement penalty constant (consumer not visible in this chunk)\n",
    "    OBSERVATION_SPACE_VALUES = (SIZE, SIZE, 3)  # 4\n",
    "    UAV_N = 1  # UAV key in dict\n",
    "    USER_N = 2  # USER key in dict\n",
    "    UAV2_N = 4  # UAV2 key in dict\n",
    "    # the dict! (colors) -- presumably BGR tuples for cv2 rendering; confirm\n",
    "    d = {1: (255, 175, 0),\n",
    "         2: (0, 255, 0),\n",
    "         3: (0, 0, 255),\n",
    "         4: (175, 0, 255)}\n",
    "\n",
    "    def reset(self):\n",
    "        \"\"\"Begin a new episode and return the initial observation.\n",
    "\n",
    "        Observation layout: [dx1, dy1, dx2, dy2, dx3, dy3, a1, a3, H] --\n",
    "        the UAV-to-user offsets for the three users, the two free power\n",
    "        shares, and the UAV altitude.\n",
    "\n",
    "        The original body also recomputed the radio-link constants and\n",
    "        channel gains h1/h2/h3 plus the 2D distances D1-D3; none of those\n",
    "        locals were used or stored, so the dead computation was removed\n",
    "        (step() recomputes what it needs each step).\n",
    "        \"\"\"\n",
    "        self.UAV = Blob(self.SIZE)\n",
    "        self.UAV2 = Blob(self.SIZE)\n",
    "\n",
    "        # Per-episode metric buffers, filled in during the episode.\n",
    "        self.SUM1 = []\n",
    "        self.SUM2 = []\n",
    "        self.SUM3 = []\n",
    "        self.a111 = []\n",
    "        self.a222 = []\n",
    "        self.a333 = []\n",
    "        self.h111 = []\n",
    "        self.h222 = []\n",
    "        self.h333 = []\n",
    "        self.hl = []\n",
    "        self.Fairness = []\n",
    "        self.ep_rewards = []\n",
    "\n",
    "        # Initial equal power split and starting altitude for the learning UAV.\n",
    "        self.UAV.a1 = 0.33\n",
    "        self.UAV.a2 = 0.33\n",
    "        self.UAV.a3 = 0.33\n",
    "        self.UAV.H = 50\n",
    "\n",
    "        self.USER1 = Blob(self.SIZE, True, False, False, False)\n",
    "        self.USER2 = Blob(self.SIZE, False, True, False, False)\n",
    "        self.USER3 = Blob(self.SIZE, False, False, True, False)\n",
    "\n",
    "        # The baseline UAV2 hovers at the users' centroid.\n",
    "        self.UAV2.x = int((self.USER1.x +self.USER2.x + self.USER3.x )/3)\n",
    "        self.UAV2.y = int((self.USER1.y +self.USER2.y + self.USER3.y )/3)\n",
    "\n",
    "        ob1 = self.UAV-self.USER1\n",
    "        ob2 = self.UAV-self.USER2\n",
    "        ob3 = self.UAV-self.USER3\n",
    "\n",
    "        H = self.UAV.H\n",
    "        # NOTE(review): sum of element-wise square roots, i.e. |dx|+|dy|+H --\n",
    "        # not the Euclidean norm; kept as-is because step() uses the same\n",
    "        # formula. Confirm whether Euclidean distance was intended.\n",
    "        Dt1 = np.sum(np.sqrt([ (ob1[0])**2, (ob1[1])**2, H**2  ]))\n",
    "        Dt2 = np.sum(np.sqrt([ (ob2[0])**2, (ob2[1])**2, H**2  ]))\n",
    "        Dt3 = np.sum(np.sqrt([ (ob3[0])**2, (ob3[1])**2, H**2  ]))\n",
    "\n",
    "        self.L1 = mmLineOfSight_Check(Dt1,H)\n",
    "        self.L2 = mmLineOfSight_Check(Dt2,H)\n",
    "        self.L3 = mmLineOfSight_Check(Dt3,H)\n",
    "\n",
    "        a1 =  self.UAV.a1\n",
    "        a3 =  self.UAV.a3\n",
    "\n",
    "        observation = ([ob1[0]] + [ob1[1]] + [ob2[0]] + [ob2[1]]+ [ob3[0]] + [ob3[1]] + [a1] + [a3]  + [H])\n",
    "\n",
    "        self.episode_step = 0\n",
    "\n",
    "        return observation\n",
    "\n",
    "    def step(self, action):\n",
    "        # One environment timestep for UAV1: recompute mmWave channel gains from\n",
    "        # the current UAV/user geometry, apply the agent's action, compute NOMA\n",
    "        # per-user rates and a shaped reward, and - on step 300 - evaluate the\n",
    "        # static benchmark UAV2 and flush per-episode logs to module-level lists.\n",
    "        # Returns (new_observation, new_observation_m, reward, done).\n",
    "        \n",
    "        done= False\n",
    "        \n",
    "        # Link-budget constants (re-created on every call).\n",
    "        P = 0.1 # Transmitted power 20dbm (i.e. .1w)\n",
    "        N_uav = 8\n",
    "        N_ue = 8\n",
    "        G = N_uav * N_ue # combined array gain: 8 UAV elements x 8 UE elements\n",
    "        P *= G # effective power after beamforming gain\n",
    "        W = 2e9 # Bandwidth 2GHz (unused in this method)\n",
    "        fc = 28e9 # Carrier frequency = 28GHz (unused in this method)\n",
    "        NF = 10**(5/10) # 5dB Noise Figure \n",
    "        TN = 10**(-114/10) # -114dBm thermal noise in linear scale\n",
    "        N = NF * TN # total noise power\n",
    "        C_LOS = 10**(-6.4) # LOS path-loss intercept\n",
    "        a_LOS = 2 # LOS path-loss exponent\n",
    "        C_NLOS = 10**(-7.2) \n",
    "        a_NLOS = 2.92        \n",
    "        H = self.UAV.H # antenna Height\n",
    "        self.hl.append(H) # log height for per-episode averaging\n",
    "        \n",
    "        self.episode_step += 1\n",
    "        \n",
    "        # Blob subtraction presumably yields the (dx, dy) offset UAV - user;\n",
    "        # confirm against the Blob class definition.\n",
    "        ob1 = self.UAV-self.USER1\n",
    "        ob2 = self.UAV-self.USER2\n",
    "        ob3 = self.UAV-self.USER3\n",
    "        \n",
    "        # NOTE(review): np.sum(np.sqrt([dx**2, dy**2])) equals |dx| + |dy| (a\n",
    "        # Manhattan-style distance), NOT the Euclidean norm sqrt(dx**2 + dy**2);\n",
    "        # likewise Dt* below equals |dx| + |dy| + H. reset() uses the identical\n",
    "        # formula so the model is self-consistent - confirm this is intended.\n",
    "        # D1..D3 are computed but never used in this method.\n",
    "        D1 =  np.sum(np.sqrt([(ob1[0])**2, (ob1[1])**2]))\n",
    "        D2 = np.sum(np.sqrt([(ob2[0])**2, (ob2[1])**2]))\n",
    "        D3 = np.sum(np.sqrt([(ob3[0])**2, (ob3[1])**2]))\n",
    "                  \n",
    "        Dt1 = np.sum(np.sqrt([ (ob1[0])**2, (ob1[1])**2, H**2  ]))\n",
    "        Dt2 = np.sum(np.sqrt([ (ob2[0])**2, (ob2[1])**2, H**2  ]))\n",
    "        Dt3 = np.sum(np.sqrt([ (ob3[0])**2, (ob3[1])**2, H**2  ]))\n",
    "        \n",
    "        # LOS indicators (1 = line of sight) from a helper defined elsewhere.\n",
    "        self.L1 = mmLineOfSight_Check(Dt1,H)\n",
    "        self.L2 = mmLineOfSight_Check(Dt2,H)\n",
    "        self.L3 = mmLineOfSight_Check(Dt3,H)\n",
    "        \n",
    "        # Per-user channel gain: pick LOS or NLOS path-loss model per indicator.\n",
    "        if self.L1 == 1:\n",
    "            h1 = C_LOS * Dt1**(-a_LOS)\n",
    "        else:\n",
    "            h1 = C_NLOS * Dt1**(-a_NLOS)\n",
    "        if self.L2 == 1:\n",
    "            h2 = C_LOS * Dt2**(-a_LOS)\n",
    "        else:\n",
    "            h2 = C_NLOS * Dt2**(-a_NLOS)\n",
    "        if self.L3 == 1:\n",
    "            h3 = C_LOS * Dt3**(-a_LOS)\n",
    "        else:\n",
    "            h3 = C_NLOS * Dt3**(-a_NLOS)\n",
    "        \n",
    "        # Apply the agent's action. NOTE(review): the gains above reflect the\n",
    "        # pre-action position/height, while a1/a3 below are post-action values.\n",
    "        self.UAV.action(action)\n",
    "        \n",
    "        a1 =  self.UAV.a1\n",
    "        a3 =  self.UAV.a3\n",
    "        a2 =  1 - a1 - a3 # power fractions sum to 1\n",
    "        \n",
    "        # Per-step logs, averaged at episode end.\n",
    "        self.a111.append(a1)\n",
    "        self.a222.append(a2)\n",
    "        self.a333.append(a3)\n",
    "        self.h111.append(h1)\n",
    "        self.h222.append(h2)\n",
    "        self.h333.append(h3)\n",
    "\n",
    "        reward = 0\n",
    "        reward_6 = 0\n",
    "        \n",
    "        # Tiny distinct offsets so the floats below can serve as unique dict\n",
    "        # keys even if two gains/allocations happen to coincide.\n",
    "        h1 += 0.000000000001\n",
    "        h2 += 0.000000000002\n",
    "        h3 += 0.000000000003\n",
    "\n",
    "        a1 += 0.000000000001\n",
    "        a2 += 0.000000000002\n",
    "        a3 += 0.000000000003\n",
    "     \n",
    "        SUM1 = 0\n",
    "        SUM2 = 0\n",
    "        SUM3 = 0\n",
    "\n",
    "        # dr: channel gain -> its power allocation.\n",
    "        dr = {\n",
    "              h1: a1,\n",
    "              h2: a2,\n",
    "              h3: a3\n",
    "             }\n",
    "\n",
    "\n",
    "        # dd: power allocation -> achievable rate (filled in after SIC ordering).\n",
    "        dd = {\n",
    "    \n",
    "            a1: SUM1,\n",
    "            a2: SUM2,\n",
    "            a3: SUM3\n",
    "            }\n",
    "\n",
    "        # NOMA decoding order: sort gains ascending; the weakest user is decoded\n",
    "        # first and treats the stronger users' power as interference.\n",
    "        hs = [h1, h2, h3] \n",
    "        hs.sort()\n",
    "        \n",
    "        h11 = hs[0]\n",
    "        a11 = dr[hs[0]]\n",
    "        \n",
    "        h22 = hs[1]\n",
    "        a22 = dr[hs[1]]\n",
    "        \n",
    "        h33 = hs[2]\n",
    "        a33 = dr[hs[2]]\n",
    "        \n",
    "        SNR = P/N # transmit SNR\n",
    "\n",
    "        # Shannon rates (bps/Hz) under SIC: weakest link (h11) sees a22+a33 as\n",
    "        # interference, middle sees a33, strongest is interference-free.\n",
    "        dd[a11] = math.log2( 1 + a11 * SNR * h11 / (h11 * SNR * (a22+a33) + 1) )\n",
    "        dd[a22] = math.log2( 1 + a22 * SNR * h22 / (h22 * SNR * (a33) + 1) )\n",
    "        dd[a33] = math.log2( 1 + a33 * SNR * h33 )\n",
    "\n",
    "        # Map rates back to users via their (epsilon-unique) allocation keys.\n",
    "        SUM1 = dd[a1]\n",
    "        SUM2 = dd[a2]\n",
    "        SUM3 = dd[a3]\n",
    "\n",
    "        # Jain's fairness index over the three user rates (1 = perfectly fair).\n",
    "        reward_3 = (SUM1 + SUM2 + SUM3)**2 / (3 * (SUM1**2 + SUM2**2 + SUM3**2))\n",
    "        \n",
    "        \n",
    "        self.SUM1.append(SUM1)\n",
    "        self.SUM2.append(SUM2)\n",
    "        self.SUM3.append(SUM3)\n",
    "        self.Fairness.append(reward_3)\n",
    "\n",
    "        # Reward shaping: sum rate + fairness (as percent) + channel-gain bonus.\n",
    "        reward_3 *= 100\n",
    "        reward_6 += 2e10 * (h1+h2+h3)\n",
    "        reward +=  10*(SUM1 + SUM2 + SUM3)  + reward_3  + reward_6\n",
    "        self.ep_rewards.append(reward)\n",
    "\n",
    "        # Observation: three (dx, dy) offsets + a1 + a3 + height (9 values).\n",
    "        new_observation_m =  ([ob1[0]] + [ob1[1]] + [ob2[0]] + [ob2[1]]+ [ob3[0]] + [ob3[1]] + [a1]  + [a3]  + [H])\n",
    "        new_observation =  new_observation_m  \n",
    "        \n",
    "        # Episode ends after 300 steps; evaluate the static benchmark UAV2 and\n",
    "        # flush per-episode averages into the module-level history lists.\n",
    "        if self.episode_step >= 300:\n",
    "            \n",
    "            r = 1.5 # target rate (bps/Hz) used by UAV2's fixed power allocation\n",
    "            \n",
    "            ob21 = self.UAV2-self.USER1\n",
    "            ob22 = self.UAV2-self.USER2\n",
    "            ob23 = self.UAV2-self.USER3\n",
    "            H2 = 50 # UAV2 hovers at a fixed 50 m\n",
    "            \n",
    "            # D21..D23 are computed but never used (same formula caveat as above).\n",
    "            D21 =  np.sum(np.sqrt([(ob21[0])**2, (ob21[1])**2]))\n",
    "            D22 = np.sum(np.sqrt([(ob22[0])**2, (ob22[1])**2]))\n",
    "            D23 = np.sum(np.sqrt([(ob23[0])**2, (ob23[1])**2]))\n",
    "\n",
    "            Dt21 = np.sum(np.sqrt([ (ob21[0])**2, (ob21[1])**2, H2**2  ]))\n",
    "            Dt22 = np.sum(np.sqrt([ (ob22[0])**2, (ob22[1])**2, H2**2  ]))\n",
    "            Dt23 = np.sum(np.sqrt([ (ob23[0])**2, (ob23[1])**2, H2**2  ]))\n",
    "\n",
    "        \n",
    "            # Benchmark assumes LOS on all three links (no mmLineOfSight_Check).\n",
    "            h2221 = C_LOS * Dt21**(-a_LOS)\n",
    "            h2222 = C_LOS * Dt22**(-a_LOS)\n",
    "            h2223 = C_LOS * Dt23**(-a_LOS)\n",
    "           \n",
    "            # Same epsilon trick: make gains unique so they work as dict keys.\n",
    "            h2221 += 0.000000000001\n",
    "            h2222 += 0.000000000002\n",
    "            h2223 += 0.000000000003\n",
    "            \n",
    "            dd2 = {\n",
    "                h2221 : 0,\n",
    "                h2222 : 0,\n",
    "                h2223 : 0\n",
    "            }\n",
    "            \n",
    "            hs2 = [h2221, h2222, h2223] \n",
    "            hs2.sort()\n",
    "        \n",
    "            h221_1 = hs2[0]\n",
    "            h222_2 = hs2[1]\n",
    "            h223_3 = hs2[2]\n",
    "        \n",
    "            SNR = P/N\n",
    "            # Closed-form allocation giving each weaker user just enough power\n",
    "            # to hit rate r; the strongest user receives the remainder.\n",
    "            a2211 = ((2**r - 1)/2**r) * (1 + N/(P*h221_1))\n",
    "            a2222 = ((2**r - 1)/2**r) * (1 - a2211 + N/(P*h222_2))\n",
    "            a2233 = 1 - a2222 - a2211\n",
    "            \n",
    "            aa2 = {\n",
    "                h221_1 : a2211,\n",
    "                h222_2 : a2222,\n",
    "                h223_3 : a2233\n",
    "            }\n",
    "            \n",
    "            # Same SIC rate expressions as for UAV1 above.\n",
    "            dd2[h221_1] = math.log2( 1 + a2211 * SNR * h221_1 / (h221_1 * SNR * (a2222+a2233) + 1) )\n",
    "            dd2[h222_2] = math.log2( 1 + a2222 * SNR * h222_2 / (h222_2 * SNR * (a2233) + 1) )\n",
    "            dd2[h223_3] = math.log2( 1 + a2233 * SNR * h223_3 )\n",
    "            \n",
    "            SUM221 = dd2[h2221]\n",
    "            SUM222 = dd2[h2222]\n",
    "            SUM223 = dd2[h2223]\n",
    "            \n",
    "            a2_1 = aa2[h2221]\n",
    "            a2_2 = aa2[h2222]\n",
    "            a2_3 = aa2[h2223]\n",
    "\n",
    "\n",
    "            # Jain's fairness index and total rate for the benchmark UAV2.\n",
    "            Fairness222 = (SUM221 + SUM222 + SUM223)**2 / (3 * (SUM221**2 + SUM222**2 + SUM223**2))\n",
    "            average_sum_rate2 =  SUM221 + SUM222 + SUM223\n",
    "\n",
    "            \n",
    "            # Append this episode's averages to the module-level history lists\n",
    "            # (SUM11, a111, ... are globals defined next to the training loop).\n",
    "            SUM11.append(Average(self.SUM1)) \n",
    "            SUM22.append(Average(self.SUM2)) \n",
    "            SUM33.append(Average(self.SUM3)) \n",
    "            a111.append(Average(self.a111))\n",
    "            a222.append(Average(self.a222))\n",
    "            a333.append(Average(self.a333))\n",
    "            h111.append(Average(self.h111))\n",
    "            h222.append(Average(self.h222))\n",
    "            h333.append(Average(self.h333))\n",
    "            Fairnessl.append( Average(self.Fairness) )\n",
    "            episode_reward.append( Average(self.ep_rewards) )\n",
    "            AVG2.append(average_sum_rate2)\n",
    "            Fairnessl_2.append(Fairness222)\n",
    "            Hl.append(Average(self.hl))\n",
    "\n",
    "            # calc (defined elsewhere) prints/plots progress every 100 episodes.\n",
    "            calc(episode_reward, SUM11, SUM22, SUM33, a111, a222, a333, h111, h222, h333, h2221, h2222, h2223, \\\n",
    "                 a2_1, a2_2, a2_3, SUM221, SUM222, SUM223, Fairnessl, AVG2, Fairnessl_2, 100, \\\n",
    "                 Hl)\n",
    "            \n",
    "            done = True\n",
    "                          \n",
    "        return new_observation,new_observation_m, reward, done\n",
    "    \n",
    "    def render(self):\n",
    "        # Show the current environment state in an OpenCV window; the 1 ms key\n",
    "        # wait lets the window refresh without blocking training.\n",
    "        frame = self.get_image().resize((500, 500))  # upscale the small grid for visibility\n",
    "        cv2.imshow(\"UAV Beta 1\", np.array(frame))\n",
    "        cv2.waitKey(1)\n",
    "\n",
    "    def get_image(self):\n",
    "        # Render a SIZE x SIZE RGB snapshot: white background, one colored pixel\n",
    "        # per user (palette entry keyed by its LOS flag + 1) and one for the UAV.\n",
    "        canvas = np.full((self.SIZE, self.SIZE, 3), 255, dtype=np.uint8)\n",
    "        for user, los in ((self.USER1, self.L1), (self.USER2, self.L2), (self.USER3, self.L3)):\n",
    "            canvas[user.x][user.y] = self.d[los + 1]\n",
    "        canvas[self.UAV.x][self.UAV.y] = self.d[self.UAV_N]\n",
    "        return Image.fromarray(canvas, 'RGB')\n",
    "\n",
    "\n",
    "# Training hyperparameters (read by the loop below and by the agent classes).\n",
    "batch_size = 128 # replay-memory minibatch size\n",
    "gamma = 0.999 # discount factor\n",
    "eps_start = 0.9 # epsilon-greedy exploration schedule: start value\n",
    "eps_end = 0.05 # ... floor value\n",
    "eps_decay = 200 # ... decay rate\n",
    "target_update = 10 # episodes between target-network syncs\n",
    "memory_size = 15000 # replay-memory capacity\n",
    "lr = 0.001 # Adam learning rate\n",
    "num_episodes = 1000\n",
    "num_of_actions = 32 # discrete action-space size expected by Agent/DQN\n",
    "num_of_arg_per_state = 9 # observation length: 3x(dx, dy) + a1 + a3 + H\n",
    "\n",
    "\n",
    "# Hyperparameter grids for the nested sweep below (single setting each here).\n",
    "NUM_OF_LAYERS = [1]\n",
    "NUM_OF_NEURONS_PER_LAYER = [128]\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "                   \n",
    "# Hyperparameter sweep: train one DQN agent per (layers, neurons) setting.\n",
    "for num_of_layers in NUM_OF_LAYERS:\n",
    "    for num_of_neurons_per_layer in NUM_OF_NEURONS_PER_LAYER:\n",
    "\n",
    "        # Fresh environment, agent, networks and replay memory per setting.\n",
    "        em = BlobEnv()\n",
    "        strategy = EpsilonGreedyStrategy(eps_start, eps_end, eps_decay)\n",
    "        agent = Agent(strategy, num_of_actions, device)\n",
    "        memory = ReplayMemory(memory_size)\n",
    "        policy_net = DQN(num_of_arg_per_state, num_of_layers, num_of_neurons_per_layer, num_of_actions).to(device)\n",
    "        target_net = DQN(num_of_arg_per_state, num_of_layers, num_of_neurons_per_layer, num_of_actions).to(device)\n",
    "        target_net.load_state_dict(policy_net.state_dict())\n",
    "        target_net.eval()  # target network is only read, never trained directly\n",
    "        optimizer = optim.Adam(params=policy_net.parameters(), lr=lr)\n",
    "\n",
    "        # Module-level history lists; BlobEnv.step appends to them at episode end.\n",
    "        SUM11 = []\n",
    "        SUM22 = []\n",
    "        SUM33 = []\n",
    "        a111 = []\n",
    "        a222 = []\n",
    "        a333 = []\n",
    "        h111 = []\n",
    "        h222 = []\n",
    "        h333 = []\n",
    "        Fairnessl = []\n",
    "        AVG2 = []\n",
    "        Fairnessl_2 = []\n",
    "        episode_reward = []\n",
    "        Hl = []\n",
    "\n",
    "        for episode in range(num_episodes):\n",
    "            state = torch.tensor([em.reset()], dtype=torch.float32).to(device)\n",
    "            for timestep in count():\n",
    "                action = agent.select_action(state, policy_net)\n",
    "                next_state, next_state_m, reward, done = em.step(action.item())\n",
    "                # FIX: rewards are fractional (sum-rate + fairness terms); the\n",
    "                # previous dtype=torch.int64 silently truncated them before they\n",
    "                # reached the MSE loss. Store them as float32 instead.\n",
    "                reward = torch.tensor([reward], dtype=torch.float32).to(device)\n",
    "                next_state = torch.tensor([next_state], dtype=torch.float32).to(device)\n",
    "                next_state_m = torch.tensor([next_state_m], dtype=torch.float32).to(device)\n",
    "                # NOTE(review): the transition stores next_state_m; step()\n",
    "                # currently returns identical values for both observations.\n",
    "                memory.push(Experience(state, action, next_state_m, reward))\n",
    "                state = next_state\n",
    "\n",
    "                if memory.can_provide_sample(batch_size):\n",
    "                    # One gradient step on a sampled minibatch (standard DQN update).\n",
    "                    experiences = memory.sample(batch_size)\n",
    "                    states, actions, rewards, next_states = extract_tensors(experiences)\n",
    "                    current_q_values = QValues.get_current(policy_net, states, actions)\n",
    "                    next_q_values = QValues.get_next(target_net, next_states)\n",
    "                    target_q_values = (next_q_values * gamma) + rewards\n",
    "                    loss = F.mse_loss(current_q_values, target_q_values.unsqueeze(1))\n",
    "                    optimizer.zero_grad()\n",
    "                    loss.backward()\n",
    "                    optimizer.step()\n",
    "\n",
    "                if done:\n",
    "                    break\n",
    "\n",
    "            # Periodically sync the target network with the policy network.\n",
    "            if episode % target_update == 0:\n",
    "                target_net.load_state_dict(policy_net.state_dict())\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
