{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "RU553Jumx05t"
   },
   "source": [
    "# RELEVAGAN"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "CRWaG6qPWj1T"
   },
   "source": [
    "<a id=\"import-header\"><h1>Import Header</h1></a>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "executionInfo": {
     "elapsed": 4298,
     "status": "ok",
     "timestamp": 1646286467366,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "adrd84AgicLV",
    "outputId": "5efbe38a-5301-44fd-d78b-ccb5e21cd524"
   },
   "outputs": [],
   "source": [
    "# from google.colab import drive \n",
    "# drive.mount('/content/drive')\n",
    "# %cd /content/drive/MyDrive/PhD/Development/code/RELEVAGAN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "executionInfo": {
     "elapsed": 1391,
     "status": "ok",
     "timestamp": 1646286468746,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "mTwKRaEVjJ8c",
    "outputId": "919b98ff-2212-4330-ea6a-1b32ba525777"
   },
   "outputs": [],
   "source": [
    "# !pip install tensorflow\n",
    "# !pip install gym\n",
    "# !pip install keras\n",
    "# !pip install keras-rl2\n",
    "\n",
    "# !pip install keras-rl2\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "executionInfo": {
     "elapsed": 24,
     "status": "ok",
     "timestamp": 1646286468748,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "ljvKEMMhp8lR"
   },
   "outputs": [],
   "source": [
    "# %cd /content/drive/My Drive/PhD/Development/code/RELEVAGAN\n",
    "# !ls"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "executionInfo": {
     "elapsed": 4741,
     "status": "ok",
     "timestamp": 1646286473469,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "ZlXu5pxhWj1b",
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/nu/anaconda3/lib/python3.9/site-packages/xgboost/compat.py:36: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n",
      "  from pandas import MultiIndex, Int64Index\n",
      "\n"
     ]
    }
   ],
   "source": [
    "import importlib\n",
    "import header\n",
    "\n",
    "importlib.reload(header) # For reloading after making changes\n",
    "# NOTE(review): wildcard import pulls pd, np, plt, os and the prepare_* helpers\n",
    "# used below into the namespace from header.py -- consider explicit imports\n",
    "# (or %autoreload) so readers can see where each name comes from.\n",
    "from header import *"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true,
    "id": "aQkaphklFIYW"
   },
   "source": [
    "<a id=\"select-gan-dataset\"><h1>Select GAN and Dataset and Flags</h1></a>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "executionInfo": {
     "elapsed": 34,
     "status": "ok",
     "timestamp": 1646286473470,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "kPfnVUMHWj1h"
   },
   "outputs": [],
   "source": [
    "# Active GAN variant; commented alternatives kept for quick switching.\n",
    "GAN_type = 'RELEVAGAN_CC'\n",
    "# GAN_type = 'ACGAN_CV'\n",
    "# GAN_type = 'EVAGAN_CV'\n",
    "\n",
    "\n",
    "# Active dataset; commented alternatives kept for quick switching.\n",
    "DATA_SET = 'ISCX-2014'\n",
    "# DATA_SET = 'CIC-2017'\n",
    "#DATA_SET = 'CIC-2018'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "4zsYaqGjWj1j"
   },
   "source": [
    "<a id=\"set-flags\"><h2>Set Flags</h2></a>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "executionInfo": {
     "elapsed": 31,
     "status": "ok",
     "timestamp": 1646286473471,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "S1cf7rM3Wj1k"
   },
   "outputs": [],
   "source": [
    "# Run-control switches (0 = off, 1 = on unless noted). Only the flags whose\n",
    "# use is visible in this notebook are annotated; the rest are presumably\n",
    "# consumed by header.py helpers -- TODO confirm against that module.\n",
    "begin_from_start = 0  # 1: preprocess raw capture; 0: load cached '(Preprocessed)' CSV\n",
    "take_chunk = 0  # 1: restrict the bot class to its first 512 rows (quick runs)\n",
    "required_epochs = 150\n",
    "\n",
    "DISPLAY_FEATURES = 0  # 1: render the per-feature histogram grid below\n",
    "EVALUATION_PARAMETER = 'Accuracy'\n",
    "SAVE_ONLY_BOT_DATA = 0\n",
    "USE_KMEANS_FOR_CLASSIFICATION = 1\n",
    "\n",
    "BALANCE_THE_DATASET = 1\n",
    "\n",
    "labels =[]\n",
    "\n",
    "USE_ONLY_TRAIN_SET = 1\n",
    "\n",
    "USE_ALL_CLASSIFIERS = 0\n",
    "\n",
    "ACCU_EVAL_TEST = 0\n",
    "RCL_EVAL_TEST = 0\n",
    "\n",
    "VISUAL_TEST_OVERLAPPING = 1\n",
    "\n",
    "CSV_ONE_BOT = 0\n",
    "\n",
    "VIEW_ALL_BOTS = 0\n",
    "\n",
    "CTU_NERIS = 0\n",
    "\n",
    "SINGLE_WEIGHT_CLASSIFIER_TEST_C2ST = 0\n",
    "SINGLE_WEIGHT_CLASSIFIER_TEST_PROPOSED_METHODOLOGY = 0\n",
    "\n",
    "C2ST_BLACK_BOX_TEST = 0\n",
    "BOTSHOT_BLACK_BOX_TEST = 0\n",
    "\n",
    "C2ST_BLACK_BOX_TEST_AFTER_GAN_TRAINING = 0\n",
    "BOTSHOT_BLACK_BOX_TEST_AFTER_GAN_TRAINING = 0\n",
    "\n",
    "GENERATE_OTHERS_DATA = 1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "gbFYC_WQWj1n"
   },
   "source": [
    "<a id=\"set-paths\"><h1>Set Paths</h1></a>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "executionInfo": {
     "elapsed": 30,
     "status": "ok",
     "timestamp": 1646286473472,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "8Al8rt2iWj1p"
   },
   "outputs": [],
   "source": [
    "# Anchor all derived paths to the notebook's current working directory.\n",
    "MAIN_CODE_PATH = os.getcwd()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "executionInfo": {
     "elapsed": 27,
     "status": "ok",
     "timestamp": 1646286473472,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "WrizUpKsYxel"
   },
   "outputs": [],
   "source": [
    "# Derived locations: raw dataset, cached artefacts, and generated figures.\n",
    "DATA_SET_PATH = MAIN_CODE_PATH + '/Dataset/' +  DATA_SET + '/'\n",
    "CACHE_PATH = MAIN_CODE_PATH + '/cache/' + GAN_type + '/'\n",
    "FIGS_PATH = MAIN_CODE_PATH  + '/figs/' + GAN_type + '/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "executionInfo": {
     "elapsed": 27,
     "status": "ok",
     "timestamp": 1646286473473,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "yT1YS8U9Wj1t",
    "outputId": "e4079158-267c-4b8c-f6c5-241c2570f61b",
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/home/nu/Insync/rhr407@gmail.com/Google Drive/PhD/Development/code/RELEVAGAN_DQN_Agent/Dataset/ISCX-2014/\n",
      "/home/nu/Insync/rhr407@gmail.com/Google Drive/PhD/Development/code/RELEVAGAN_DQN_Agent/cache/RELEVAGAN_CC/\n",
      "/home/nu/Insync/rhr407@gmail.com/Google Drive/PhD/Development/code/RELEVAGAN_DQN_Agent/figs/RELEVAGAN_CC/\n"
     ]
    }
   ],
   "source": [
    "# Sanity-check the resolved paths before any file I/O.\n",
    "print(DATA_SET_PATH)\n",
    "print(CACHE_PATH)\n",
    "print(FIGS_PATH)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "IwGuWbiSWj1y"
   },
   "source": [
    "<a id=\"check-gpus\"><h2>Check Available GPUs</h2></a>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "executionInfo": {
     "elapsed": 24,
     "status": "ok",
     "timestamp": 1646286473474,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "jwQXSblnWj10",
    "outputId": "6dc71bbf-2f29-444d-83ae-66148060c55e"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Num GPUs Available:  3\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "\n",
    "# tf.config.list_physical_devices is the stable TF2 API; the\n",
    "# tf.config.experimental.* spelling used previously is deprecated.\n",
    "print(\"Num GPUs Available: \", len(tf.config.list_physical_devices('GPU')))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "ZzSPR7q5Wj11"
   },
   "source": [
    "<a id=\"import-dataset\"><h2>Import Dataset</h2></a>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "executionInfo": {
     "elapsed": 21,
     "status": "ok",
     "timestamp": 1646286473475,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "w0MQAWmiWj12",
    "outputId": "82533b26-0223-4159-9f62-b488faf6f8c0"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/home/nu/Insync/rhr407@gmail.com/Google Drive/PhD/Development/code/RELEVAGAN_DQN_Agent/Dataset/ISCX-2014\n",
      " ISCX_Botnet-Training.pcap_Flow.csv\r\n",
      "'ISCX_Botnet-Training.pcap_Flow.csv_VIRUT_(Preprocessed).csv'\r\n"
     ]
    }
   ],
   "source": [
    "# Move into the dataset directory and list its contents.\n",
    "%cd $DATA_SET_PATH\n",
    "!ls"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "executionInfo": {
     "elapsed": 5361,
     "status": "ok",
     "timestamp": 1646286478822,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "NIJpUfZdWj15",
    "outputId": "f289caa0-4318-4fca-f55a-3a16e3bd78e8",
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Dataset Imported: ISCX-2014\n",
      "Training set: (248677, 61)\n"
     ]
    }
   ],
   "source": [
    "# Build `training_data`: either preprocess the raw capture from scratch\n",
    "# (begin_from_start) or load the cached '(Preprocessed)' CSV from a prior run.\n",
    "if begin_from_start:\n",
    "\n",
    "    if DATA_SET == 'ISCX-2014':\n",
    "        training_data = prepare_ISCX_2014_data(PATH = DATA_SET_PATH, INPUT_FILE_NAME = r'ISCX_Botnet-Training.pcap_Flow.csv')\n",
    "\n",
    "    elif DATA_SET == 'CIC-2017':\n",
    "        training_data = prepare_cic_2017_data(PATH = DATA_SET_PATH, INPUT_FILE_NAME = r'CIC_Friday_bot.csv')\n",
    "\n",
    "    elif DATA_SET == 'CIC-2018':\n",
    "        training_data = prepare_cic_2018_data(PATH = DATA_SET_PATH, INPUT_FILE_NAME = r'Friday-02-03-2018_TrafficForML_CICFlowMeter.csv')\n",
    "\n",
    "    elif DATA_SET == 'UNSW_BotIoT':\n",
    "        training_data = prepare_UNSW_IoT(PATH = DATA_SET_PATH, INPUT_FILE_NAME = r'UNSW_2018_IoT_Botnet_Final_10_best_Training.csv')\n",
    "\n",
    "    elif DATA_SET == 'Darknet':\n",
    "        training_data = prepare_DARKNET_2020_data(PATH = DATA_SET_PATH, INPUT_FILE_NAME = r'Darknet.csv')\n",
    "\n",
    "    else:\n",
    "        # Fail loudly instead of hitting a confusing NameError on `training_data`.\n",
    "        raise ValueError('Unknown DATA_SET: ' + DATA_SET)\n",
    "\n",
    "    print('Dataset preprocessed: ' + DATA_SET)\n",
    "\n",
    "else:\n",
    "\n",
    "    if DATA_SET == 'ISCX-2014':\n",
    "        INPUT_TRAINING_FILE_NAME = r'ISCX_Botnet-Training.pcap_Flow.csv_VIRUT'\n",
    "\n",
    "    elif DATA_SET == 'CIC-2017':\n",
    "        INPUT_TRAINING_FILE_NAME = r'CIC_Friday_bot.csv'\n",
    "\n",
    "    elif DATA_SET == 'CIC-2018':\n",
    "        INPUT_TRAINING_FILE_NAME = r'Friday-02-03-2018_TrafficForML_CICFlowMeter.csv'\n",
    "\n",
    "    elif DATA_SET == 'BoT-IoT':\n",
    "        INPUT_TRAINING_FILE_NAME = r'UNSW_2018_IoT_Botnet_Final_10_best_Training.csv'\n",
    "\n",
    "    elif DATA_SET == 'Drebin':\n",
    "        INPUT_TRAINING_FILE_NAME = r'Drebin_API_Dataset.csv'\n",
    "\n",
    "    elif DATA_SET == 'Darknet':\n",
    "        INPUT_TRAINING_FILE_NAME = r'Darknet.csv'\n",
    "\n",
    "    else:\n",
    "        # Fail loudly instead of hitting a confusing NameError on the read below.\n",
    "        raise ValueError('Unknown DATA_SET: ' + DATA_SET)\n",
    "\n",
    "    # Drop the unnamed index column written by DataFrame.to_csv().\n",
    "    training_data = pd.read_csv(INPUT_TRAINING_FILE_NAME + '_(Preprocessed).csv', low_memory=False)\n",
    "    training_data = training_data.drop(['Unnamed: 0'], axis=1)\n",
    "\n",
    "    print('Dataset Imported: ' + DATA_SET)\n",
    "    print('Training set: ' + str(training_data.shape))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "executionInfo": {
     "elapsed": 1432,
     "status": "ok",
     "timestamp": 1646286480246,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "eg83hsWRWj16",
    "outputId": "b66f50cf-7738-4ab5-8c7b-e1dcaa61ddff"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "        FlowDuration  TotalFwdPacket  TotalBwdpackets  TotalLengthofFwdPacket  \\\n",
      "count  248677.000000   248677.000000    248677.000000           248677.000000   \n",
      "mean        0.113036        0.000844         0.000654                0.000143   \n",
      "std         0.252565        0.010604         0.009114                0.007379   \n",
      "min         0.000000        0.000000         0.000000                0.000000   \n",
      "25%         0.000005        0.000067         0.000000                0.000000   \n",
      "50%         0.004006        0.000133         0.000034                0.000005   \n",
      "75%         0.058283        0.000200         0.000168                0.000016   \n",
      "max         1.000000        1.000000         1.000000                1.000000   \n",
      "\n",
      "       TotalLengthofBwdPacket  FwdPacketLengthMax  FwdPacketLengthMin  \\\n",
      "count           248677.000000       248677.000000       248677.000000   \n",
      "mean                 0.000405            0.053490            0.004718   \n",
      "std                  0.007547            0.132236            0.017245   \n",
      "min                  0.000000            0.000000            0.000000   \n",
      "25%                  0.000000            0.000000            0.000000   \n",
      "50%                  0.000000            0.019178            0.000000   \n",
      "75%                  0.000019            0.054795            0.000000   \n",
      "max                  1.000000            1.000000            1.000000   \n",
      "\n",
      "       FwdPacketLengthMean  FwdPacketLengthStd  BwdPacketLengthMax  ...  \\\n",
      "count        248677.000000       248677.000000       248677.000000  ...   \n",
      "mean              0.018487            0.027984            0.019626  ...   \n",
      "std               0.062056            0.079239            0.030496  ...   \n",
      "min               0.000000            0.000000            0.000000  ...   \n",
      "25%               0.000000            0.000000            0.000000  ...   \n",
      "50%               0.003861            0.000000            0.000000  ...   \n",
      "75%               0.019863            0.033555            0.029189  ...   \n",
      "max               1.000000            1.000000            0.538462  ...   \n",
      "\n",
      "       SubflowBwdBytes  FWDInitWinBytes  BwdInitWinBytes  FwdActDataPkts  \\\n",
      "count    248677.000000    248677.000000    248677.000000   248677.000000   \n",
      "mean          0.115349         0.353742         0.116531        0.000450   \n",
      "std           0.211077         0.349682         0.255402        0.007065   \n",
      "min           0.000000         0.000000         0.000000        0.000000   \n",
      "25%           0.000000         0.089113         0.000000        0.000000   \n",
      "50%           0.000000         0.250004         0.000000        0.000000   \n",
      "75%           0.108621         0.267338         0.089113        0.000169   \n",
      "max           1.000000         1.000000         1.000000        1.000000   \n",
      "\n",
      "       FwdSegSizeMin      IdleMean        IdleStd       IdleMax       IdleMin  \\\n",
      "count  248677.000000  2.486770e+05  248677.000000  2.486770e+05  2.486770e+05   \n",
      "mean        0.466578  4.716686e-01       0.026932  4.797264e-01  4.141199e-01   \n",
      "std         0.120792  4.793791e-01       0.122364  4.852010e-01  4.799477e-01   \n",
      "min         0.000000  0.000000e+00       0.000000  0.000000e+00  0.000000e+00   \n",
      "25%         0.500000  0.000000e+00       0.000000  0.000000e+00  0.000000e+00   \n",
      "50%         0.500000  6.000000e-08       0.000000  6.000000e-08  1.000000e-08   \n",
      "75%         0.500000  9.715268e-01       0.000000  9.715348e-01  9.715268e-01   \n",
      "max         1.000000  1.000000e+00       1.000000  9.999999e-01  1.000000e+00   \n",
      "\n",
      "               Label  \n",
      "count  248677.000000  \n",
      "mean        0.992971  \n",
      "std         0.083545  \n",
      "min         0.000000  \n",
      "25%         1.000000  \n",
      "50%         1.000000  \n",
      "75%         1.000000  \n",
      "max         1.000000  \n",
      "\n",
      "[8 rows x 61 columns]\n"
     ]
    }
   ],
   "source": [
    "# Replace +/-inf with NaN, drop any row containing NaN, and renumber rows.\n",
    "training_data = (\n",
    "    training_data\n",
    "    .replace([np.inf, -np.inf], np.nan)\n",
    "    .dropna(how=\"any\")\n",
    "    .reset_index(drop=True)\n",
    ")\n",
    "print(training_data.describe())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "h5phnoDzWj17"
   },
   "source": [
    "<a id=\"display-features\"><h2>Display Features</h2></a>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "executionInfo": {
     "elapsed": 39,
     "status": "ok",
     "timestamp": 1646286480247,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "aePYFbaoWj17",
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Per-feature histograms split by class, to eyeball class separability.\n",
    "# FIX: the legend labels were inverted -- elsewhere in this notebook\n",
    "# Label == 1 is the Normal class and Label == 0 is the bot class\n",
    "# (see the class-split cell below), so the colors/labels now match that.\n",
    "if DISPLAY_FEATURES:\n",
    "    unified_df = training_data.copy()\n",
    "    X_cols = unified_df.columns[:-1]\n",
    "\n",
    "    axarr = [[]]*len(X_cols)\n",
    "    columns = 4  # histograms per row\n",
    "    rows = int( np.ceil( len(X_cols) / columns ) )\n",
    "    f, fig = plt.subplots( figsize=(columns*2.5, rows*2) )\n",
    "\n",
    "    f.suptitle('Data Distributions by Feature and Label', size=16)\n",
    "\n",
    "    for i, col in enumerate(X_cols[:]):\n",
    "        axarr[i] = plt.subplot2grid( (int(rows), int(columns)), (int(i//columns), int(i%columns)) )\n",
    "\n",
    "        # 50 bins spanning the full observed range of this feature.\n",
    "        bins = np.linspace( np.percentile(unified_df[col],0), np.percentile(unified_df[col],100), 50 )\n",
    "\n",
    "        axarr[i].hist( unified_df.loc[ unified_df.Label == 1, col ] , label=['Normal'], color=('#009933'), alpha=0.5,\n",
    "                              bins=bins, density=True )\n",
    "\n",
    "        axarr[i].hist( unified_df.loc[ unified_df.Label == 0, col ] , label=['Real Bot'], color=['#FF0000'], alpha=0.5,\n",
    "                              bins=bins, density=True )\n",
    "\n",
    "        axarr[i].set_xlabel(col, size=12)\n",
    "        axarr[i].tick_params(axis='both', labelsize=10)\n",
    "        if i == 0:\n",
    "            legend = axarr[i].legend()\n",
    "            legend.get_frame().set_facecolor('white')\n",
    "        # Use `columns` (not a hard-coded 4) so the grid width stays tunable.\n",
    "        if i % columns != 0:\n",
    "            axarr[i].tick_params(axis='y', left=True, labelleft=True)\n",
    "        else:\n",
    "            axarr[i].set_ylabel('Fraction',size=12)\n",
    "\n",
    "    plt.tight_layout(rect=[0,0,1,0.95]) # xmin, ymin, xmax, ymax\n",
    "    # plt.savefig('plots/Engineered_Data_Distributions.png')\n",
    "\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "pD-Hq093Wj18"
   },
   "source": [
    "<a id=\"select-botnet\"><h2>Select Botnet</h2></a>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "executionInfo": {
     "elapsed": 36,
     "status": "ok",
     "timestamp": 1646286480248,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "jMdckAGdWj19",
    "outputId": "46fe73e9-a31e-4ad2-ad93-1365278c0e6f",
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Normal before chunk: (246929, 61)\n",
      "Real Bots before chunk: (1748, 61)\n",
      "Normal: (246929, 61)\n",
      "Real Bots: (1748, 61)\n"
     ]
    }
   ],
   "source": [
    "# Split the frame by class: Label == 1 is benign traffic, Label == 0 is bots.\n",
    "normal = training_data.loc[training_data['Label'] == 1].copy()\n",
    "bots = training_data.loc[training_data['Label'] == 0].copy()\n",
    "\n",
    "print('Normal before chunk: ' + str(normal.shape))\n",
    "print('Real Bots before chunk: ' + str(bots.shape))\n",
    "\n",
    "# Optionally keep only the first 512 bot rows for quick experiments.\n",
    "if take_chunk:\n",
    "    bots = bots.iloc[:512]\n",
    "\n",
    "print('Normal: ' + str(normal.shape))\n",
    "print('Real Bots: ' + str(bots.shape))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "executionInfo": {
     "elapsed": 23,
     "status": "ok",
     "timestamp": 1646286480249,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "WEv9Tz5AWj1-"
   },
   "outputs": [],
   "source": [
    "# Keep an untouched copy of the full training frame.\n",
    "Train = training_data.copy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "executionInfo": {
     "elapsed": 22,
     "status": "ok",
     "timestamp": 1646286480250,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "8TrvNDY3Wj1-",
    "outputId": "fe3d432c-995f-469a-e6e6-b26bacf6ccb7",
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0    1748\n",
      "Name: count, dtype: int64\n"
     ]
    }
   ],
   "source": [
    "# Per-class row counts for the bot subset (index = label value).\n",
    "unique_labels = np.unique(bots['Label'])\n",
    "counts = [[np.sum(bots['Label'] == lbl)] for lbl in unique_labels]\n",
    "bots_count = pd.DataFrame(counts, columns=['count'], index=unique_labels)\n",
    "\n",
    "# Separate feature columns from label columns.\n",
    "label_cols = [c for c in bots.columns if 'Label' in c]\n",
    "data_cols = [c for c in bots.columns if c not in label_cols]\n",
    "\n",
    "train_no_label = bots[data_cols].reset_index(drop=True)\n",
    "\n",
    "print(bots_count['count'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "executionInfo": {
     "elapsed": 766,
     "status": "ok",
     "timestamp": 1646286481004,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "uKyFTGYfWj1_"
   },
   "outputs": [],
   "source": [
    "# NOTE(review): presumably downstream training consumes only the bot-class\n",
    "# rows via this alias -- confirm against the cells that use `train_data`.\n",
    "train_data = bots"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "K_5UjRLxWj2B"
   },
   "source": [
    "<a id=\"Classification\"><h1>Classification</h1></a>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 221
    },
    "executionInfo": {
     "elapsed": 60,
     "status": "ok",
     "timestamp": 1646286481006,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "hPMsTK_pWj2B",
    "outputId": "69298b35-df4a-4b68-9100-369f528e2fdb",
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1748\n",
      "CPU times: user 1.9 s, sys: 145 ms, total: 2.04 s\n",
      "Wall time: 279 ms\n"
     ]
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAOAAAACYCAYAAAD9XOVNAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/YYfK9AAAACXBIWXMAAAsTAAALEwEAmpwYAAApMUlEQVR4nO2dd3hUVfrHP1PTE5JACqEYeoegFA1tIRgEAtIEQToKAitBiiKiqFRhwf3hAi4CuyJKUZouIIuCIE0IRbMBAWlJSO9lJtPu74+YgZCZTBKSSfF8nofnyT3n3Hvfe7nfOeee+573lUmSJCEQCCoFeWUbIBD8mRECFAgqESFAgaASEQIUCCoRIUCBoBIRAhQIKhEhQIGgEhECrAb07t2bzZs3Fyo7ceIE7du35+DBg+zZs4fmzZuzcuVKi/svXLiQ5s2bc+7cOXuYKygFQoDVkPPnzzNr1izee+89+vfvD0CdOnU4cOAABoOhUFutVsvRo0epVatWJVgqsIUQYDXjl19+Ydq0abz11ls8//zz5vLAwEBcXFw4ceJEofZHjx6lVatWuLi4FCr/4osvCA0NpX379oSGhnL06FFznU6nY9myZfTs2ZOgoCD69+/P4cOHzfXr1q1j0qRJ7Nq1i7/85S907NiR1157jZycHABSU1N57bXX6NKlC0FBQQwbNoyzZ89WwN2o/ggBViN+++03Xn75ZV5//XVGjBhRpH7gwIHs3bu3UNnevXsZOHBgobLt27ezYcMG1q5dy8WLF3nrrbeYPXs2v/76KwBbtmzhv//9Lzt37iQiIoKJEycyd+5c4uLizMeIiorizp075iHwyZMn2bNnDwBr164lJyeH77//nvPnzzN8+HDmz59fpHcWCAFWG+7cucOkSZPw9fVl1KhRFtsMGTKE48ePk5qaCkBCQgKXL1+mX79+hdrt2LGDcePG0apVKxQKBT179qRXr15m8U6ePJn9+/fj5+eHXC5n8ODB6PV6rl27Zj5GXl4e4eHhODk58cQTT9C2bVtu3rwJQGZmJkqlErVajVKp5MUXX+TEiRMolcqKuDXVGiHAasI333zDtGnTyMjIYPny5Rbb1K9fn6CgIL755hsADhw4QEhISJHh5507d/j73/9O27Ztzf+OHTvG/fv3gXwBvffeezzzzDO0adOGJ598EsgXXQH+/v6o1WrztpOTk7n+lVde4dq1a3Tv3p3XX3+db7/9VvR+VhA/SdWE6dOnM3bsWIKCghgzZgyBgYGMGTOmSLthw4axdetWxo8fz759+3jnnXeKtHFwcGDBggUMGzbM4rnCw8PJy8tjx44d1K9fH5PJRKtWrQq1kcut/3a3bt2ao0ePcvbsWU6cOMHSpUv5/PPP2b59OwqFopRXXrMRPWA1oeDBbdOmDcuWLWPZsmX89NNPRdqFhoYSExPDoUOH0Gg0dO7cuUibhg0bcvXq1UJl9+/fx2g0AnD58mVeeOEFGjRogEwm48qVK6WyNTMzE4Du3buzcOFCdu3axaVLlwoNYQX5CAFWQwYMGMCkSZMIDw83v3cV4OjoSP/+/Vm1ahVDhgxBJpMV2X/MmDF8/fXXnDp1CoPBwKVLlxg6dCjHjh0D8oeyV65cQa/XExkZyebNm3FzcyMhIaFE9r3wwgt89NFH5ObmYjKZuHLlCg4ODtStW/fxL76GIYag1ZTZs2dz48YNpk6dyosvvliobvjw4ezatavQZ4qHGTJkCElJSSxcuJDU1FT8/f2ZNWsWISEhACxevJhFixbRqVMnWrVqxdKlS9mxYwd/+9vfcHZ2tmnb3//+d5YsWUK3bt2A/E8k69atw9PT8/EuugYiEyviBYLKQwxBBYJKRAhQIKhEhAAFgkpECFAgqESq5SyoVqslMjKSOnXqiA+7giqH0WgkKSmJNm3a4OjoWGzbainAyMhIi14gAkFVYvv27Tz11FPFtqmWAqxTpw6Qf4F+fn6PdSx99B2y
9m3HlJWBZDQiZWVgTE0GhRKlfz08X52P0ld8QBaUnPj4eMaMGWN+ToujWgqwYNjp5+dHvXr1ynwc3d3fSf5kBcqk+EdOAGCAuDuov9yI7+otZTdW8KelJK9Hf+pJmMzPP8H4qPgewRAXm98jCgQVwJ9agMbMdNuN5DIoxvNfIHgcbD5Z//rXvyyWWwsAVJ1QeNseo6sCGqKo5WUHawR/Rqy+A16/fp1r166xZcsWvL29C9VlZmayY8cO3njjjQo3sCLxmDAT3Y2rGO/fK1qpVKJu1gbv+Uvtb5jgT4NVAWq1WiIiIsjMzGTXrl2F6lQqFfPmzatw4yoalY8/rs8NJWPzR0Ur5XLqvLsWuasbAJIkob95FZMmF4eW7ZGpVPY1VlAjsSrAdu3a0a5dO1q2bGk1BklNQHPmmOUKvR5jWgpyVzf0sdGkfPgW+ujboNehrNuQWpNew6lLd/saK6hx2HwHfOGFF9i+fTsTJkwwrzvbt28fKSkpFW6cPVD4+FuuUKpQ+ud/4khduxj9zauQpwWTCUPMbdI3rcGk1drRUkFNxKYAV6xYwcmTJ3nppZfM0bby8vJYuHBhhRtnD9xHTUbmUNRdSN28DTKlEmNqMoa4mCL1hvgYNGeP28FCQU3GpgCPHj3K+vXrCQkJMQfiGTlyJPfuWZi4qIao6wfi8cqc/J5QoUDm4IjDk0/js3R9fgOZDIpGdcgvF36ogsfEpieMWq1Go9Hg4uJiji+i1WqpSQvp3foNwTUkDMP9aOSeXijcPMx1Ck9vVAENyUsrPORW+tfHuUsPe5sqqGHY7AHDwsIYNWoU//znP8nOzmb79u2MHz+eQYMG2cM+uyFTKlE1CCwkvgK85i1B3bIdMjd3ZI7OKJ9ogueMBcjUDpVgqaAmYbMHnDFjBo0bN+bw4cM0bdqUyMhIpkyZQt++fe1hX5VAWdsH39Vb0MfeRdJqUQU2RSa8YwTlgE0BFoQ2fzS8+fbt2/90S4JUAQ0r2wRBDcPmz/jrr7/OwYMHzdtxcXFMmDCB/fv3V6hhlYVkMJBz/DCZO7egj4uubHMENRybPeAXX3zBjBkzuH37Nn5+fqxevZrx48fzyiuv2MM+u6KPjyXlgzn5H9yNRrIOfInLXwZQa0p4ZZsmqKHY7AH9/Pz44osvuHXrFkuWLGHr1q1Mmzat2NwA1ZW0j5ejv3MT/gjRbkpPI/u/B9D9/lslWyaoqVjtARctWlRo28nJCZVKxcqVK82LYD/44IOKtc6OSJKEwcKQU8rOJPu7vXhNf7MSrBLUdKwK0NfXt0jZuHHjKtSYykQmkyFTqS3WKWp5WywXCB4XqwKcOXOm+e9ff/2Vtm3bApCdnc3Nmzfp0KFDhRtnbxzadsQQcxckk7lM4ReAa9gLlWiVoCZj80Vu8+bNzJo1C+0fjsd5eXm88cYbfPrppxVunL3xnDoP575hKPzrIfeqg6pZa7zC37H4cV4gKA9szoLu3r2bAwcOmOMbent7s2fPHoYNG8aUKVMq3EB7IlMq8Z61CEmvQ9Jqkbu5V7ZJghqOTQHq9foiKalUKlWhdMU1DZlKbfV9UCAoT2wKMCQkhLFjxxIaGoq7uztpaWl8++23pfIFXbZsGVeuXEEmk/HWW2/Rrl07c93p06dZs2YNCoWCHj16MGPGDHOdVqtlwIABzJgxg6FDh5by0gSCqo9NAS5YsID9+/dz4sQJ0tPTqVWrFpMnT6Z///4lOsHPP//M3bt32blzJzdv3mTBggXs3r3bXL9kyRI2b96Mr68vo0ePJjQ0lCZNmgCwYcMGatWqVbYrEwiqASUKzDt48GAGDx5cqGzp0qUlWpR75swZc+bVJk2akJmZSXZ2Nq6urkRHR+Ph4YG/f/6q9J49e3LmzBmaNGnC77//zs2bN+nVq1cpL0kgqD7YFGBcXBzr168nOjoakyl/ej4nJ4eEhIQSCTA5OZnWrVubt729vUlKSsLV1ZWkpCS8vB6E/KtduzbR0fkf
w1euXMmiRYvYt29faa9JIKg22PwMMX/+fIxGI4MGDeL27duEhYXh7u7O+vXrS3SCRxfuSpJkXthraVGvTCZj3759dOjQgfr165foHAJBdcVmD5iYmMi2bdsA2LRpEyNGjCAkJIS5c+eyefNmmyfw9fUlOflBaPfExERq165tsS4hIYE6depw/PhxoqOjOX78OPHx8ajVavz8/HjmmWdKfYECQVXGpgAVCgWJiYn4+Pggl8vJyMjA09OT27dvl+gEwcHBrFu3jlGjRhEVFYWPjw+urq4A1KtXj+zsbGJiYvDz8+PYsWOsXr2al156ybz/unXrCAgIEOIT1EhsCnDixIn07duXiIgIevfuzZgxYwgICDD3Yrbo2LEjrVu3ZtSoUchkMt5991327NmDm5sbffv2ZfHixcyZMweA/v37ExgY+HhXJBBUI2RSCaIrpaam4uXlhclk4uDBg6SkpDBw4MAiIevtRUxMDH369OH7779/rPRkAkFFUJrns9geMC0tjcuXL6NWq+nYsSNOTk4MHDiwXI0VCP7MWBXgmTNnCA8Pp0GDBhgMBpKTk9m0aRMtWrSwp30CQY3GqgDXrFnDhg0b6NixIwA//vgjq1atKtHMp0AgKBlWvwNmZmaaxQf5XiqxsbF2MUog+LNgtQe0lN+6OseBMeVko/3lAjK1A+pWHVA4OVW2SQKBdQEajUYSExMLeas8WmYpbEVVJGvvdjK//gxTQXh5mQx10NP4vPM3c46HAu8cgcCeWBXg3bt36dmzZxF3sR498vMhyGQyrl69WrHWlQOG+Fgyd/8LU0bag0JJQnfxNDEv9kHh6o7M0RHHoC7UenmOiHgtsCtWBXjt2jV72lFhZH27q7D4HkaTi1GTC0B2XAwoVHiKGKACO2JVgOfPn7e5c6dOncrVmIqgxCvbDQa0l85WrDECwSNYFWDBUiOZTEZ0dDROTk64u7uTnp6OXq+ncePG1SI8vVvYSLIP70XKTLfZVsoTGW8F9sWqAI8cOQLA8uXLCQoKMidnkSSJb7/9lsjISPtY+JgovGrj0n8Y2Tu2AMV73Sn9AuxjlEDwBzZnHH788cdCmZFkMhlhYWH8+OOPFWpYeZGxays53+6ikPgcHFH4ByBzccvflslR1g/Ec9r8SrFR8OfF5moIpVLJV199Rb9+/XB1dSU7O5ujR49Wi2l7U56WnP9+g5SdVahc7uKGz4pNGNNTyT1+GKV/AC4hYcgt5IoXCCoSmwJcuXIlixcv5u2330YmkyFJEi1btmT58uX2sO+xMETfwZhwv0i5KTWJvGu/4NItBIcmwrdVUHmUKCbM5s2bcXR0JD09HQ8PDxwcqkdq5qxvd4LRUKRc5uKGqkHjSrBIICiMzXfAguHn6NGj2bZtGxEREdUmKK/ut/9ZLJe7uaNuIBb+Ciofmz3gxo0bAbh58yYXLlxg3759LF26lNq1a/Pvf/+7wg18HCSD3mK5Q9uOFssFAntTIr8rvV5PVlYW2dnZZGdnI0mSOVdEVUZV74kiZTJXd1wHjrS/MQKBBWz2gKNHj0av19OyZUs6dOjA3LlzadSokT1se2y8/rqQpPdmo7/7O+h1yD1r49K7v5h4EVQZbAqwadOmREVFcevWLTw8PPD09MTLy6tahIxXeNXGd+2/0Vw4jTE+FqdneqGsXT1WcAj+HNgU4HvvvQdAeno6Fy5c4Oeff2bdunXk5eXxn//8p0QnKUtylg8//JCIiAgMBgNTp07l2WefLcv1IZPLce7crUz7CgQVTYlyQyQkJHDp0iUuXbrE5cuX0Wg0hVbLF0dZkrMkJydz48YNdu7cSVpaGkOGDCmzAAWCqoxNAfbq1Qu5XE7nzp3p0qUL48ePp27duiU+QVmSs4wePdrcS3p4eKDRaDAajRZX6QsE1RmrArx69SotW7bks88+o0GDBubUZKWlLMlZFAqFOSno7t276dGjhxCfoEZi9TNEQbTqBg0aAPmzoWWhLMlZCjh69Chf
ffUV77zzTpnOLRBUdaz2gJaEUxbKkpwF4OTJk2zcuJFPP/0UNze3Mp1bIKjqWO0BH13tUNbVD8HBwXz33XcAxSZnMRgMHDt2jODgYLKysvjwww/55JNPqsXnDoGgrJRoFvRxKEtyloLZz/DwcPNxVq5cWarJn4fR348mL+oKhvhYHFq1wzGoa7VYTiWo+VgVYFpamtkP1NI2wLRp00p0krlz5xbafji8fadOndi5c2eh+pEjRzJy5OO7ixlTk0le9ga66/8zr4rIUqpQt2xLncV/R+4oYoMKKherAuzZsyd37941b/fq1avQdnUgZfUidFevFC406NH9epGMf3+M59R5lWOYQPAHVgW4YsUKe9pR7hgz09HH3LFar7tRM8IuCqo3VgW4aNEimzt/8MEH5WpMuSJJ+f+sVZuMdjRGILCMVQFWl7Dz1lB4eKL0r48uNdlivf5GFEnL5qNu0Ai3waORubiSd/lnDKlJOHXqjvbiGbIP78WUkYZjUBc8psxGLpwBBOWMVQHOnDmz2B2XLl1a7saUN16vLyZ+5ijQaIpWmkxoT/2A9tQP5Bw7BEo1xsT7oMsj3ckZSasx96DZ0bfJ+fE76n66D7mzi30vQlCjKVFMmPXr1xMdHY3JZAIgJyeHhIQEc/DeqorKLwClfwMMt34rtp0xvnDaNemPcPWFyjLSSPnofeq8tbJw+R8iFZ81BGXB5or4+fPnYzQaGTRoELdv3yYsLAx3d3fWr19vD/seG3ktL9uNSojuRpT5b1NWJslL5hL38lDiXh5K8vI3MeVml9u5BH8ObPaAiYmJbNu2DYBNmzYxYsQIQkJCmDt3bpXPlisZDOiiLpfb8WROzua/k5bMQRd5ybytiYsmOScbnyUfl9v5BDUfmwJUKBQkJibi4+ODXC4nIyMDT09Pbt++bQ/7yozJZCJu2gugtfD+V0acOuUv7NXf/R39rRtF6vW3fsOQcB+lb9k8dgTljz7hPplfbMKUnoqqUTPcX5iI/KEf0srGpgAnTpxI3759iYiIoHfv3owZM4aAgACzQ3VVRHfrOilr3sUUd6/8DqpS4/Z8/ooQQ1oKkianSBOTJgdjRpoQYBVB9/s1kpfMw5gYB4D2wim0l87is+KfVcYLyqYAR4wYQZ8+fVAqlcyePZtmzZqRmppKWFiYPewrFSZNLskfzEF38ypSTvm+jyl9/VG4ugPg0KItCr96GOOiH2lTF3Vgs3I9r6Bs6O7eInnZm2bxFaC/cZXsfV/gPmpyJVlWGJuTMHPmzDEvmpXL5YSFhTF+/HimTp1a4caVlrSNq8i7cr7cxYdajVPnHshUKgDkjk64PT8audeDUYCitg/uw8eb2wgqj9R/LCfxjVcwxsdYrM+7WXW8oKz2gD/88AM//PADJ0+eLOIVk5mZyb175Ti8Kyf0vz/ejZW5eiDptGbHbZmHJyoff5y69sRt+PhCbd0GjsCpc3eyvtmJTKHALWwkCu86hdpIBgOaiNNk/+crTOmpyFQqHDv3wG3EePIizqD5+STqJi1w6T1ACLecyIu6Qu7x75CKmZGuSmnorAqwffv2aDQajh49WsQrJiAggClTplS4caXmMfO7S9kZhQ8nk+M5YwHqRpaHlUofPzwnzyp8DKOBrH1fknvyCProO0UmgXS/XyPn8B6MGWmQpyVHoSD7293UWfIxCg/PojYZDeQcP0zeLxdwaB2UL1Zlha8is4kkSeRFXcaUnoZjUGfkzq6VbRIAOUcOFC++eg1xHzHBfgbZwOr/pLe3NwMGDCAwMJBWrVphNBpJT0/H09MT+WM+6BWFQ5uO+bOTkqlcjmdMSSTj3//Aa9YiFF62J50kSSJxwavo/nfJeiO9vvB7idGI/tZvpP3zb9Set6RQU5NWQ9LbM9BdjwKjgdxjh8g5vJc6Sz6u1AfekJRA8tJ5+QGPdXko/AJwGzoWtwHDK82mAuS1iv6IAaB2wKlzdzwmvobCo5ZdbSoOm0pyc3Nj4sSJ
tGvXju7du9OuXTumTp1KQkKCPewrFbUmzcKpe99yPab20jniXxtDwvwp6BPirLYz6XTcnza8ePEVQ96V8xgf8VvN+OKf6K7+8iDDk9GI7rdIMrZttHAE+5H60Xvob0SBLj9JjzE+lqydW4rYXxm4DRmD4tFZaIUS1/7DqL1gBSq/qjVDbVOAixYtokePHpw7d46oqChOnTpFx44dS7Rawt7IlEo8xr9avgc1GjClpaD732VSV75ZKDaOZDKhjbxE7vlTJISPwxRT9vWSprQU4l8bQ/aRA0iSRNY3u8g5vM9iW92NKHS3riPp7J+lKufEd+RZ+JExpiSSc2S/3e15FIWHJ17h76Bu3hZFbR+UAQ1x6TeEWpNm2d65EiiRJ8zEiRPN2x4eHkydOpUBAwZUqGFlJes/X1fYsfXRd9Df/R31E03Qx9whZeVC9DG3Qa8vdulTEWQyi+1NaSlk7tqC7uZVcg7vtZjbEEB38xqJ8yaj8K6DS7+huA99qayXVCpyTx8nbcOH+ddriSqSsMex3VM4rtmKSZMLCgV5kZfRnD2B45NPI68iNhZgVYC5ubk4OzujUCiIjo6mfv365rqYmJgq+R5o0uvJ+c9u2w3LiGTQI+VpAUhd+x56G07eFnFygjwdSJbXIxrjYsg59DWYinmP1euQ9GCIvUfm7n/h0LYjDk1bld6WUpJ94EukzAyLdQq/uriGDKpwG0qDISGO1FUL0cfeBYMBhV89PMa9ikuPkkdZlySJnCP7yT15FACnp3vi2n94uTnfWxXg8OHDOXjwINOnT2fo0KF07doVd3d30tLSiIiIqJKLceNnvQR/CKQikHt4oazbAENyIoZHVlDYxMERlX89cPdC/8vPxbe1Jj6lCh7JeShlppO970sc5lX8/4cpt6j3D4DMyQXPaW8gdy0+fKT2ynmyj+xH7uSM27CxqPzrF9veqh1aLZpTR5FMJpyD+1hdIpb2fx+gv3PTvG2Miybj3+tx6tQNuZMzpjwtaes/zI8ZJJlQBTbD668LCk1wpW9cRfaRA6DLf67yIi+iu3kN71nl8wpmMy7oc889R4cOHTh16hSpqak8+eSTvPvuu1Vuwa5Jr8d49/eKO4FMhikpnoTwcbj0GUCpoqTK5Hj+dSEuPUOJGdy1zCbIvbwxJcYXKS/P1f2SJJH9zU40Z44hmSQc23TEue9Asr7ahjHN8iSL2wsTcOoUXPRYRgOaM8fRx8WgOfUD+pvXzDPUmnMnqTUlHJeeoaWyT3PxDMn/twyS8ifE0j56H6e+g0GTg0mrwZiWgpSbjczBEUNC0R9JY3wMmjPHcendn5QPF6I9+6O5zhB9m+T0FHyW509yGbMy0Px8wiw+APQ6NKePkZSeisK7Du4jJjyW66FVAebl5XHp0iWzEBs1amTOCxgbG0tsbGyJE7SUJTtScftYImnHphLZUmrUDvmzfX/cB2N8DNkHd5uHokWQK+ARQahbtsWlZygyubz4oWVxKFU4NG+L5hEBypxdcX1uaNmOaYH0T1aT/d0+8wynLvIimXu2mbcLn1yGunkb3Aa/WKRKnxRPynuz87+FWshUbEpNImv3v3Du3jf/vpQAyWgk+R/LzeIrQPPfUkz+KBTIHBwwpiSh+y2ySLXu1m/o7/6OqmFjDLH3LM7sStmZaH8+CYD2/Ck8Z75l8QeoJFgVYGJiInPnzrUaEVsmk/H999/bPEFZsiOlpqYWu48ldDu22LSlTOh0RYpMaalgJTyFzNkF95GT0J7/CUmvQ9WoGbUmvvbgIVMqwWB5cgWZ3Oo3TMegLnjNfpeUnCzyrkchZWeiqO2Lc89QHNs9VaZLexRTbg6a8z8VFduj20olqmatce7aC7ewF5CpHYocK23dUvS3i64YeRhDcgLG1GSUtX1KZJ/+zg2Iv1+ittZQ1m2AU+ce6O/+bnFILWVnYUiMR9WwMcq69VF41saYVHTUUYAxOYGML/5Z/gKsX78+hw4dKtNBH6Ys2ZFS
U1Ot7mN3ZGBxvGm0NuyTcO7R1+rMpPuE18j8dI2FCk88X55NxqdrMWWkPSh3cMSl9wA8X3kdmdqBOh98jO7eLQyx93Bo1d6i90xZMSbFY8pMt93QYEDdqDnuw8Zab3I/2mpdATKFEvkfDu4lQVaWFQxOzsgdnJCMepQ+dfGc8SYylQpVw0Yo6/hheCRynsK3Lg6t2uf/7V4Lx6eCyfn+G4s/xAWYUpOR9DpkKnWpzatwn6ayZEdKS0uzuo/dUamL9AAyz9rIVaoinvYAyvqBxWbh9RgyGmNWBjm7tpp7O4duIfgsyA8DqfCuQ/be7Zi0GtSBTfEY+2qRSQZ1g0aoG5R/mnCFbwDyWl4YrUy2PGioQP1Ek2KblORhdGjXqVSfBVQBDUvctgCnTt2oNTkcSZeH0r+eefZSplLj+vxoMrd/giktBQC5ey1c+g1B7vLgOfOc8SaqJ5qgOX0MY2pSvmAfGRXKnF3yJ8jKgFUBdunSpUwHfJSyZEcqbh+7olDg/cZScg7tQXfzGqbcHJQ+/rgNfQlJryfj8w1IWZkFhqOo44f3PNvBqrzGvYrXOMsOA07tO+HUvlN5XkWJkTs64tzjWbL3f/kgLk7B0Pmhd1dlg0a4hAws9lgObTpiiL5jdUitbNAI77nvl9pGdVAXdJfO2W4ok6N6ojGer8xB4eltsYnbc0NxbN+JrL3bwWjAddCoIj8sMpkMt4EjcBs4AkmvJ+H18ehvXX/IIEecnu5V5ufTqgAXL15cpgM+SlmyIymVSqv7WKP+fy4QPaBk70Iy37qQp0VKTy1a5+SCpMlB7l0Hl76DcO7aC+euvTAkxmPKSEX1RBPzr7vTU8+QuXc7pswMXEIH4dShfH60KpNaY19F3bhFvleLyYRj1x5IGg2aU99jystDVbcenq++YbOH85w6F0mXR17kpfwVJmpHFO61ULi54/R0L1z6DSnTQ+u75B/EThmC6eG1mHIFyoaNkKnUKP3rgVKJunFLXPs9j9yh+B5WVbc+XjPeLNG5ZSoVdd5fR9qGD9Hfv4dcrcbp6d64DR9X6usooMKHoMHBwaxbt45Ro0YVmx3Jz8+PY8eOsXr1atLS0qzuA2D84/0rPv6RGcFP9hE3yfrHYM/5y3Bs0eZBewttDPejMSTFo27cnGxXd7JjHlpT5uQOCYmFdwjLXyWvBXi4bXWmQVOYkp/Pwzz47vIXAIxAXK4WcktwrSMmIz2vQ9Jqkbm6IclkGP44ZnpsKb+jPszidUX+7wreyAumt7RAZlIF+aaO+6v5nHog85FrKXgujVbnCR5QJbMjBQYGFtnnYZKSkgAYM2ZM6YyZUTX9AQU1k6SkJBo2LP69VSaVNfNmJaLVaomMjKROnToidbWgymE0GklKSqJNmzY42phkqpYCFAhqClXPo1og+BNR+bENyog93NvK266zZ8+yZs0a5HI5gYGBLF26lKioKKZPn25+V2jWrFmFrLUszq7nn38eN7cHjtSrV6/G19fXLverONsSEhIKJXeNjo5mzpw5BAYG2uWeXb9+nenTpzNhwgReeqmwY0W5PWNSNeTcuXPSK6+8IkmSJN24cUMaPnx4ofrnnntOun//vmQ0GqWRI0dKN27csLmPPezq27evFBcXJ0mSJP31r3+Vjh8/Lp07d05asmRJudtSGrsGDx5c6n3sZVsBer1eGjVqlJSdnW2Xe5aTkyO99NJL0ttvvy1t27atSH15PWPVcghqzb0NKOTeJpfLze5txe1jD7sA9uzZg5+fHwBeXl6kpaWRk2PD68QOdlmywR73qzTn2bt3L6Ghobi4uNjlnqnVajZt2oSPT1E/1fJ8xqqlAJOTk/H0fOADWeCqBlh0b0tKSip2H3vYBZi/ZSYmJnL69Gl69uxJbm4uERERTJkyhTFjxnD27NlytakkdqWnpzNnzhxGjRrF2rVrkSTJLverJLYVsHv3boYPzw/6ZI97plQqrc5gluczVi3fAaUq6t5WknOkpKQw
bdo03nnnHTw9PWnRogUzZsygT58+3L59m4kTJ3LkyBHU6tI79pbVrtmzZzNo0CAcHByYPn06R44csZs7YEnOc+nSJRo1amT+AbPHPSuNzVD2Z6xaCtBe7m3laRdAdnY2L7/8MrNmzaJbt/xEL40bN6Zx48YABAYGUrt2bRISEgqFAKlou0aPHm3+u1evXvz2228297GXbQDHjx/n6aefNm/b456VxubHecaq5RA0ODiY7777DqBY9zaDwcCxY8cIDg4udh972AWwYsUKxo8fT8+ePc1lX331FZ999hmQP7RJSUkp92gDxdmVmprKyy+/jP6PQEvnz5+nadOmdrlftmwr4Ndff6VFixbmbXvcs+Ioz2es2n6IX716NRcuXDC7qkVFRZnd286fP8/q1asBePbZZ5k8ebLFfR7+T61ou7p160anTp0ICgoytx04cCD9+vVj7ty55ObmotPpmDlzZiGBVrRdffv25dNPP+XgwYOo1WpatWrF22+/jVwut8v9smUbQFhYGFu3bjX3JhkZGRV+zyIjI1m5ciWxsbEolUp8fX3p3bs39erVK9dnrNoKUCCoCVTLIahAUFMQAhQIKhEhQIGgEhECFAgqESFAgaASqZYf4msSzZs3p0GDBoUWFgcEBLB582Z69+7Nhx9+yFNPPV7cz7Fjx3L79m1cXV3RaDT4+voyZswYBg8e/LjmF+LKlSs4ODjQokULPv/8c5KTkwkPDy/Xc9Q0hACrANu2bTM7aVcU8+bNMwsuMjKSN954g/j4eKZOnVpu5/j666958sknadGiRZHlOwLLiCFoNeHQoUPmD/fjxo3j3r17nD59mhdffBAWfsqUKeb4OpD/Aft///tfkWO1adOG1atXs3HjRrKystizZw8TJkww1z+8/eabb7J8+XLCwsI4dOgQGo2G8PBwQkND6d27NytXrgTgyy+/ZP/+/axatYqtW7eybt06Fi5cCMD9+/eZPHkyoaGhDBw4kH379gH5Wba6devGZ599RlhYGN27d+fgwYPlfOeqNqIHrAbcv3+fRYsW8fXXX9OwYUO2bNnCO++8w8aNG7lx4wZ6vR65XE5aWhopKflBZjMzM0lKSqJly5YWj9myZUv8/f25fPmyzfOfOXOGr776CgcHB7Zs2UJOTg6HDx8mMzOTZ599lj59+vDiiy9y8OBBhg8fzuDBg1m3bp15/0WLFtG5c2c2b95MbGwsgwcPNg+r09LSkMvlfPPNNxw6dIi1a9fSv3//x79p1QQhwCrA2LFjC70DPvXUUyxZ8iBf/KlTp+jSpYt5BfiIESNYtWoVCoWCFi1acPXqVRQKBY0aNSI5OZmEhASuXr1K586di83j6OrqSlZWlk37nn76aRwc8vM/TJo0ibFjxyKTyfDw8KBp06bExMRYfU/V6/WcPn2ajz76CMh/v+3SpQtnz56la9euGAwGhg7NTy7TunVr7t9/vNwP1Q0hwCqArXfAtLQ03N0f5FBwc3NDkiTS09Pp0qWLOYtVUFAQSUlJREREEBUVRdeuxadCi42Nxdvbm1gbMTo9PDzMf9+5c4cVK1Zw69Yt5HI58fHxZgFZIj09HUmSCoW8cHd3JzU1PyiyQqHA2dkZALlcjqms2aOqKeIdsBrg7e1Nenq6eTsjIwO5XI6npyddunTh8uXLRERE0LFjR4KCgrh48SIRERGFlvA8yoULF8jLy6Ndu3bI5fJCQWQzMixnwQV4//33adq0KYcOHeLw4cM2nY09PT2Ry+WFjpmeno63t+Vw8X82hACrAcHBwVy4cIHo6Pxw7Dt27CA4OBilUkmHDh24du0a169fp1mzZnTo0IGLFy+SnJxMYGCgxeNdu3aNhQsXEh4ejpOTEz4+Pty5c4e8vDw0Go15SY0lUlJSaNmyJQqFglOnTnH37l1ziAilUllkSKtUKgkODmbnzp0A3Lt3jwsXLvDMM8+Ux62p9oghaDXAz8+PDz74gOnTp2MwGAgICDCnCFer1fj6
+qJQKJDL5bi7u6PT6YokT121ahUbNmxAq9Xi5ubGq6++yvPPPw/kJ+Jp164doaGh1KtXj5CQEH766SeLtrz66qssWbKEjz/+mL59+zJz5kzWrFlDq1atCAkJYdWqVURHRxdaB/f+++/z9ttvs2fPHlQqFUuWLMHf35+YmhLK/zEQy5EEgkpEDEEFgkpECFAgqESEAAWCSkQIUCCoRIQABYJKRAhQIKhEhAAFgkpECFAgqESEAAWCSuT/AYr2VMGsfkpDAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 216x144 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "%%time\n",
    "# Cluster the bot-only training rows and use the cluster assignments as\n",
    "# class labels for GAN training (when enabled via the config flag).\n",
    "# Each entry: [display name, estimator class, positional args, kwargs];\n",
    "# commented alternatives are kept for quick experimentation.\n",
    "if USE_KMEANS_FOR_CLASSIFICATION:\n",
    "    algorithms = [\n",
    "    #     [ 'KMeans', cluster.KMeans, (), {'random_state':0} ],\n",
    "        [ 'KMeans', cluster.KMeans, (), {'n_clusters':1, 'random_state':0} ],\n",
    "    #     [ 'KMeans 3', cluster.KMeans, (), {'n_clusters':3, 'random_state':0} ],\n",
    "    #     [ 'Agglomerative', cluster.AgglomerativeClustering, (), {} ],\n",
    "    #     [ 'Agglomerative', cluster.AgglomerativeClustering, (), {'linkage': 'ward', 'n_clusters': 3} ],\n",
    "    #     [ 'Agg. Ave 3', cluster.AgglomerativeClustering, (), {'linkage': 'average', 'n_clusters': 3} ],\n",
    "    #     [ 'Agg. Complete 3', cluster.AgglomerativeClustering, (), {'linkage': 'complete', 'n_clusters': 3} ],\n",
    "    #     [ 'DBSCAN', cluster.DBSCAN, (), {'eps':0.025} ],\n",
    "    #     [ 'HDBSCAN', hdbscan.HDBSCAN, (), {} ],\n",
    "    #     [ 'HDBSCAN', hdbscan.HDBSCAN, (), {'min_cluster_size':10, 'min_samples':1, } ],\n",
    "    #     [ 'HDBSCAN 2 10', hdbscan.HDBSCAN, (), {'min_cluster_size':2, 'min_samples':10, } ],\n",
    "    #     [ 'HDBSCAN 10 10 ', hdbscan.HDBSCAN, (), {'min_cluster_size':10, 'min_samples':10, } ],\n",
    "    ]\n",
    "\n",
    "    rows = len(algorithms)\n",
    "    columns = 1\n",
    "    # One scatter panel per algorithm.  NOTE: the previous version created\n",
    "    # an unused 3x2 axes grid with plt.subplots() and then overlaid fresh\n",
    "    # axes via plt.subplot(); create one figure sized to the real grid.\n",
    "    fig = plt.figure(figsize=(3 * columns, 2 * rows), constrained_layout=True)\n",
    "\n",
    "    for idx, (name, algorithm, args, kwds) in enumerate(algorithms):\n",
    "\n",
    "        # Fit the clusterer and colour each point by its predicted cluster.\n",
    "        labels = algorithm(*args, **kwds).fit_predict(train_no_label)\n",
    "        print(len(labels))\n",
    "        # Matplotlib's colour cycle only defines C0..C9: clip larger cluster\n",
    "        # ids, and draw noise points (label -1, e.g. from DBSCAN) in white.\n",
    "        clipped = np.clip(labels, -1, 9)\n",
    "        colors = ['C' + str(c) if c > -1 else 'white' for c in clipped]\n",
    "\n",
    "        plt.subplot(rows, columns, idx * columns + 1)\n",
    "        plt.scatter(train_no_label[data_cols[0]], train_no_label[data_cols[1]], c=colors)\n",
    "        plt.xlabel(data_cols[0])\n",
    "        plt.ylabel(data_cols[1])\n",
    "        plt.title(name)\n",
    "\n",
    "    # Use the labels produced by the last fitted algorithm as the class\n",
    "    # column of the GAN training set.\n",
    "    botnet_w_classes = train_no_label.copy()\n",
    "    botnet_w_classes['Label'] = labels\n",
    "    train_data = botnet_w_classes\n",
    "\n",
    "# else:\n",
    "#     train_data = train_no_label\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "executionInfo": {
     "elapsed": 54,
     "status": "ok",
     "timestamp": 1646286481008,
     "user": {
      "displayName": "Rizwan Hamid Randhawa",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCoBmDlAGvdCQjk-uJ2xxx1pJC0fjRsK2FcWLNdaY=s64",
      "userId": "15615777248917371178"
     },
     "user_tz": 0
    },
    "id": "wmB4aCmMWj2D",
    "outputId": "bb6c85f1-3ae0-4e88-99bd-0b6f26f1f70d",
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0       0\n",
       "1       0\n",
       "2       0\n",
       "3       0\n",
       "4       0\n",
       "       ..\n",
       "1743    0\n",
       "1744    0\n",
       "1745    0\n",
       "1746    0\n",
       "1747    0\n",
       "Name: Label, Length: 1748, dtype: int32"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity-check the cluster-derived class labels assigned above; the bare\n",
    "# expression renders the Series (head/tail, length, dtype) as cell output.\n",
    "train_data['Label']"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "q168UjfTWj2E"
   },
   "source": [
    "<a id=\"GAN-Training\"><h2>GAN Training</h2></a>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Result: (1792, 61)\n",
      "log_interval : 7\n",
      "Total Batch Iterations: 1050\n",
      "['FlowDuration', 'TotalFwdPacket', 'TotalBwdpackets', 'TotalLengthofFwdPacket', 'TotalLengthofBwdPacket', 'FwdPacketLengthMax', 'FwdPacketLengthMin', 'FwdPacketLengthMean', 'FwdPacketLengthStd', 'BwdPacketLengthMax', 'BwdPacketLengthMin', 'BwdPacketLengthMean', 'BwdPacketLengthStd', 'FlowBytes/s', 'FlowPackets/s', 'FlowIATMean', 'FlowIATMax', 'FlowIATMin', 'FwdIATTotal', 'FwdIATMean', 'FwdIATMax', 'FwdIATMin', 'BwdIATTotal', 'BwdIATMean', 'BwdIATStd', 'BwdIATMax', 'BwdIATMin', 'FwdPSHFlags', 'FwdHeaderLength', 'BwdHeaderLength', 'FwdPackets/s', 'BwdPackets/s', 'PacketLengthMin', 'PacketLengthMax', 'PacketLengthMean', 'PacketLengthStd', 'PacketLengthVariance', 'FINFlagCount', 'SYNFlagCount', 'RSTFlagCount', 'PSHFlagCount', 'ACKFlagCount', 'Down/UpRatio', 'AveragePacketSize', 'FwdSegmentSizeAvg', 'BwdSegmentSizeAvg', 'BwdBytes/BulkAvg', 'BwdPacket/BulkAvg', 'BwdBulkRateAvg', 'SubflowFwdPackets', 'SubflowFwdBytes', 'SubflowBwdBytes', 'FWDInitWinBytes', 'BwdInitWinBytes', 'FwdActDataPkts', 'FwdSegSizeMin', 'IdleMean', 'IdleStd', 'IdleMax', 'IdleMin']\n",
      "ISCX-2014_2022-05-15 23:30:40.977164\n",
      "WARNING:tensorflow:From /home/nu/anaconda3/lib/python3.9/site-packages/keras/layers/normalization/batch_normalization.py:532: _colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Colocations handled automatically by placer.\n",
      "['loss']\n",
      "Normal: (246929, 61)\n",
      "Bots: (1748, 61)\n",
      "(1223, 61) (172850, 61)\n",
      "Estimating Classifiers..\n",
      "XGB..\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/nu/anaconda3/lib/python3.9/site-packages/xgboost/data.py:250: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n",
      "  elif isinstance(data.columns, (pd.Int64Index, pd.RangeIndex)):\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "======================================================\n",
      "Starting GAN Training..\n",
      "======================================================\n",
      "['loss']\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/nu/anaconda3/lib/python3.9/site-packages/keras/engine/training_v1.py:2079: UserWarning: `Model.state_updates` will be removed in a future version. This property should not be used in TensorFlow 2.0, as `updates` are applied automatically.\n",
      "  updates=self.state_updates,\n",
      "2022-05-15 23:30:49.173389: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2 FMA\n",
      "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
      "2022-05-15 23:30:51.006232: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 7383 MB memory:  -> device: 0, name: NVIDIA GeForce GTX 1070 Ti, pci bus id: 0000:0a:00.0, compute capability: 6.1\n",
      "2022-05-15 23:30:51.008132: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 7383 MB memory:  -> device: 1, name: NVIDIA GeForce GTX 1070 Ti, pci bus id: 0000:0b:00.0, compute capability: 6.1\n",
      "2022-05-15 23:30:51.010187: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 6629 MB memory:  -> device: 2, name: NVIDIA GeForce GTX 1070 Ti, pci bus id: 0000:42:00.0, compute capability: 6.1\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Discrepancy between trainable weights and collected trainable weights, did you set `model.trainable` without calling `model.compile` after ?\n",
      "WARNING:tensorflow:Discrepancy between trainable weights and collected trainable weights, did you set `model.trainable` without calling `model.compile` after ?\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/nu/anaconda3/lib/python3.9/site-packages/keras/optimizer_v2/adam.py:105: UserWarning: The `lr` argument is deprecated, use `learning_rate` instead.\n",
      "  super(Adam, self).__init__(name, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/nu/anaconda3/lib/python3.9/site-packages/keras/engine/training_v1.py:2079: UserWarning: `Model.state_updates` will be removed in a future version. This property should not be used in TensorFlow 2.0, as `updates` are applied automatically.\n",
      "  updates=self.state_updates,\n",
      "/home/nu/anaconda3/lib/python3.9/site-packages/keras/engine/training_v1.py:2079: UserWarning: `Model.state_updates` will be removed in a future version. This property should not be used in TensorFlow 2.0, as `updates` are applied automatically.\n",
      "  updates=self.state_updates,\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "   94/10000 [..............................] - ETA: 55s - reward: 0.0745"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/nu/anaconda3/lib/python3.9/site-packages/keras/engine/training_v1.py:2079: UserWarning: `Model.state_updates` will be removed in a future version. This property should not be used in TensorFlow 2.0, as `updates` are applied automatically.\n",
      "  updates=self.state_updates,\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 1999/10000 [====>.........................] - ETA: 2:42 - reward: 0.0465done, took 41.925 seconds\n",
      "WARNING:tensorflow:Discrepancy between trainable weights and collected trainable weights, did you set `model.trainable` without calling `model.compile` after ?\n",
      "  g_loss: 1.4007685\n",
      "Evasions: 93\n",
      "Time left = 2.31 hours\n",
      "Total Time Taken: 0.9 minutes\n",
      "epoch_number: 1 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  199/10000 [..............................] - ETA: 1:59 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:25 - reward: 0.0125done, took 36.360 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  952/10000 [=>............................] - ETA: 2:40 - reward: 0.0252>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:26 - reward: 0.0235done, took 36.594 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 11\n",
      " 1522/10000 [===>..........................] - ETA: 2:33 - reward: 0.0263>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:26 - reward: 0.0240done, took 36.523 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1953/10000 [====>.........................] - ETA: 2:29 - reward: 0.0348>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:28 - reward: 0.0350done, took 37.172 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 6\n",
      "    1/10000 [..............................] - ETA: 1:00 - reward: 1.0000Attacks: 6\n",
      " 2000/10000 [=====>........................] - ETA: 2:28 - reward: 0.0380done, took 37.063 seconds\n",
      "  g_loss: 3.4287663\n",
      "Evasions: 266\n",
      "Time left = 5.0 hours\n",
      "Total Time Taken: 4.1 minutes\n",
      "epoch_number: 2 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  258/10000 [..............................] - ETA: 2:12 - reward: 0.0233>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:27 - reward: 0.0315done, took 36.912 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  688/10000 [=>............................] - ETA: 2:44 - reward: 0.0262>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0350done, took 38.324 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 12\n",
      "    1/10000 [..............................] - ETA: 51s - reward: 1.0000Attacks: 12\n",
      "  888/10000 [=>............................] - ETA: 2:45 - reward: 0.0473>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      "  900/10000 [=>............................] - ETA: 2:45 - reward: 0.0467Attacks: 1\n",
      " 2000/10000 [=====>........................] - ETA: 2:30 - reward: 0.0505done, took 37.597 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1003/10000 [==>...........................] - ETA: 2:45 - reward: 0.0429>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0475done, took 38.608 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 7\n",
      "  965/10000 [=>............................] - ETA: 2:42 - reward: 0.0642>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1870/10000 [====>.........................] - ETA: 2:33 - reward: 0.0647Attacks: 1\n",
      "Attacks: 1\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0670done, took 38.066 seconds\n",
      "  g_loss: 2.9587088\n",
      "Evasions: 463\n",
      "Time left = 5.94 hours\n",
      "Total Time Taken: 7.3 minutes\n",
      "epoch_number: 3 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  912/10000 [=>............................] - ETA: 2:46 - reward: 0.0461>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0530done, took 38.550 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  885/10000 [=>............................] - ETA: 2:49 - reward: 0.0565>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0561done, took 38.882 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  774/10000 [=>............................] - ETA: 2:46 - reward: 0.0762>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:32 - reward: 0.0716done, took 38.107 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  590/10000 [>.............................] - ETA: 2:44 - reward: 0.0712>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      "  906/10000 [=>............................] - ETA: 2:47 - reward: 0.0795Attacks: 1\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0640done, took 38.112 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "   61/10000 [..............................] - ETA: 52s - reward: 0.1148Attacks: 3\n",
      "  382/10000 [>.............................] - ETA: 2:48 - reward: 0.1099>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:37 - reward: 0.0816done, took 39.363 seconds\n",
      "  g_loss: 1.9650804\n",
      "Evasions: 652\n",
      "Time left = 6.42 hours\n",
      "Total Time Taken: 10.6 minutes\n",
      "epoch_number: 4 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  150/10000 [..............................] - ETA: 1:40 - reward: 0.0600>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0480done, took 38.300 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  314/10000 [..............................] - ETA: 2:29 - reward: 0.0318>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:32 - reward: 0.0325done, took 38.086 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  754/10000 [=>............................] - ETA: 2:45 - reward: 0.0252>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:32 - reward: 0.0345done, took 38.158 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  954/10000 [=>............................] - ETA: 2:44 - reward: 0.0472>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0530done, took 38.295 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1083/10000 [==>...........................] - ETA: 2:48 - reward: 0.0360>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0395done, took 38.966 seconds\n",
      "  g_loss: 1.2115355\n",
      "Evasions: 415\n",
      "Time left = 6.68 hours\n",
      "Total Time Taken: 13.8 minutes\n",
      "epoch_number: 5 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1281/10000 [==>...........................] - ETA: 2:43 - reward: 0.0422>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1830/10000 [====>.........................] - ETA: 2:36 - reward: 0.0372Attacks: 1\n",
      " 1832/10000 [====>.........................] - ETA: 2:36 - reward: 0.0377Attacks: 1\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0370done, took 38.529 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1591/10000 [===>..........................] - ETA: 2:39 - reward: 0.0409>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0390done, took 38.497 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 6\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "    1/10000 [..............................] - ETA: 48s - reward: 1.0000Attacks: 6\n",
      " 1958/10000 [====>.........................] - ETA: 2:33 - reward: 0.0342>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0345done, took 38.199 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:32 - reward: 0.0350done, took 38.061 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 6\n",
      "  291/10000 [..............................] - ETA: 2:21 - reward: 0.0378>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0325done, took 38.262 seconds\n",
      "  g_loss: 1.0220001\n",
      "Evasions: 356\n",
      "Time left = 6.83 hours\n",
      "Total Time Taken: 17.1 minutes\n",
      "epoch_number: 6 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 4\n",
      "  633/10000 [>.............................] - ETA: 2:50 - reward: 0.0442>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0400done, took 39.180 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  932/10000 [=>............................] - ETA: 2:46 - reward: 0.0376>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0355done, took 38.508 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1143/10000 [==>...........................] - ETA: 2:46 - reward: 0.0490>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0425done, took 38.549 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1296/10000 [==>...........................] - ETA: 2:45 - reward: 0.0540>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0550done, took 38.926 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 1\n",
      " 1447/10000 [===>..........................] - ETA: 2:41 - reward: 0.0394>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0385done, took 38.768 seconds\n",
      "  g_loss: 1.0305376\n",
      "Evasions: 423\n",
      "Time left = 6.93 hours\n",
      "Total Time Taken: 20.4 minutes\n",
      "epoch_number: 7 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1697/10000 [====>.........................] - ETA: 2:39 - reward: 0.0424>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0435done, took 38.821 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 2\n",
      " 1869/10000 [====>.........................] - ETA: 2:38 - reward: 0.0342Attacks: 1\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0335done, took 39.191 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "   11/10000 [..............................] - ETA: 51s - reward: 0.0000e+00 >>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0365done, took 39.033 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  251/10000 [..............................] - ETA: 2:20 - reward: 0.0757>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0415done, took 38.378 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 6\n",
      "    1/10000 [..............................] - ETA: 50s - reward: 1.0000Attacks: 6\n",
      "Attacks: 6\n",
      "Attacks: 6\n",
      "Attacks: 6\n",
      "  452/10000 [>.............................] - ETA: 2:41 - reward: 0.0642>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0435done, took 38.265 seconds\n",
      "  g_loss: 0.91713226\n",
      "Evasions: 398\n",
      "Time left = 7.0 hours\n",
      "Total Time Taken: 23.7 minutes\n",
      "epoch_number: 8 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  809/10000 [=>............................] - ETA: 2:46 - reward: 0.0297>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0310done, took 38.275 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 9\n",
      " 1104/10000 [==>...........................] - ETA: 2:48 - reward: 0.0426>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0395done, took 38.979 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1413/10000 [===>..........................] - ETA: 2:43 - reward: 0.0382>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0360done, took 38.721 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1888/10000 [====>.........................] - ETA: 2:34 - reward: 0.0275>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0290done, took 38.129 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:32 - reward: 0.0360done, took 38.008 seconds\n",
      "  g_loss: 0.8595834\n",
      "Evasions: 343\n",
      "Time left = 7.03 hours\n",
      "Total Time Taken: 26.9 minutes\n",
      "epoch_number: 9 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  206/10000 [..............................] - ETA: 2:04 - reward: 0.0291>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0340done, took 38.406 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  600/10000 [>.............................] - ETA: 2:42 - reward: 0.0267>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:31 - reward: 0.0280done, took 37.939 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  968/10000 [=>............................] - ETA: 2:45 - reward: 0.0413>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:32 - reward: 0.0370done, took 38.145 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1563/10000 [===>..........................] - ETA: 2:39 - reward: 0.0173>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0225done, took 38.308 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1991/10000 [====>.........................] - ETA: 2:31 - reward: 0.0286>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:31 - reward: 0.0285done, took 37.798 seconds\n",
      "  g_loss: 0.89441925\n",
      "Evasions: 300\n",
      "Time left = 7.04 hours\n",
      "Total Time Taken: 30.2 minutes\n",
      "epoch_number: 10 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0375done, took 38.874 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  251/10000 [..............................] - ETA: 2:19 - reward: 0.0518>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0425done, took 38.869 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  524/10000 [>.............................] - ETA: 2:41 - reward: 0.0324>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0355done, took 38.250 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 5\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "    1/10000 [..............................] - ETA: 49s - reward: 1.0000Attacks: 5\n",
      "  808/10000 [=>............................] - ETA: 2:48 - reward: 0.0408>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0415done, took 38.862 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1079/10000 [==>...........................] - ETA: 2:46 - reward: 0.0343>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0335done, took 38.429 seconds\n",
      "  g_loss: 0.8684562\n",
      "Evasions: 381\n",
      "Time left = 7.04 hours\n",
      "Total Time Taken: 33.4 minutes\n",
      "epoch_number: 11 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1387/10000 [===>..........................] - ETA: 2:45 - reward: 0.0397>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0410done, took 38.898 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 1\n",
      " 1599/10000 [===>..........................] - ETA: 2:42 - reward: 0.0419>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0400done, took 39.217 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1775/10000 [====>.........................] - ETA: 2:42 - reward: 0.0468>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:38 - reward: 0.0455done, took 39.502 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0325done, took 38.408 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  142/10000 [..............................] - ETA: 1:34 - reward: 0.0352>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0365done, took 38.405 seconds\n",
      "  g_loss: 0.79310495\n",
      "Evasions: 391\n",
      "Time left = 7.04 hours\n",
      "Total Time Taken: 36.7 minutes\n",
      "epoch_number: 12 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  521/10000 [>.............................] - ETA: 2:40 - reward: 0.0230>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:31 - reward: 0.0215done, took 37.876 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1033/10000 [==>...........................] - ETA: 2:46 - reward: 0.0358>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0325done, took 38.365 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 1\n",
      " 1403/10000 [===>..........................] - ETA: 2:43 - reward: 0.0371>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0330done, took 38.361 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 11\n",
      " 1999/10000 [====>.........................] - ETA: 2:32 - reward: 0.0200done, took 38.046 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "   41/10000 [..............................] - ETA: 51s - reward: 0.0488>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0275done, took 38.549 seconds\n",
      "  g_loss: 0.8576844\n",
      "Evasions: 269\n",
      "Time left = 7.02 hours\n",
      "Total Time Taken: 40.0 minutes\n",
      "epoch_number: 13 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  568/10000 [>.............................] - ETA: 2:42 - reward: 0.0246>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:32 - reward: 0.0315done, took 38.212 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  950/10000 [=>............................] - ETA: 2:45 - reward: 0.0316>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0295done, took 38.274 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 7\n",
      " 1581/10000 [===>..........................] - ETA: 2:39 - reward: 0.0183>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0200done, took 38.298 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 4\n",
      " 1949/10000 [====>.........................] - ETA: 2:35 - reward: 0.0354>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0345done, took 38.695 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0390done, took 38.706 seconds\n",
      "  g_loss: 0.65801406\n",
      "Evasions: 309\n",
      "Time left = 7.0 hours\n",
      "Total Time Taken: 43.3 minutes\n",
      "epoch_number: 14 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  219/10000 [..............................] - ETA: 2:09 - reward: 0.0411>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0245done, took 38.479 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  829/10000 [=>............................] - ETA: 2:44 - reward: 0.0241>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:31 - reward: 0.0225done, took 37.760 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1377/10000 [===>..........................] - ETA: 2:40 - reward: 0.0290>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:31 - reward: 0.0305done, took 37.929 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1853/10000 [====>.........................] - ETA: 2:34 - reward: 0.0270>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:32 - reward: 0.0280done, took 38.062 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:32 - reward: 0.0295done, took 38.174 seconds\n",
      "  g_loss: 0.7019881\n",
      "Evasions: 270\n",
      "Time left = 6.97 hours\n",
      "Total Time Taken: 46.5 minutes\n",
      "epoch_number: 15 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 2\n",
      "    1/10000 [..............................] - ETA: 51s - reward: 1.0000Attacks: 2\n",
      "  302/10000 [..............................] - ETA: 2:23 - reward: 0.0232>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:32 - reward: 0.0245done, took 38.181 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  924/10000 [=>............................] - ETA: 2:44 - reward: 0.0184>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0265done, took 38.264 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1459/10000 [===>..........................] - ETA: 2:40 - reward: 0.0212>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0210done, took 38.061 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1973/10000 [====>.........................] - ETA: 2:34 - reward: 0.0289>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0295done, took 38.612 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0370done, took 38.580 seconds\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  g_loss: 0.63216394\n",
      "Evasions: 277\n",
      "Time left = 6.94 hours\n",
      "Total Time Taken: 49.7 minutes\n",
      "epoch_number: 16 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 1\n",
      "  327/10000 [..............................] - ETA: 2:26 - reward: 0.0092>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:32 - reward: 0.0225done, took 38.223 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 6\n",
      "  828/10000 [=>............................] - ETA: 2:46 - reward: 0.0338>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0335done, took 38.460 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1231/10000 [==>...........................] - ETA: 2:45 - reward: 0.0309>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0255done, took 38.340 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 1\n",
      " 1900/10000 [====>.........................] - ETA: 2:33 - reward: 0.0221>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0220done, took 38.082 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:32 - reward: 0.0250done, took 38.059 seconds\n",
      "  g_loss: 0.67836773\n",
      "Evasions: 257\n",
      "Time left = 6.91 hours\n",
      "Total Time Taken: 53.0 minutes\n",
      "epoch_number: 17 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  423/10000 [>.............................] - ETA: 2:38 - reward: 0.0331>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0315done, took 38.285 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  947/10000 [=>............................] - ETA: 2:44 - reward: 0.0190>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0245done, took 38.070 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1362/10000 [===>..........................] - ETA: 2:44 - reward: 0.0323>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0295done, took 38.451 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1863/10000 [====>.........................] - ETA: 2:36 - reward: 0.0295>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0310done, took 38.643 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0185done, took 38.619 seconds\n",
      "  g_loss: 0.61530155\n",
      "Evasions: 270\n",
      "Time left = 6.87 hours\n",
      "Total Time Taken: 56.3 minutes\n",
      "epoch_number: 18 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  604/10000 [>.............................] - ETA: 2:42 - reward: 0.0099>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:32 - reward: 0.0220done, took 38.205 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1162/10000 [==>...........................] - ETA: 2:44 - reward: 0.0224>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0215done, took 38.527 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1736/10000 [====>.........................] - ETA: 2:40 - reward: 0.0271>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0275done, took 39.141 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0170done, took 38.539 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  428/10000 [>.............................] - ETA: 2:39 - reward: 0.0234>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0145done, took 38.560 seconds\n",
      "  g_loss: 0.65454996\n",
      "Evasions: 205\n",
      "Time left = 6.84 hours\n",
      "Total Time Taken: 59.5 minutes\n",
      "epoch_number: 19 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1288/10000 [==>...........................] - ETA: 2:42 - reward: 0.0155>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:31 - reward: 0.0165done, took 37.959 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0165done, took 37.996 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "   32/10000 [..............................] - ETA: 51s - reward: 0.0312  >>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:32 - reward: 0.0140done, took 38.003 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  935/10000 [=>............................] - ETA: 2:43 - reward: 0.0096>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0120done, took 38.349 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1735/10000 [====>.........................] - ETA: 2:39 - reward: 0.0167>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0165done, took 38.696 seconds\n",
      "  g_loss: 0.6257067\n",
      "Evasions: 151\n",
      "Time left = 6.8 hours\n",
      "Total Time Taken: 62.8 minutes\n",
      "epoch_number: 20 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 1\n",
      "    1/10000 [..............................] - ETA: 51s - reward: 1.0000Attacks: 1\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0175done, took 38.285 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  486/10000 [>.............................] - ETA: 2:40 - reward: 0.0165>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0220done, took 38.735 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1140/10000 [==>...........................] - ETA: 2:45 - reward: 0.0175>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0160done, took 38.315 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1797/10000 [====>.........................] - ETA: 2:36 - reward: 0.0239>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0235done, took 38.341 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 4\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0200done, took 38.651 seconds\n",
      "  g_loss: 0.66973644\n",
      "Evasions: 198\n",
      "Time left = 6.76 hours\n",
      "Total Time Taken: 66.0 minutes\n",
      "epoch_number: 21 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  513/10000 [>.............................] - ETA: 2:43 - reward: 0.0136>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0200done, took 38.813 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1301/10000 [==>...........................] - ETA: 2:42 - reward: 0.0092>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0125done, took 38.208 seconds\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1872/10000 [====>.........................] - ETA: 2:36 - reward: 0.0267>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0250done, took 38.482 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 10\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0165done, took 38.280 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 1\n",
      "    1/10000 [..............................] - ETA: 48s - reward: 1.0000Attacks: 1\n",
      "  623/10000 [>.............................] - ETA: 2:44 - reward: 0.0225>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0160done, took 38.296 seconds\n",
      "  g_loss: 0.6024226\n",
      "Evasions: 180\n",
      "Time left = 6.72 hours\n",
      "Total Time Taken: 69.3 minutes\n",
      "epoch_number: 22 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1470/10000 [===>..........................] - ETA: 2:43 - reward: 0.0150>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0140done, took 38.897 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0090done, took 38.192 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  427/10000 [>.............................] - ETA: 2:40 - reward: 0.0141>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0095done, took 38.410 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1430/10000 [===>..........................] - ETA: 2:44 - reward: 0.0105>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0100done, took 38.721 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:32 - reward: 0.0120done, took 38.222 seconds\n",
      "  g_loss: 0.5192746\n",
      "Evasions: 109\n",
      "Time left = 6.68 hours\n",
      "Total Time Taken: 72.6 minutes\n",
      "epoch_number: 23 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  339/10000 [>.............................] - ETA: 2:33 - reward: 0.0147>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0070done, took 38.533 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1365/10000 [===>..........................] - ETA: 2:44 - reward: 0.0117>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0105done, took 38.644 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 4\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0120done, took 38.889 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  320/10000 [..............................] - ETA: 2:30 - reward: 0.0031>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0045done, took 38.938 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1480/10000 [===>..........................] - ETA: 2:43 - reward: 0.0041>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0040done, took 38.879 seconds\n",
      "  g_loss: 0.50367355\n",
      "Evasions: 76\n",
      "Time left = 6.64 hours\n",
      "Total Time Taken: 75.9 minutes\n",
      "epoch_number: 24 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0125done, took 38.975 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  437/10000 [>.............................] - ETA: 2:38 - reward: 0.0069>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0055done, took 38.623 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1571/10000 [===>..........................] - ETA: 2:41 - reward: 0.0051>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0050done, took 38.566 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0030done, took 38.262 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  756/10000 [=>............................] - ETA: 2:47 - reward: 0.0040>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0050done, took 38.684 seconds\n",
      "  g_loss: 0.54718304\n",
      "Evasions: 62\n",
      "Time left = 6.6 hours\n",
      "Total Time Taken: 79.1 minutes\n",
      "epoch_number: 25 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1784/10000 [====>.........................] - ETA: 2:38 - reward: 0.0101>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0090done, took 38.832 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0040done, took 38.649 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  943/10000 [=>............................] - ETA: 2:48 - reward: 0.0053>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0040done, took 38.487 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0040done, took 38.215 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  126/10000 [..............................] - ETA: 1:16 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0030done, took 38.004 seconds\n",
      "  g_loss: 0.48606932\n",
      "Evasions: 48\n",
      "Time left = 6.55 hours\n",
      "Total Time Taken: 82.4 minutes\n",
      "epoch_number: 26 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1262/10000 [==>...........................] - ETA: 2:43 - reward: 0.0079>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0060done, took 38.343 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:32 - reward: 0.0035done, took 37.984 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  443/10000 [>.............................] - ETA: 2:36 - reward: 0.0045>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0035done, took 38.342 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1653/10000 [===>..........................] - ETA: 2:37 - reward: 0.0030>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:32 - reward: 0.0030done, took 38.080 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 8\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0040done, took 38.426 seconds\n",
      "  g_loss: 0.45502007\n",
      "Evasions: 40\n",
      "Time left = 6.5 hours\n",
      "Total Time Taken: 85.7 minutes\n",
      "epoch_number: 27 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "Attacks: 11\n",
      "  776/10000 [=>............................] - ETA: 2:44 - reward: 0.0090>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0065done, took 38.269 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0010done, took 38.517 seconds\n",
      "Training for 2000 steps ...\n",
      ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 5.0000e-04done, took 38.546 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1299/10000 [==>...........................] - ETA: 2:44 - reward: 7.6982e-04>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 5.0050e-04done, took 38.463 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.446 seconds\n",
      "  g_loss: 0.4727463\n",
      "Evasions: 17\n",
      "Time left = 6.46 hours\n",
      "Total Time Taken: 88.9 minutes\n",
      "epoch_number: 28 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  602/10000 [>.............................] - ETA: 2:44 - reward: 0.0017>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0015done, took 38.960 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1880/10000 [====>.........................] - ETA: 2:37 - reward: 0.0011>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0010done, took 38.964 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.842 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1185/10000 [==>...........................] - ETA: 2:47 - reward: 8.4388e-04>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 5.0025e-04done, took 39.063 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.623 seconds\n",
      "  g_loss: 0.41223407\n",
      "Evasions: 6\n",
      "Time left = 6.41 hours\n",
      "Total Time Taken: 92.2 minutes\n",
      "epoch_number: 29 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  486/10000 [>.............................] - ETA: 2:41 - reward: 0.0021>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0040done, took 38.853 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1732/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.366 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 5.0025e-04done, took 38.756 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1035/10000 [==>...........................] - ETA: 2:47 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.719 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.013 seconds\n",
      "  g_loss: 0.4100424\n",
      "Evasions: 9\n",
      "Time left = 6.37 hours\n",
      "Total Time Taken: 95.5 minutes\n",
      "epoch_number: 30 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  350/10000 [>.............................] - ETA: 2:30 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0015done, took 38.654 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1640/10000 [===>..........................] - ETA: 2:37 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:32 - reward: 0.0000e+00done, took 38.068 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.296 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  955/10000 [=>............................] - ETA: 2:48 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.618 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.243 seconds\n",
      "  g_loss: 0.39348054\n",
      "Evasions: 3\n",
      "Time left = 6.32 hours\n",
      "Total Time Taken: 98.8 minutes\n",
      "epoch_number: 31 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  260/10000 [..............................] - ETA: 2:19 - reward: 0.0038>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:32 - reward: 0.0010done, took 38.204 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1576/10000 [===>..........................] - ETA: 2:39 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0000e+00done, took 38.201 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.317 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  891/10000 [=>............................] - ETA: 2:45 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.431 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.508 seconds\n",
      "  g_loss: 0.36243993\n",
      "Evasions: 2\n",
      "Time left = 6.27 hours\n",
      "Total Time Taken: 102.0 minutes\n",
      "epoch_number: 32 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  206/10000 [..............................] - ETA: 2:04 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0000e+00done, took 38.178 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1545/10000 [===>..........................] - ETA: 2:40 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.328 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.417 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  860/10000 [=>............................] - ETA: 2:44 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.272 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0000e+00done, took 38.175 seconds\n",
      "  g_loss: 0.36302927\n",
      "Evasions: 0\n",
      "Time left = 6.22 hours\n",
      "Total Time Taken: 105.3 minutes\n",
      "epoch_number: 33 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  177/10000 [..............................] - ETA: 1:53 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.400 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1503/10000 [===>..........................] - ETA: 2:41 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.315 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:32 - reward: 0.0000e+00done, took 38.131 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  818/10000 [=>............................] - ETA: 2:45 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:32 - reward: 0.0000e+00done, took 38.087 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.534 seconds\n",
      "  g_loss: 0.36908174\n",
      "Evasions: 0\n",
      "Time left = 6.17 hours\n",
      "Total Time Taken: 108.5 minutes\n",
      "epoch_number: 34 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  135/10000 [..............................] - ETA: 1:25 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:32 - reward: 0.0000e+00done, took 38.214 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1461/10000 [===>..........................] - ETA: 2:42 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.598 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.444 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  778/10000 [=>............................] - ETA: 2:46 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.665 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.675 seconds\n",
      "  g_loss: 0.32598504\n",
      "Evasions: 0\n",
      "Time left = 6.12 hours\n",
      "Total Time Taken: 111.8 minutes\n",
      "epoch_number: 35 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  105/10000 [..............................] - ETA: 55s - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.659 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1434/10000 [===>..........................] - ETA: 2:41 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.431 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:32 - reward: 0.0000e+00done, took 38.229 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  749/10000 [=>............................] - ETA: 2:45 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:32 - reward: 0.0000e+00done, took 38.018 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.449 seconds\n",
      "  g_loss: 0.32059777\n",
      "Evasions: 0\n",
      "Time left = 6.07 hours\n",
      "Total Time Taken: 115.1 minutes\n",
      "epoch_number: 36 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "   63/10000 [..............................] - ETA: 51s - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.541 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1391/10000 [===>..........................] - ETA: 2:44 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.633 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.053 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  705/10000 [=>............................] - ETA: 2:46 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.514 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.285 seconds\n",
      "  g_loss: 0.30876118\n",
      "Evasions: 0\n",
      "Time left = 6.02 hours\n",
      "Total Time Taken: 118.3 minutes\n",
      "epoch_number: 37 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "   22/10000 [..............................] - ETA: 51s - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.413 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1348/10000 [===>..........................] - ETA: 2:41 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0000e+00done, took 38.053 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.420 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  676/10000 [=>............................] - ETA: 2:45 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.382 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.655 seconds\n",
      "  g_loss: 0.2993845\n",
      "Evasions: 0\n",
      "Time left = 5.97 hours\n",
      "Total Time Taken: 121.6 minutes\n",
      "epoch_number: 38 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.482 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1319/10000 [==>...........................] - ETA: 2:43 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.267 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.385 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  636/10000 [>.............................] - ETA: 2:46 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.767 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1964/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.513 seconds\n",
      "  g_loss: 0.30245468\n",
      "Evasions: 0\n",
      "Time left = 5.92 hours\n",
      "Total Time Taken: 124.9 minutes\n",
      "epoch_number: 39 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 1999/10000 [====>.........................] - ETA: 2:32 - reward: 0.0000e+00done, took 38.166 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1279/10000 [==>...........................] - ETA: 2:42 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.232 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.954 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  594/10000 [>.............................] - ETA: 2:45 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.552 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1922/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.448 seconds\n",
      "  g_loss: 0.28686643\n",
      "Evasions: 0\n",
      "Time left = 5.87 hours\n",
      "Total Time Taken: 128.1 minutes\n",
      "epoch_number: 40 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.455 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1250/10000 [==>...........................] - ETA: 2:44 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.506 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.299 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  565/10000 [>.............................] - ETA: 2:42 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0000e+00done, took 38.172 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1893/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.345 seconds\n",
      "  g_loss: 0.24943203\n",
      "Evasions: 0\n",
      "Time left = 5.82 hours\n",
      "Total Time Taken: 131.4 minutes\n",
      "epoch_number: 41 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.786 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1206/10000 [==>...........................] - ETA: 2:45 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.597 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:32 - reward: 0.0000e+00done, took 38.222 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  522/10000 [>.............................] - ETA: 2:43 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.594 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1851/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.600 seconds\n",
      "  g_loss: 0.2750051\n",
      "Evasions: 0\n",
      "Time left = 5.77 hours\n",
      "Total Time Taken: 134.7 minutes\n",
      "epoch_number: 42 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.253 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1166/10000 [==>...........................] - ETA: 2:45 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.433 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.634 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  492/10000 [>.............................] - ETA: 2:41 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.674 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1822/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.559 seconds\n",
      "  g_loss: 0.27233243\n",
      "Evasions: 0\n",
      "Time left = 5.72 hours\n",
      "Total Time Taken: 137.9 minutes\n",
      "epoch_number: 43 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:32 - reward: 0.0000e+00done, took 38.198 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1137/10000 [==>...........................] - ETA: 2:47 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.805 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.508 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  452/10000 [>.............................] - ETA: 2:38 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.394 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1778/10000 [====>.........................] - ETA: 2:38 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.591 seconds\n",
      "  g_loss: 0.24178703\n",
      "Evasions: 0\n",
      "Time left = 5.67 hours\n",
      "Total Time Taken: 141.2 minutes\n",
      "epoch_number: 44 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.607 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1095/10000 [==>...........................] - ETA: 2:44 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.282 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.410 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  410/10000 [>.............................] - ETA: 2:38 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:33 - reward: 0.0000e+00done, took 38.469 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1738/10000 [====>.........................] - ETA: 2:39 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.874 seconds\n",
      "  g_loss: 0.21689337\n",
      "Evasions: 0\n",
      "Time left = 5.62 hours\n",
      "Total Time Taken: 144.5 minutes\n",
      "epoch_number: 45 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.635 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1064/10000 [==>...........................] - ETA: 2:45 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.544 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 38.980 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  379/10000 [>.............................] - ETA: 2:35 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.605 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1707/10000 [====>.........................] - ETA: 2:38 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.502 seconds\n",
      "  g_loss: 0.23035069\n",
      "Evasions: 0\n",
      "Time left = 5.57 hours\n",
      "Total Time Taken: 147.8 minutes\n",
      "epoch_number: 46 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.829 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1024/10000 [==>...........................] - ETA: 2:46 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.656 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.127 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  337/10000 [>.............................] - ETA: 2:34 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.109 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1665/10000 [===>..........................] - ETA: 2:40 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.879 seconds\n",
      "  g_loss: 0.20976852\n",
      "Evasions: 0\n",
      "Time left = 5.52 hours\n",
      "Total Time Taken: 151.1 minutes\n",
      "epoch_number: 47 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.492 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  982/10000 [=>............................] - ETA: 2:47 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.582 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.578 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  297/10000 [..............................] - ETA: 2:25 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.521 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1638/10000 [===>..........................] - ETA: 2:42 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 38.983 seconds\n",
      "  g_loss: 0.22174342\n",
      "Evasions: 0\n",
      "Time left = 5.47 hours\n",
      "Total Time Taken: 154.4 minutes\n",
      "epoch_number: 48 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.529 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  951/10000 [=>............................] - ETA: 2:49 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.850 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.719 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  266/10000 [..............................] - ETA: 2:21 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.800 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1596/10000 [===>..........................] - ETA: 2:41 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.814 seconds\n",
      "  g_loss: 0.21165076\n",
      "Evasions: 0\n",
      "Time left = 5.42 hours\n",
      "Total Time Taken: 157.6 minutes\n",
      "epoch_number: 49 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.896 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  911/10000 [=>............................] - ETA: 2:48 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.007 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.047 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  226/10000 [..............................] - ETA: 2:10 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.745 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1554/10000 [===>..........................] - ETA: 2:42 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.832 seconds\n",
      "  g_loss: 0.20997502\n",
      "Evasions: 0\n",
      "Time left = 5.36 hours\n",
      "Total Time Taken: 161.0 minutes\n",
      "epoch_number: 50 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.138 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  869/10000 [=>............................] - ETA: 2:46 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.547 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.331 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  197/10000 [..............................] - ETA: 2:02 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.737 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1525/10000 [===>..........................] - ETA: 2:42 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.817 seconds\n",
      "  g_loss: 0.2041375\n",
      "Evasions: 0\n",
      "Time left = 5.31 hours\n",
      "Total Time Taken: 164.3 minutes\n",
      "epoch_number: 51 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.729 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  838/10000 [=>............................] - ETA: 2:48 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.827 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.318 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  153/10000 [..............................] - ETA: 1:44 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:38 - reward: 0.0000e+00done, took 39.542 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1481/10000 [===>..........................] - ETA: 2:43 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.935 seconds\n",
      "  g_loss: 0.18293782\n",
      "Evasions: 0\n",
      "Time left = 5.26 hours\n",
      "Total Time Taken: 167.6 minutes\n",
      "epoch_number: 52 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.173 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  798/10000 [=>............................] - ETA: 2:50 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.168 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.670 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  113/10000 [..............................] - ETA: 1:06 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.665 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1439/10000 [===>..........................] - ETA: 2:43 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.890 seconds\n",
      "  g_loss: 0.17704833\n",
      "Evasions: 0\n",
      "Time left = 5.21 hours\n",
      "Total Time Taken: 170.9 minutes\n",
      "epoch_number: 53 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.108 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  767/10000 [=>............................] - ETA: 2:47 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.427 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.635 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "   80/10000 [..............................] - ETA: 52s - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.530 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1412/10000 [===>..........................] - ETA: 2:45 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.936 seconds\n",
      "  g_loss: 0.17400791\n",
      "Evasions: 0\n",
      "Time left = 5.16 hours\n",
      "Total Time Taken: 174.2 minutes\n",
      "epoch_number: 54 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.791 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  725/10000 [=>............................] - ETA: 2:48 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.919 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.871 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "   42/10000 [..............................] - ETA: 51s - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.715 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1370/10000 [===>..........................] - ETA: 2:44 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.793 seconds\n",
      "  g_loss: 0.17446494\n",
      "Evasions: 0\n",
      "Time left = 5.11 hours\n",
      "Total Time Taken: 177.5 minutes\n",
      "epoch_number: 55 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.934 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  683/10000 [=>............................] - ETA: 2:50 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.952 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.537 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "   12/10000 [..............................] - ETA: 48s - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.594 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1341/10000 [===>..........................] - ETA: 2:44 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.604 seconds\n",
      "  g_loss: 0.16205513\n",
      "Evasions: 0\n",
      "Time left = 5.06 hours\n",
      "Total Time Taken: 180.8 minutes\n",
      "epoch_number: 56 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.977 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  654/10000 [>.............................] - ETA: 2:47 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.874 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1982/10000 [====>.........................] - ETA: 2:38 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.480 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.561 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1299/10000 [==>...........................] - ETA: 2:44 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.760 seconds\n",
      "  g_loss: 0.16244277\n",
      "Evasions: 0\n",
      "Time left = 5.01 hours\n",
      "Total Time Taken: 184.1 minutes\n",
      "epoch_number: 57 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.226 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  614/10000 [>.............................] - ETA: 2:46 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.137 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1942/10000 [====>.........................] - ETA: 2:38 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.339 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.871 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1257/10000 [==>...........................] - ETA: 2:45 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.724 seconds\n",
      "  g_loss: 0.16950166\n",
      "Evasions: 0\n",
      "Time left = 4.95 hours\n",
      "Total Time Taken: 187.4 minutes\n",
      "epoch_number: 58 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.140 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  583/10000 [>.............................] - ETA: 2:48 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.979 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1911/10000 [====>.........................] - ETA: 2:38 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.217 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.082 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1226/10000 [==>...........................] - ETA: 2:45 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.932 seconds\n",
      "  g_loss: 0.15340225\n",
      "Evasions: 0\n",
      "Time left = 4.9 hours\n",
      "Total Time Taken: 190.7 minutes\n",
      "epoch_number: 59 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.024 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  541/10000 [>.............................] - ETA: 2:47 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:38 - reward: 0.0000e+00done, took 39.501 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1870/10000 [====>.........................] - ETA: 2:39 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.406 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.945 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1184/10000 [==>...........................] - ETA: 2:48 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.047 seconds\n",
      "  g_loss: 0.13790849\n",
      "Evasions: 0\n",
      "Time left = 4.85 hours\n",
      "Total Time Taken: 194.1 minutes\n",
      "epoch_number: 60 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.759 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  499/10000 [>.............................] - ETA: 2:41 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.181 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1829/10000 [====>.........................] - ETA: 2:38 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.945 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.381 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1155/10000 [==>...........................] - ETA: 2:47 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.812 seconds\n",
      "  g_loss: 0.13990408\n",
      "Evasions: 0\n",
      "Time left = 4.8 hours\n",
      "Total Time Taken: 197.4 minutes\n",
      "epoch_number: 61 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.765 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  470/10000 [>.............................] - ETA: 2:40 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 38.997 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1798/10000 [====>.........................] - ETA: 2:40 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.156 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:38 - reward: 0.0000e+00done, took 39.705 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1113/10000 [==>...........................] - ETA: 2:46 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.762 seconds\n",
      "  g_loss: 0.1390024\n",
      "Evasions: 0\n",
      "Time left = 4.75 hours\n",
      "Total Time Taken: 200.7 minutes\n",
      "epoch_number: 62 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.661 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  428/10000 [>.............................] - ETA: 2:39 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.807 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1756/10000 [====>.........................] - ETA: 2:39 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.003 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.069 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1071/10000 [==>...........................] - ETA: 2:51 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.226 seconds\n",
      "  g_loss: 0.14069086\n",
      "Evasions: 0\n",
      "Time left = 4.69 hours\n",
      "Total Time Taken: 204.0 minutes\n",
      "epoch_number: 63 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.148 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  386/10000 [>.............................] - ETA: 2:42 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.466 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1727/10000 [====>.........................] - ETA: 2:41 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.176 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.848 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1044/10000 [==>...........................] - ETA: 2:50 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.304 seconds\n",
      "  g_loss: 0.12421049\n",
      "Evasions: 0\n",
      "Time left = 4.64 hours\n",
      "Total Time Taken: 207.3 minutes\n",
      "epoch_number: 64 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:38 - reward: 0.0000e+00done, took 39.722 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  359/10000 [>.............................] - ETA: 2:36 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.302 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1685/10000 [====>.........................] - ETA: 2:40 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.984 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.388 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1002/10000 [==>...........................] - ETA: 2:52 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:38 - reward: 0.0000e+00done, took 39.550 seconds\n",
      "  g_loss: 0.12440026\n",
      "Evasions: 0\n",
      "Time left = 4.59 hours\n",
      "Total Time Taken: 210.7 minutes\n",
      "epoch_number: 65 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.206 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  315/10000 [..............................] - ETA: 2:31 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.172 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1643/10000 [===>..........................] - ETA: 2:42 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.238 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.819 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  960/10000 [=>............................] - ETA: 2:48 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.834 seconds\n",
      "  g_loss: 0.13508976\n",
      "Evasions: 0\n",
      "Time left = 4.54 hours\n",
      "Total Time Taken: 214.0 minutes\n",
      "epoch_number: 66 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.789 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  286/10000 [..............................] - ETA: 2:25 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.819 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1614/10000 [===>..........................] - ETA: 2:42 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.007 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:34 - reward: 0.0000e+00done, took 38.669 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  931/10000 [=>............................] - ETA: 2:48 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.817 seconds\n",
      "  g_loss: 0.13302737\n",
      "Evasions: 0\n",
      "Time left = 4.49 hours\n",
      "Total Time Taken: 217.3 minutes\n",
      "epoch_number: 67 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.104 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  246/10000 [..............................] - ETA: 2:18 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.006 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1574/10000 [===>..........................] - ETA: 2:42 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.956 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.041 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  889/10000 [=>............................] - ETA: 2:48 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.947 seconds\n",
      "  g_loss: 0.1276816\n",
      "Evasions: 0\n",
      "Time left = 4.43 hours\n",
      "Total Time Taken: 220.6 minutes\n",
      "epoch_number: 68 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.906 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  202/10000 [..............................] - ETA: 2:04 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.920 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1530/10000 [===>..........................] - ETA: 2:43 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.042 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.449 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  860/10000 [=>............................] - ETA: 2:50 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.003 seconds\n",
      "  g_loss: 0.13120055\n",
      "Evasions: 0\n",
      "Time left = 4.38 hours\n",
      "Total Time Taken: 223.9 minutes\n",
      "epoch_number: 69 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.882 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  175/10000 [..............................] - ETA: 1:51 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.083 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1503/10000 [===>..........................] - ETA: 2:42 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.849 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.793 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  818/10000 [=>............................] - ETA: 2:50 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.160 seconds\n",
      "  g_loss: 0.10886464\n",
      "Evasions: 0\n",
      "Time left = 4.33 hours\n",
      "Total Time Taken: 227.2 minutes\n",
      "epoch_number: 70 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:39 - reward: 0.0000e+00done, took 39.818 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  133/10000 [..............................] - ETA: 1:28 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.284 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1461/10000 [===>..........................] - ETA: 2:43 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.827 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.100 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  776/10000 [=>............................] - ETA: 2:50 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.986 seconds\n",
      "  g_loss: 0.11740084\n",
      "Evasions: 0\n",
      "Time left = 4.28 hours\n",
      "Total Time Taken: 230.5 minutes\n",
      "epoch_number: 71 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.942 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  102/10000 [..............................] - ETA: 51s - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.855 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1430/10000 [===>..........................] - ETA: 2:45 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.319 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.313 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  745/10000 [=>............................] - ETA: 2:52 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:39 - reward: 0.0000e+00done, took 39.817 seconds\n",
      "  g_loss: 0.11740885\n",
      "Evasions: 0\n",
      "Time left = 4.22 hours\n",
      "Total Time Taken: 233.9 minutes\n",
      "epoch_number: 72 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:38 - reward: 0.0000e+00done, took 39.725 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "   58/10000 [..............................] - ETA: 54s - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.086 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1390/10000 [===>..........................] - ETA: 2:45 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.262 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.017 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  703/10000 [=>............................] - ETA: 2:49 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.180 seconds\n",
      "  g_loss: 0.114574485\n",
      "Evasions: 0\n",
      "Time left = 4.17 hours\n",
      "Total Time Taken: 237.2 minutes\n",
      "epoch_number: 73 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:38 - reward: 0.0000e+00done, took 39.505 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "   20/10000 [..............................] - ETA: 54s - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.161 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1348/10000 [===>..........................] - ETA: 2:46 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.060 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.098 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  674/10000 [=>............................] - ETA: 2:49 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.887 seconds\n",
      "  g_loss: 0.11791\n",
      "Evasions: 0\n",
      "Time left = 4.12 hours\n",
      "Total Time Taken: 240.5 minutes\n",
      "epoch_number: 74 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.066 seconds\n",
      "Training for 2000 steps ...\n",
      ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.905 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1319/10000 [==>...........................] - ETA: 2:47 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.189 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.922 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  634/10000 [>.............................] - ETA: 2:47 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.978 seconds\n",
      "  g_loss: 0.12327953\n",
      "Evasions: 0\n",
      "Time left = 4.06 hours\n",
      "Total Time Taken: 243.9 minutes\n",
      "epoch_number: 75 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 1962/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.809 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:39 - reward: 0.0000e+00done, took 39.816 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1275/10000 [==>...........................] - ETA: 2:49 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:39 - reward: 0.0000e+00done, took 39.739 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:38 - reward: 0.0000e+00done, took 39.482 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  592/10000 [>.............................] - ETA: 2:45 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.123 seconds\n",
      "  g_loss: 0.11252761\n",
      "Evasions: 0\n",
      "Time left = 4.01 hours\n",
      "Total Time Taken: 247.2 minutes\n",
      "epoch_number: 76 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1918/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.089 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:38 - reward: 0.0000e+00done, took 39.552 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1247/10000 [==>...........................] - ETA: 2:46 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.815 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:38 - reward: 0.0000e+00done, took 39.530 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  563/10000 [>.............................] - ETA: 2:48 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.445 seconds\n",
      "  g_loss: 0.11382735\n",
      "Evasions: 0\n",
      "Time left = 3.96 hours\n",
      "Total Time Taken: 250.5 minutes\n",
      "epoch_number: 77 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1889/10000 [====>.........................] - ETA: 2:39 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.372 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.393 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1204/10000 [==>...........................] - ETA: 2:47 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.122 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.762 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  519/10000 [>.............................] - ETA: 2:45 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.291 seconds\n",
      "  g_loss: 0.102913134\n",
      "Evasions: 0\n",
      "Time left = 3.91 hours\n",
      "Total Time Taken: 253.9 minutes\n",
      "epoch_number: 78 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1847/10000 [====>.........................] - ETA: 2:38 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.947 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.423 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1162/10000 [==>...........................] - ETA: 2:52 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:39 - reward: 0.0000e+00done, took 39.835 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.911 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  479/10000 [>.............................] - ETA: 2:42 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.250 seconds\n",
      "  g_loss: 0.10391149\n",
      "Evasions: 0\n",
      "Time left = 3.85 hours\n",
      "Total Time Taken: 257.2 minutes\n",
      "epoch_number: 79 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1820/10000 [====>.........................] - ETA: 2:40 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:37 - reward: 0.0000e+00done, took 39.301 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.162 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1135/10000 [==>...........................] - ETA: 2:49 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.095 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.151 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  448/10000 [>.............................] - ETA: 2:42 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.149 seconds\n",
      "  g_loss: 0.10638772\n",
      "Evasions: 0\n",
      "Time left = 3.8 hours\n",
      "Total Time Taken: 260.5 minutes\n",
      "epoch_number: 80 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1776/10000 [====>.........................] - ETA: 2:39 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.995 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1998/10000 [====>.........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.141 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1093/10000 [==>...........................] - ETA: 2:47 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.778 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.787 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  408/10000 [>.............................] - ETA: 2:37 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 1999/10000 [====>.........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.862 seconds\n",
      "  g_loss: 0.094192594\n",
      "Evasions: 0\n",
      "Time left = 3.75 hours\n",
      "Total Time Taken: 263.8 minutes\n",
      "epoch_number: 81 completed\n",
      "======================================================\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 1734/10000 [====>.........................] - ETA: 2:40 - reward: 0.0000e+00>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Done<<<<<<<<<<<<<<<<<<<<<<<<<\n",
      " 2000/10000 [=====>........................] - ETA: 2:35 - reward: 0.0000e+00done, took 38.857 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      " 2000/10000 [=====>........................] - ETA: 2:36 - reward: 0.0000e+00done, took 39.133 seconds\n",
      "Training for 2000 steps ...\n",
      "Interval 1 (0 steps performed)\n",
      "  710/10000 [=>............................] - ETA: 2:48 - reward: 0.0000e+00"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "probabilities contain NaN",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "Input \u001b[0;32mIn [21]\u001b[0m, in \u001b[0;36m<cell line: 88>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     81\u001b[0m \u001b[38;5;28mprint\u001b[39m(TODAY)\n\u001b[1;32m     84\u001b[0m arguments \u001b[38;5;241m=\u001b[39m [rand_noise_dim, nb_steps, batch_size, \n\u001b[1;32m     85\u001b[0m             k_d, k_g, critic_pre_train_steps, log_interval, learning_rate, base_n_count,\n\u001b[1;32m     86\u001b[0m             CACHE_PATH, FIGS_PATH, show, test_size, gpu_device, EVALUATION_PARAMETER, TODAY , DATA_SET]\n\u001b[0;32m---> 88\u001b[0m best_losses \u001b[38;5;241m=\u001b[39m \u001b[43mtrain_RELEVAGAN_CC\u001b[49m\u001b[43m(\u001b[49m\u001b[43marguments\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtrain\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mTrain\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdata_cols\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/Insync/rhr407@gmail.com/Google Drive/PhD/Development/code/RELEVAGAN_DQN_Agent/relevagan.py:1414\u001b[0m, in \u001b[0;36mtrain_RELEVAGAN_CC\u001b[0;34m(arguments, train, Train, data_cols, label_cols, seed, starting_step)\u001b[0m\n\u001b[1;32m   1347\u001b[0m (\n\u001b[1;32m   1348\u001b[0m     comb_loss,\n\u001b[1;32m   1349\u001b[0m     disc_loss_generated,\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m   1359\u001b[0m     epoch_list_comb_loss,\n\u001b[1;32m   1360\u001b[0m ) \u001b[38;5;241m=\u001b[39m ([], [], [], [], [], [], [], [], [], [], [], [])\n\u001b[1;32m   1361\u001b[0m model_components \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m   1362\u001b[0m     cache_prefix,\n\u001b[1;32m   1363\u001b[0m     with_class,\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m   1399\u001b[0m     DATA_SET,\n\u001b[1;32m   1400\u001b[0m ]\n\u001b[1;32m   1401\u001b[0m [\n\u001b[1;32m   1402\u001b[0m     best_xgb_acc_index,\n\u001b[1;32m   1403\u001b[0m     best_xgb_rcl_index,\n\u001b[1;32m   1404\u001b[0m     best_dt_acc_index,\n\u001b[1;32m   1405\u001b[0m     best_dt_rcl_index,\n\u001b[1;32m   1406\u001b[0m     best_nb_acc_index,\n\u001b[1;32m   1407\u001b[0m     best_nb_rcl_index,\n\u001b[1;32m   1408\u001b[0m     best_rf_acc_index,\n\u001b[1;32m   1409\u001b[0m     best_rf_rcl_index,\n\u001b[1;32m   1410\u001b[0m     best_lr_acc_index,\n\u001b[1;32m   1411\u001b[0m     best_lr_rcl_index,\n\u001b[1;32m   1412\u001b[0m     best_knn_acc_index,\n\u001b[1;32m   1413\u001b[0m     best_knn_rcl_index,\n\u001b[0;32m-> 1414\u001b[0m ] \u001b[38;5;241m=\u001b[39m \u001b[43mRELEVAGAN_CC\u001b[49m\u001b[43m(\u001b[49m\u001b[43mIMG_SHAPE\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mdata_cols\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel_components\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   
1415\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m [\n\u001b[1;32m   1416\u001b[0m     best_xgb_acc_index,\n\u001b[1;32m   1417\u001b[0m     best_xgb_rcl_index,\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m   1427\u001b[0m     best_knn_rcl_index,\n\u001b[1;32m   1428\u001b[0m ]\n",
      "File \u001b[0;32m~/Insync/rhr407@gmail.com/Google Drive/PhD/Development/code/RELEVAGAN_DQN_Agent/relevagan.py:880\u001b[0m, in \u001b[0;36mRELEVAGAN_CC.train\u001b[0;34m(self, model_components)\u001b[0m\n\u001b[1;32m    877\u001b[0m         \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mRL_TRAIN_BATCH \u001b[38;5;241m=\u001b[39m t_b[data_cols]\u001b[38;5;241m.\u001b[39mcopy()\n\u001b[1;32m    878\u001b[0m         \u001b[38;5;66;03m# print('self.RL_TRAIN_BATCH.shape[0]: ' + str(self.RL_TRAIN_BATCH.shape[0]))\u001b[39;00m\n\u001b[0;32m--> 880\u001b[0m         agent1 \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrain_dqn_model\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    881\u001b[0m \u001b[43m            \u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m128\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m64\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    882\u001b[0m \u001b[43m            \u001b[49m\u001b[43mrounds\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m2000\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m    883\u001b[0m \u001b[43m            \u001b[49m\u001b[43mrun_test\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m    884\u001b[0m \u001b[43m            \u001b[49m\u001b[43menv\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m    885\u001b[0m \u001b[43m        \u001b[49m\u001b[43m)\u001b[49m  \u001b[38;5;66;03m# black blox\u001b[39;00m\n\u001b[1;32m    887\u001b[0m         \u001b[38;5;66;03m# agent_time_taken = time.time() - agent_time_start\u001b[39;00m\n\u001b[1;32m    888\u001b[0m \n\u001b[1;32m    889\u001b[0m         \u001b[38;5;66;03m# print(\"Agent time taken:\" + str(agent_time_taken))\u001b[39;00m\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    895\u001b[0m \n\u001b[1;32m    
896\u001b[0m         \u001b[38;5;66;03m# self.ROUNDS_FACTOR += 1\u001b[39;00m\n\u001b[1;32m    898\u001b[0m d_l_N \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdiscriminator\u001b[38;5;241m.\u001b[39mtrain_on_batch(\n\u001b[1;32m    899\u001b[0m     T_b[data_cols], [real, labels, Labels]\n\u001b[1;32m    900\u001b[0m )\n",
      "File \u001b[0;32m~/Insync/rhr407@gmail.com/Google Drive/PhD/Development/code/RELEVAGAN_DQN_Agent/relevagan.py:533\u001b[0m, in \u001b[0;36mRELEVAGAN_CC.train_dqn_model\u001b[0;34m(self, layers, rounds, run_test, use_score, env)\u001b[0m\n\u001b[1;32m    529\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39magent\u001b[38;5;241m.\u001b[39mcompile(Adam(lr\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1e-3\u001b[39m), metrics\u001b[38;5;241m=\u001b[39m[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmae\u001b[39m\u001b[38;5;124m\"\u001b[39m])\n\u001b[1;32m    531\u001b[0m \u001b[38;5;66;03m# play the game. learn something!\u001b[39;00m\n\u001b[0;32m--> 533\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43magent\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfit\u001b[49m\u001b[43m(\u001b[49m\u001b[43menv\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mnb_steps\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrounds\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvisualize\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mverbose\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m    535\u001b[0m \u001b[38;5;66;03m# self.agent.test(env, nb_episodes=100, visualize=False)\u001b[39;00m\n\u001b[1;32m    537\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39magent\n",
      "File \u001b[0;32m~/anaconda3/lib/python3.9/site-packages/rl/core.py:168\u001b[0m, in \u001b[0;36mAgent.fit\u001b[0;34m(self, env, nb_steps, action_repetition, callbacks, verbose, visualize, nb_max_start_steps, start_step_policy, log_interval, nb_max_episode_steps)\u001b[0m\n\u001b[1;32m    165\u001b[0m callbacks\u001b[38;5;241m.\u001b[39mon_step_begin(episode_step)\n\u001b[1;32m    166\u001b[0m \u001b[38;5;66;03m# This is were all of the work happens. We first perceive and compute the action\u001b[39;00m\n\u001b[1;32m    167\u001b[0m \u001b[38;5;66;03m# (forward step) and then use the reward to improve (backward step).\u001b[39;00m\n\u001b[0;32m--> 168\u001b[0m action \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mforward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mobservation\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    169\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mprocessor \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m    170\u001b[0m     action \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mprocessor\u001b[38;5;241m.\u001b[39mprocess_action(action)\n",
      "File \u001b[0;32m~/anaconda3/lib/python3.9/site-packages/rl/agents/dqn.py:226\u001b[0m, in \u001b[0;36mDQNAgent.forward\u001b[0;34m(self, observation)\u001b[0m\n\u001b[1;32m    224\u001b[0m q_values \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcompute_q_values(state)\n\u001b[1;32m    225\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtraining:\n\u001b[0;32m--> 226\u001b[0m     action \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpolicy\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mselect_action\u001b[49m\u001b[43m(\u001b[49m\u001b[43mq_values\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mq_values\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    227\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m    228\u001b[0m     action \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtest_policy\u001b[38;5;241m.\u001b[39mselect_action(q_values\u001b[38;5;241m=\u001b[39mq_values)\n",
      "File \u001b[0;32m~/anaconda3/lib/python3.9/site-packages/rl/policy.py:226\u001b[0m, in \u001b[0;36mBoltzmannQPolicy.select_action\u001b[0;34m(self, q_values)\u001b[0m\n\u001b[1;32m    224\u001b[0m exp_values \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mexp(np\u001b[38;5;241m.\u001b[39mclip(q_values \u001b[38;5;241m/\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtau, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclip[\u001b[38;5;241m0\u001b[39m], \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclip[\u001b[38;5;241m1\u001b[39m]))\n\u001b[1;32m    225\u001b[0m probs \u001b[38;5;241m=\u001b[39m exp_values \u001b[38;5;241m/\u001b[39m np\u001b[38;5;241m.\u001b[39msum(exp_values)\n\u001b[0;32m--> 226\u001b[0m action \u001b[38;5;241m=\u001b[39m \u001b[43mnp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrandom\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mchoice\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mrange\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mnb_actions\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mp\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mprobs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    227\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m action\n",
      "File \u001b[0;32mmtrand.pyx:935\u001b[0m, in \u001b[0;36mnumpy.random.mtrand.RandomState.choice\u001b[0;34m()\u001b[0m\n",
      "\u001b[0;31mValueError\u001b[0m: probabilities contain NaN"
     ]
    }
   ],
   "source": [
    "# ----------------------------------\n",
    "# RELEVAGAN training driver cell\n",
    "# Configures the GPU, pads the training data to a whole number of\n",
    "# batches, derives the training schedule, and launches training.\n",
    "# ----------------------------------\n",
    "import header\n",
    "import importlib\n",
    "\n",
    "importlib.reload(header)  # pick up edits to header.py without restarting the kernel\n",
    "from header import *  # NOTE(review): star import kept to match the project's convention\n",
    "\n",
    "# Grow GPU memory on demand instead of reserving it all up front.\n",
    "gpu_device = '/device:GPU:1'\n",
    "physical_devices = tf.config.list_physical_devices('GPU')\n",
    "for gpu_instance in physical_devices:\n",
    "    tf.config.experimental.set_memory_growth(gpu_instance, True)\n",
    "\n",
    "# ----------------------------------\n",
    "# Set neurons and batch size\n",
    "# ----------------------------------\n",
    "base_n_count = 256  # base neuron count per network layer\n",
    "batch_size = 256\n",
    "# ----------------------------------\n",
    "\n",
    "# Pad the training set so its length is an exact multiple of batch_size\n",
    "# by duplicating the last `additional` rows (avoids a short final batch).\n",
    "result = train_data\n",
    "remaining = train_data.shape[0] % batch_size\n",
    "\n",
    "if 0 < remaining < train_data.shape[0]:\n",
    "    additional = batch_size - remaining\n",
    "    # iloc is positional, so this is correct for any index\n",
    "    # (the previous .loc slice assumed a default RangeIndex).\n",
    "    _additional = train_data.iloc[-additional:]\n",
    "    result = pd.concat([train_data, _additional]).reset_index(drop=True)\n",
    "\n",
    "print('Result: ' + str(result.shape))\n",
    "\n",
    "# ---------------------------------\n",
    "# Training schedule\n",
    "# ---------------------------------\n",
    "nb_steps = required_epochs * result.shape[0] // batch_size  # total batch iterations\n",
    "\n",
    "# One epoch worth of batches; logging/evaluation happens once per interval.\n",
    "log_interval = result.shape[0] // batch_size\n",
    "\n",
    "print(\"log_interval : \" + str(log_interval))\n",
    "print(\"Total Batch Iterations: \" + str(nb_steps))\n",
    "\n",
    "rand_noise_dim = 100  # dimensionality of the generator's noise input\n",
    "\n",
    "k_d = 1  # number of critic network updates per adversarial training step\n",
    "k_g = 1  # number of generator network updates per adversarial training step\n",
    "\n",
    "critic_pre_train_steps = 100  # critic pre-training steps before adversarial training\n",
    "\n",
    "# No checkpoints to resume from: start training from scratch.\n",
    "generator_model_path, discriminator_model_path, loss_pickle_path = None, None, None\n",
    "\n",
    "show = True\n",
    "train = result  # padded training frame (botnet data with classification labels)\n",
    "\n",
    "# Split columns into label columns and feature columns.\n",
    "label_cols = [i for i in train.columns if 'Label' in i]\n",
    "data_cols = [i for i in train.columns if i not in label_cols]\n",
    "print(data_cols)\n",
    "\n",
    "train_no_label = train[data_cols]\n",
    "train_no_label = round(train_no_label, 4)  # limit features to 4 decimal places\n",
    "\n",
    "test_size = train.shape[0]\n",
    "learning_rate = 5e-4\n",
    "\n",
    "# Timestamped run tag used to name cached models/figures for this run.\n",
    "TODAY = DATA_SET + '_' + str(datetime.datetime.now())\n",
    "print(TODAY)\n",
    "\n",
    "arguments = [rand_noise_dim, nb_steps, batch_size,\n",
    "             k_d, k_g, critic_pre_train_steps, log_interval, learning_rate, base_n_count,\n",
    "             CACHE_PATH, FIGS_PATH, show, test_size, gpu_device, EVALUATION_PARAMETER, TODAY, DATA_SET]\n",
    "\n",
    "best_losses = train_RELEVAGAN_CC(arguments, train, Train, data_cols)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "celltoolbar": "Slideshow",
  "colab": {
   "collapsed_sections": [],
   "name": "RELEVAGAN.ipynb",
   "provenance": []
  },
  "interpreter": {
   "hash": "0754585e5bea998e5d67e8f88be1e2a4051f453a7d5aedf516d053743049d686"
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
