{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'1.4.1'"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import scipy\n",
    "scipy.__version__"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Python 3.6.10 :: Anaconda, Inc.\r\n"
     ]
    }
   ],
   "source": [
    "!python -V"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# TODS"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Introduction Summary"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "TODS is a full-stack automated machine learning system for outlier detection on multivariate time-series data. TODS provides exhaustive modules for building machine learning-based outlier detection systems, including: data processing, time series processing, feature analysis (extraction), detection algorithms, and a reinforcement module. The functionalities provided via these modules include data preprocessing for general purposes, time series data smoothing/transformation, extracting features from time/frequency domains, various detection algorithms, and involving human expertise to calibrate the system. Three common outlier detection scenarios on time-series data can be performed: point-wise detection (time points as outliers), pattern-wise detection (subsequences as outliers), and system-wise detection (sets of time series as outliers), and a wide range of corresponding algorithms are provided in TODS. This package is developed by DATA Lab @ Texas A&M University."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Packages"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Obtaining tods from git+https://github.com/datamllab/tods.git#egg=tods\n",
      "  Cloning https://github.com/datamllab/tods.git to ./src/tods\n",
      "  Running command git clone -q https://github.com/datamllab/tods.git '/Users/wangyanghe/Desktop/Research/Tods Notebook/src/tods'\n",
      "Requirement already satisfied: Jinja2 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tods) (2.11.3)\n",
      "Requirement already satisfied: numpy==1.18.2 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tods) (1.18.2)\n",
      "Requirement already satisfied: simplejson==3.12.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tods) (3.12.0)\n",
      "Requirement already satisfied: scikit-learn==0.22.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tods) (0.22)\n",
      "Requirement already satisfied: statsmodels==0.11.1 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tods) (0.11.1)\n",
      "Requirement already satisfied: PyWavelets>=1.1.1 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tods) (1.1.1)\n",
      "Requirement already satisfied: pillow==7.1.2 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tods) (7.1.2)\n",
      "Requirement already satisfied: tensorflow==2.2 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tods) (2.2.0)\n",
      "Requirement already satisfied: keras in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tods) (2.4.3)\n",
      "Requirement already satisfied: pyod in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tods) (0.8.7)\n",
      "Requirement already satisfied: nimfa==1.4.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tods) (1.4.0)\n",
      "Requirement already satisfied: stumpy==1.4.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tods) (1.4.0)\n",
      "Requirement already satisfied: more-itertools==8.5.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tods) (8.5.0)\n",
      "Requirement already satisfied: MarkupSafe>=0.23 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from Jinja2->tods) (1.1.1)\n",
      "Requirement already satisfied: joblib>=0.11 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from scikit-learn==0.22.0->tods) (1.0.1)\n",
      "Requirement already satisfied: scipy>=0.17.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from scikit-learn==0.22.0->tods) (1.4.1)\n",
      "Requirement already satisfied: patsy>=0.5 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from statsmodels==0.11.1->tods) (0.5.1)\n",
      "Requirement already satisfied: pandas>=0.21 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from statsmodels==0.11.1->tods) (1.0.3)\n",
      "Requirement already satisfied: google-pasta>=0.1.8 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorflow==2.2->tods) (0.2.0)\n",
      "Requirement already satisfied: protobuf>=3.8.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorflow==2.2->tods) (3.15.8)\n",
      "Requirement already satisfied: six>=1.12.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorflow==2.2->tods) (1.15.0)\n",
      "Requirement already satisfied: grpcio>=1.8.6 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorflow==2.2->tods) (1.37.0)\n",
      "Requirement already satisfied: keras-preprocessing>=1.1.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorflow==2.2->tods) (1.1.2)\n",
      "Requirement already satisfied: h5py<2.11.0,>=2.10.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorflow==2.2->tods) (2.10.0)\n",
      "Requirement already satisfied: opt-einsum>=2.3.2 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorflow==2.2->tods) (3.3.0)\n",
      "Requirement already satisfied: termcolor>=1.1.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorflow==2.2->tods) (1.1.0)\n",
      "Requirement already satisfied: wheel>=0.26; python_version >= \"3\" in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorflow==2.2->tods) (0.36.2)\n",
      "Requirement already satisfied: tensorboard<2.3.0,>=2.2.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorflow==2.2->tods) (2.2.2)\n",
      "Requirement already satisfied: tensorflow-estimator<2.3.0,>=2.2.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorflow==2.2->tods) (2.2.0)\n",
      "Requirement already satisfied: wrapt>=1.11.1 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorflow==2.2->tods) (1.12.1)\n",
      "Requirement already satisfied: gast==0.3.3 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorflow==2.2->tods) (0.3.3)\n",
      "Requirement already satisfied: astunparse==1.6.3 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorflow==2.2->tods) (1.6.3)\n",
      "Requirement already satisfied: absl-py>=0.7.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorflow==2.2->tods) (0.12.0)\n",
      "Requirement already satisfied: pyyaml in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from keras->tods) (5.4.1)\n",
      "Requirement already satisfied: matplotlib in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from pyod->tods) (3.3.4)\n",
      "Requirement already satisfied: numba>=0.35 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from pyod->tods) (0.53.1)\n",
      "Requirement already satisfied: pytz>=2017.2 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from pandas>=0.21->statsmodels==0.11.1->tods) (2021.1)\n",
      "Requirement already satisfied: python-dateutil>=2.6.1 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from pandas>=0.21->statsmodels==0.11.1->tods) (2.8.1)\n",
      "Requirement already satisfied: requests<3,>=2.21.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (2.23.0)\n",
      "Requirement already satisfied: werkzeug>=0.11.15 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (1.0.1)\n",
      "Requirement already satisfied: setuptools>=41.0.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (56.0.0)\n",
      "Requirement already satisfied: markdown>=2.6.8 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (3.3.4)\n",
      "Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (1.8.0)\n",
      "Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (0.4.4)\n",
      "Requirement already satisfied: google-auth<2,>=1.6.3 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (1.28.1)\n",
      "Requirement already satisfied: cycler>=0.10 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from matplotlib->pyod->tods) (0.10.0)\n",
      "Requirement already satisfied: kiwisolver>=1.0.1 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from matplotlib->pyod->tods) (1.3.1)\n",
      "Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.3 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from matplotlib->pyod->tods) (2.4.7)\n",
      "Requirement already satisfied: llvmlite<0.37,>=0.36.0rc1 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from numba>=0.35->pyod->tods) (0.36.0)\n",
      "Requirement already satisfied: certifi>=2017.4.17 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (2020.12.5)\n",
      "Requirement already satisfied: idna<3,>=2.5 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (2.10)\n",
      "Requirement already satisfied: chardet<4,>=3.0.2 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (3.0.4)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (1.25.11)\n",
      "Requirement already satisfied: importlib-metadata; python_version < \"3.8\" in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (3.10.0)\n",
      "Requirement already satisfied: requests-oauthlib>=0.7.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (1.3.0)\n",
      "Requirement already satisfied: rsa<5,>=3.1.4; python_version >= \"3.6\" in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (4.7.2)\n",
      "Requirement already satisfied: cachetools<5.0,>=2.0.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (4.2.1)\n",
      "Requirement already satisfied: pyasn1-modules>=0.2.1 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (0.2.8)\n",
      "Requirement already satisfied: zipp>=0.5 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (3.4.1)\n",
      "Requirement already satisfied: typing-extensions>=3.6.4; python_version < \"3.8\" in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (3.7.4.3)\n",
      "Requirement already satisfied: oauthlib>=3.0.0 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (3.1.0)\n",
      "Requirement already satisfied: pyasn1>=0.1.3 in /Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages (from rsa<5,>=3.1.4; python_version >= \"3.6\"->google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2->tods) (0.4.8)\n",
      "Installing collected packages: tods\n",
      "  Attempting uninstall: tods\n",
      "    Found existing installation: tods 0.0.2\n",
      "    Uninstalling tods-0.0.2:\n",
      "      Successfully uninstalled tods-0.0.2\n",
      "  Running setup.py develop for tods\n",
      "Successfully installed tods\n"
     ]
    }
   ],
   "source": [
    "!pip install -e git+https://github.com/datamllab/tods.git#egg=tods"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/Users/wangyanghe/Desktop/Research/Tods Notebook/src/tods\n"
     ]
    }
   ],
   "source": [
    "%cd src/tods"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Branch 'wangyanghe' set up to track remote branch 'wangyanghe' from 'origin'.\r\n",
      "Switched to a new branch 'wangyanghe'\r\n"
     ]
    }
   ],
   "source": [
    "!git checkout wangyanghe"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/Users/wangyanghe/Desktop/Research/Tods Notebook/src/tods/examples/sk_examples\n"
     ]
    }
   ],
   "source": [
    "%cd examples/sk_examples"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "500_UCR_Anomaly_robotDOG1_10000_19280_19360.txt\r\n",
      "DeepLog_test.py\r\n",
      "IsolationForest_test.py\r\n",
      "MatrixProfile_test.py\r\n",
      "Telemanom_test.py\r\n"
     ]
    }
   ],
   "source": [
    "!ls"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Imports"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "import argparse\n",
    "import os\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from sklearn.metrics import precision_recall_curve\n",
    "from sklearn.metrics import accuracy_score\n",
    "from sklearn.metrics import confusion_matrix\n",
    "from sklearn.metrics import classification_report\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn import metrics"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tods.tods_skinterface.primitiveSKI.detection_algorithm.DeepLog_skinterface import DeepLogSKI"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.preprocessing.data module is  deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.preprocessing. Anything that cannot be imported from sklearn.preprocessing is now part of the private API.\n",
      "  warnings.warn(message, FutureWarning)\n",
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.decomposition.truncated_svd module is  deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.decomposition. Anything that cannot be imported from sklearn.decomposition is now part of the private API.\n",
      "  warnings.warn(message, FutureWarning)\n",
      "d3m.primitives.tods.detection_algorithm.LSTMODetector: Primitive is not providing a description through its docstring.\n"
     ]
    }
   ],
   "source": [
    "from tods.tods_skinterface.primitiveSKI.detection_algorithm.Telemanom_skinterface import TelemanomSKI"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [],
   "source": [
    "from d3m import index\n",
    "from d3m.metadata.base import ArgumentType\n",
    "from d3m.metadata.pipeline import Pipeline, PrimitiveStep\n",
    "from axolotl.backend.simple import SimpleRunner\n",
    "from tods import generate_dataset, generate_problem\n",
    "from tods.searcher import BruteForceSearch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tods import generate_dataset, load_pipeline, evaluate_pipeline"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Dataset"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### UCR Dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "data = np.loadtxt(\"./500_UCR_Anomaly_robotDOG1_10000_19280_19360.txt\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "shape: (20000,)\n",
      "datatype of data: float64\n",
      "First 5 rows:\n",
      " [0.145299 0.128205 0.094017 0.076923 0.111111]\n"
     ]
    }
   ],
   "source": [
    "print(\"shape:\", data.shape)\n",
    "print(\"datatype of data:\",data.dtype)\n",
    "print(\"First 5 rows:\\n\", data[:5])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_train = np.expand_dims(data[:10000], axis=1)\n",
    "X_test = np.expand_dims(data[10000:], axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "First 5 rows train:\n",
      " [[0.145299]\n",
      " [0.128205]\n",
      " [0.094017]\n",
      " [0.076923]\n",
      " [0.111111]]\n",
      "First 5 rows test:\n",
      " [[0.076923]\n",
      " [0.076923]\n",
      " [0.076923]\n",
      " [0.094017]\n",
      " [0.145299]]\n"
     ]
    }
   ],
   "source": [
    "print(\"First 5 rows train:\\n\", X_train[:5])\n",
    "print(\"First 5 rows test:\\n\", X_test[:5])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Yahoo Dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "data_yahoo = pd.read_csv('../../datasets/anomaly/raw_data/yahoo_sub_5.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "shape: (1400, 7)\n",
      "First 5 rows:\n",
      "    timestamp  value_0   value_1   value_2  value_3  value_4  anomaly\n",
      "0          1    12183  0.000000  3.716667        5     2109        0\n",
      "1          2    12715  0.091758  3.610833       60     3229        0\n",
      "2          3    12736  0.172297  3.481389       88     3637        0\n",
      "3          4    12716  0.226219  3.380278       84     1982        0\n",
      "4          5    12739  0.176358  3.193333      111     2751        0\n"
     ]
    }
   ],
   "source": [
    "print(\"shape:\", data_yahoo.shape)\n",
    "print(\"First 5 rows:\\n\", data_yahoo[:5])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## SK Example 1: DeepLog\n",
     "\n",
     "**Note (review):** the evaluation cells below use `prediction_labels_train` as `y_true` and `prediction_labels_test` as `y_pred` — both are model predictions, not ground-truth labels, so the reported accuracy/F1/ROC values do not measure detection quality. Per the dataset filename, the labeled anomaly spans indices 19280–19360; confirm the intended ground truth before interpreting these metrics."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "282/282 [==============================] - 1s 5ms/step - loss: 0.4239 - val_loss: 0.2694\n",
      "Epoch 2/10\n",
      "282/282 [==============================] - 1s 2ms/step - loss: 0.3344 - val_loss: 0.2818\n",
      "Epoch 3/10\n",
      "282/282 [==============================] - 1s 2ms/step - loss: 0.3444 - val_loss: 0.2806\n",
      "Epoch 4/10\n",
      "282/282 [==============================] - 1s 2ms/step - loss: 0.3575 - val_loss: 0.2731\n",
      "Epoch 5/10\n",
      "282/282 [==============================] - 1s 2ms/step - loss: 0.3364 - val_loss: 0.2783\n",
      "Epoch 6/10\n",
      "282/282 [==============================] - 1s 2ms/step - loss: 0.3447 - val_loss: 0.2742\n",
      "Epoch 7/10\n",
      "282/282 [==============================] - 1s 2ms/step - loss: 0.3357 - val_loss: 0.2586\n",
      "Epoch 8/10\n",
      "282/282 [==============================] - 1s 2ms/step - loss: 0.3392 - val_loss: 0.2804\n",
      "Epoch 9/10\n",
      "282/282 [==============================] - 1s 2ms/step - loss: 0.3442 - val_loss: 0.2691\n",
      "Epoch 10/10\n",
      "282/282 [==============================] - 1s 2ms/step - loss: 0.3475 - val_loss: 0.2683\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/validation.py:933: FutureWarning: Passing attributes to check_is_fitted is deprecated and will be removed in 0.23. The attributes argument is ignored.\n",
      "  \"argument is ignored.\", FutureWarning)\n",
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/validation.py:933: FutureWarning: Passing attributes to check_is_fitted is deprecated and will be removed in 0.23. The attributes argument is ignored.\n",
      "  \"argument is ignored.\", FutureWarning)\n",
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/validation.py:933: FutureWarning: Passing attributes to check_is_fitted is deprecated and will be removed in 0.23. The attributes argument is ignored.\n",
      "  \"argument is ignored.\", FutureWarning)\n"
     ]
    }
   ],
   "source": [
    "transformer = DeepLogSKI()\n",
    "transformer.fit(X_train)\n",
    "prediction_labels_train = transformer.predict(X_train)\n",
    "prediction_labels_test = transformer.predict(X_test)\n",
    "prediction_score = transformer.predict_score(X_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Primitive:  d3m.primitives.tods.detection_algorithm.deeplog(hyperparams=Hyperparams({'contamination': 0.1, 'window_size': 1, 'step_size': 1, 'return_subseq_inds': False, 'use_columns': (), 'exclude_columns': (), 'return_result': 'new', 'use_semantic_types': False, 'add_index_columns': False, 'error_on_no_input': True, 'return_semantic_type': 'https://metadata.datadrivendiscovery.org/types/Attribute', 'hidden_size': 64, 'loss': 'mean_squared_error', 'optimizer': 'Adam', 'epochs': 10, 'batch_size': 32, 'dropout_rate': 0.2, 'l2_regularizer': 0.1, 'validation_size': 0.1, 'features': 1, 'stacked_layers': 1, 'preprocessing': True, 'verbose': 1}), random_seed=0)\n",
      "Prediction Labels\n",
      " [[0]\n",
      " [0]\n",
      " [0]\n",
      " ...\n",
      " [0]\n",
      " [0]\n",
      " [0]]\n",
      "Prediction Score\n",
      " [[0.        ]\n",
      " [0.3569443 ]\n",
      " [0.3569443 ]\n",
      " ...\n",
      " [0.77054234]\n",
      " [0.4575615 ]\n",
      " [0.17499346]]\n"
     ]
    }
   ],
   "source": [
    "print(\"Primitive: \", transformer.primitive)\n",
    "print(\"Prediction Labels\\n\", prediction_labels_test)\n",
    "print(\"Prediction Score\\n\", prediction_score)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "y_true = prediction_labels_train\n",
    "y_pred = prediction_labels_test\n",
    "precision, recall, thresholds = precision_recall_curve(y_true, y_pred)\n",
    "f1_scores = 2*recall*precision/(recall+precision)\n",
    "fpr, tpr, threshold = metrics.roc_curve(y_true, y_pred)\n",
    "roc_auc = metrics.auc(fpr, tpr)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy Score:  0.903\n"
     ]
    }
   ],
   "source": [
    "print('Accuracy Score: ', accuracy_score(y_true, y_pred))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[8646,  358],\n",
       "       [ 612,  384]])"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "confusion_matrix(y_true, y_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0       0.93      0.96      0.95      9004\n",
      "           1       0.52      0.39      0.44       996\n",
      "\n",
      "    accuracy                           0.90     10000\n",
      "   macro avg       0.73      0.67      0.69     10000\n",
      "weighted avg       0.89      0.90      0.90     10000\n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(classification_report(y_true, y_pred))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Best threshold:  1\n",
      "Best F1-Score:  0.4418872266973533\n"
     ]
    }
   ],
   "source": [
    "print('Best threshold: ', thresholds[np.argmax(f1_scores)])\n",
    "print('Best F1-Score: ', np.max(f1_scores))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEWCAYAAABrDZDcAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAq5UlEQVR4nO3deZgU1bnH8e/L4owLoAJuLIICCnFBmWhQ476gQQkBEY0acMGIJka9XMk1V40ak7gkamRkE0FUEDUKGoVoRDHeqOCGwAAiCAygEBYVWWR57x+nxgzDLA0z1TUz9fs8zzzTVX26+y2WfqvOOfUec3dERCS96iQdgIiIJEuJQEQk5ZQIRERSTolARCTllAhERFJOiUBEJOWUCEREUk6JQKQcZvaZma03s7Vm9rmZjTSzPYo9f5yZvWZmX5vZl2b2gpl1KPEeDc3sfjNbFL3Pp9F2k+wfkcj2lAhEKnauu+8BdASOAn4NYGadgb8D44EDgNbAR8BbZnZQ1GYX4B/A94AuQEOgM7ASOCarRyFSBtOdxSJlM7PPgCvc/dVo+27ge+7+IzN7E/jY3fuXeM3LwAp3v9TMrgB+Bxzs7muzHL5IRnRFIJIhM2sOnA3MM7PdgOOAp0tpOg44I3p8OjBRSUCqMyUCkYo9b2ZfA4uB5cCtwN6E/z/LSmm/DCjq/29cRhuRakOJQKRiP3b3BsDJwKGEL/nVwFZg/1La7w/8O3q8sow2ItWGEoFIhtz9DWAkcK+7fwP8Czi/lKa9CAPEAK8CZ5nZ7lkJUmQnKBGI7Jj7gTPM7EhgIPAzM/ulmTUws73M7E7CrKDfRu1HE7qUnjWzQ82sjpk1NrP/MbNzEjkCkRKUCER2gLuvAB4DbnH3fwJnAT8hjAMsJEwvPcHdP4nabyQMGM8GXgG+At4ldC+9k/UDECmFpo+KiKScrghERFJOiUBEJOWUCEREUk6JQEQk5eolHcCOatKkibdq1SrpMEREapT33nvv3+7etLTnalwiaNWqFdOmTUs6DBGRGsXMFpb1nLqGRERSTolARCTllAhERFJOiUBEJOWUCEREUi62RGBmI8xsuZnNKON5M7MHzWyemU03s6PjikVERMoW5xXBSMJi3WU5G2gb/fQDHo4xFhERKUNsicDdpwCrymnSDXjMg7eBPc1MKzmJiJRQWAi33AIFBfG8f5I3lDUjLNhRpDDat936rmbWj3DVQMuWLbMSnIhIktzhtdcgPx/Gj4etW2H//aF9+6r/rBoxWOzuQ909z93zmjYt9Q5pEZFaYc0aePDB8IV/+unwxhtw443w6adw9dXxfGaSVwRLgBbFtptH+0REUuejj8LZ/+OPw7p1cOyxMGoU9OoFubnxfnaSiWACcK2ZjQWOBb509+26hUREaquNG+HZZ0MCeOut8IV/0UXQvz906pS9OGJLBGY2BjgZaGJmhcCtQH0Adx8MvAScA8wD1gF944pFRKQ6WbQIhgyB4cNh+XJo0wbuuw/69IG9985+PLElAne/sILnHbgmrs8XEalOtm6FV18NZ/8vvBD2de0K11wTxgLqJDhiW+PKUIuI1CSrV8PIkfDww/DJJ9C0Kdx0E1x1FRx4YNLRBUoEIiIxeP99GDQIxoyB9evhuOPgttugRw/IyUk6um0pEYiIVJENG+Dpp0MCeOcd2G03uOSSMO2zY8ekoyubEoGISCUtWACDB8Mjj8DKlXDIIfDAA3DppbDnnklHVzElAhGRnbB1K0yaFM7+X3opDPZ26xamfp56KpglHWHmlAhERHbAypUwYkS4Apg/H/bdF37zG+jXD5o3Tzq6naNEICKSgalTw9n/2LHhRrATT4S77oLu3WGXXZKOrnKUCEREyrB+ffjiz8+HadNgjz3gssvC4O/hhycdXdVRIhARKWHevND1M2JEuA+gQwd46KEwA6hhw6Sjq3pKBCIiwJYtYdA3Px8mToR69UK3T//+cNJJNWvw
d0cpEYhIqq1YEaZ9Dh4MCxfCAQeEG7+uvDI8TgMlAhFJHXd4++1w9j9uHHz7LZxyCtx7b5gCWr9+0hFmlxKBiKTGN9+Ekg/5+fDBB9CgQZj2efXVYRwgrZQIRKTWmzs3FH179FH48ssw4+fhh+Hii8NMoLRTIhCRWmnzZnjxxXD2/8orobunR49Q9vn442v34O+OUiIQkVrliy/Cgi9DhsDixeFu3zvugCuugP32Szq66kmJQERqPPew1OOgQWHpx02bwmIvDz4YFn+pp2+6cumPR0RqrLVr4YknQvfP9OnQqFHo+vn5z0MFUMmMEoGI1DgFBeHLf9Qo+PrrUOt/2DC48ELYffeko6t5lAhEpEbYtAkmTAjdP5Mnh0JvvXqFO39/8AMN/laGEoGIVGvLlsHQoeFn6dKwzu/vfx+Kv+2zT9LR1Q5KBCJS7bjDlCnh7P+558JU0C5dQhmIc86BunWTjrB2USIQkWrjq69g9OjQ/z9rFuy1F1x3XRj8bdMm6ehqLyUCEUncjBnhy3/06DATqFOnUAL6ggvCAvASLyUCEUnEt9+Gbp/8/NANlJMTvvivuQa+/30N/maTEoGIZFVhYRj4HTYMPv8cWreGu++Gvn2hSZOko0snJQIRiZ07vPZaOPsfPx62bg2Dvv37w1lnafA3aUoEIhKbNWvgscdCApgzBxo3hhtvhKuugoMOSjo6KaJEICJV7qOPwpf/44/DunVw7LHhLuBevSA3N+nopCQlAhGpEhs3hoJv+fmhAFxuLlx0Uej+6dQp6eikPEoEIlIpixaFks/Dh8Py5WG+/333QZ8+sPfeSUcnmagT55ubWRczm2Nm88xsYCnPtzSzyWb2gZlNN7Nz4oxHRKrG1q3w97/Dj38cZv384Q+h3s/EiWEs4IYblARqktiuCMysLjAIOAMoBKaa2QR3n1Ws2W+Ace7+sJl1AF4CWsUVk4hUzurVMHJkWObxk0+gaVO46aYw+HvggUlHJzsrzq6hY4B57j4fwMzGAt2A4onAgYbR40bA0hjjEZGd9P77oe7PmDGwfj0cdxzceiv07BluBJOaLc5E0AxYXGy7EDi2RJvbgL+b2S+A3YHTS3sjM+sH9ANo2bJllQcqItvbsAGefjokgHfeCaUeLr44DP527Jh0dFKVYh0jyMCFwEh3bw6cA4w2s+1icveh7p7n7nlNmzbNepAiabJgQejuad4cLr00dAfdfz8sWRLuCFYSqH3ivCJYArQott082lfc5UAXAHf/l5nlAk2A5THGJSIlbN0KkyaFs/+XXgp1frp1C3V/Tj1VdX9quzgTwVSgrZm1JiSA3sBFJdosAk4DRppZeyAXWBFjTCJSzMqVocrn4MEwfz7suy/cfDP06wctWlT8eqkdYksE7r7ZzK4FJgF1gRHuPtPMbgemufsE4EZgmJldTxg47uPuHldMIhJMnRrO/seODTeC/fCHcNdd0L17WAJS0iXWG8rc/SXClNDi+24p9ngWcHycMYhIsH59+OLPz4dp08Ii7337hsHfww9POjpJku4sFqnl5s0LXT8jRoSB3/bt4aGH4JJLoGHDil8vtZ8SgUgttGVLGPTNzw93+9atG7p9rrkGTjpJg7+yLSUCkVpkxQp45JFwBbBwIey/P9x2G1x5JRxwQNLRSXWlRCBSw7nD22+Hs/9x48ISkKecAvfeG6aA1q+fdIRS3SkRiNRQ33wTSj7k58MHH0CDBmHa59VXQ4cOSUcnNYkSgUgNM3duKPr26KPw5Zdw2GFh+6c/DclAZEcpEYjUAJs3w4svhrP/V16BevVCwbf+/eGEEzT4K5WjRCBSjX3xRVjwZcgQWLw41P+54w644grYb7+ko5PaQolApJpxD0s9DhoUln7ctAlOPx0eeADOPTdcDYhUJf2TEqkm1q6FJ54I3T/Tp0OjRqHr5+qr4ZBDko5OarOME4GZ7ebu6+IMRiSNCgrCl/+oUfD113DkkaHc80UXhTIQInGrMBGY
2XHAcGAPoKWZHQlc5e794w5OpLbatAkmTAjdP5Mnh0Jv558f7vz9wQ80+CvZlckVwZ+Bs4AJAO7+kZmdGGtUIrXUsmXhbH/oUFi6FFq2DFU/L78c9tkn6egkrTLqGnL3xbbtKcqWeMIRqX3cYcqUcPb/3HNhKuhZZ4UyEOecE+oAiSQpk0SwOOoecjOrD1wHFMQblkjN99VXMHp06P+fNQv22gt++csw+NumTdLRifxHJong58ADhMXolwB/BzQ+IFKGGTPCl//o0WEmUKdOoQT0BReEBeBFqptMEsEh7v7T4jvM7HjgrXhCEql5vv02dPvk54duoJyc8MV/zTXw/e9r8Feqt0wSwV+AozPYJ5I6hYVh4HfYMPj8c2jdGu6+O6z81aRJ0tGJZKbMRGBmnYHjgKZmdkOxpxoS1iAWSSV3eO21cPY/fjxs3RoGffv3D4PAGvyVmqa8K4JdCPcO1AOK1zT8CugZZ1Ai1dGaNfDYYyEBzJkDjRvDjTfCVVfBQQclHZ3IziszEbj7G8AbZjbS3RdmMSaRauWjj8KX/+OPw7p1cOyx4S7gXr0gNzfp6EQqL5MxgnVmdg/wPeC7f/bufmpsUYkkbOPGUPAtPz8UgMvNDSUf+vcPs4BEapNMEsETwFNAV8JU0p8BK+IMSiQpixaFks/Dh8Py5WG+/333QZ8+sPfeSUcnEo9MEkFjd3/EzK4r1l00Ne7ARLJl61Z49dVw9v/CC2Ff167h7P+MM6BOnWTjE4lbJolgU/R7mZn9CFgK6NxIarzVq2HkyLDM4yefQNOmcNNNYfD3wAOTjk4kezJJBHeaWSPgRsL9Aw2BX8UZlEic3n8/1P0ZMwbWr4fjjoNbbw1LP+bkJB2dSPZVmAjc/cXo4ZfAKfDdncUiNcaGDfD00yEBvPNOKPVw8cWh+6djx6SjE0lWeTeU1QV6EWoMTXT3GWbWFfgfYFfgqOyEKLLzFiwIVT4feQRWroR27eD+++FnP4M990w6OpHqobwrgkeAFsC7wINmthTIAwa6+/NZiE1kp2zdCpMmhbP/l14KdX66dQt1f049VXV/REoqLxHkAUe4+1YzywU+Bw5295XZCU1kx6xcGap8Dh4M8+fDvvvCzTdDv37QokXS0YlUX+VNjPvW3bcCuPsGYP6OJgEz62Jmc8xsnpkNLKNNLzObZWYzzezJHXl/EYCpU8M8/2bN4L//O/weOzbcE3DHHUoCIhUp74rgUDObHj024OBo2wB39yPKe+NojGEQcAZQCEw1swnuPqtYm7bAr4Hj3X21mWmxPsnI+vXhyz4/H6ZNC4u89+0bBn8PPzzp6ERqlvISQftKvvcxwDx3nw9gZmOBbsCsYm2uBAa5+2oAd19eyc+UWm7evND1M2JEuA+gfXt46CG45BJo2DDp6ERqpvKKzlW20FwzYHGx7ULg2BJt2gGY2VuE0ta3ufvEkm9kZv2AfgAtW7asZFhS02zZEgZ98/Nh4sRQ5rl79zD4e9JJGvwVqayMFq+P+fPbAicDzYEpZna4u68p3sjdhwJDAfLy8jzLMUpCVqwI0z4HD4aFC2H//eG22+DKK+GAA5KOTqT2iDMRLCFMPy3SPNpXXCHwjrtvAhaY2VxCYlAto5Ryh7ffDmf/48aFJSBPPhnuvTdMAa1fP+kIRWqfjMppmdmuZnbIDr73VKCtmbU2s12A3sCEEm2eJ1wNYGZNCF1F83fwc6QW+OabUPGzU6dQ8mH8+HDmP3MmTJ4cyj8oCYjEo8JEYGbnAh8CE6PtjmZW8gt9O+6+GbgWmAQUAOPcfaaZ3W5m50XNJgErzWwWMBkYoPsU0mXuXLj++jDl88orYdOmUARuyZIwCNyhQ9IRitR+5l5+l7uZvQecCrzu7kdF+z5290Qm6eXl5fm0adOS+GipIps3w4svhu6fV16BevWgR48w+HvCCRr8FYmDmb3n7nmlPZdRGWp3/9K2/d+pAVvZYV98Ebp/hgyBxYuhefNww9cV
V8B++yUdnUh6ZZIIZprZRUDd6AawXwL/F29YUlu4h6UeBw0KSz9u2gSnnQYPPADnnhuuBkQkWZn8N/wFcDOwEXiS0K9/Z5xBSc23di088UTo/pk+HRo1Cnf9/vzncOihSUcnIsVlkggOdfebCclApFwFBeHLf9Qo+PprOPJIGDo0LPy+++5JRycipckkEdxnZvsBzwBPufuMmGOSGmbTJpgwIXT/TJ4Mu+wC558frgA6d9bgr0h1l8kKZadEiaAXMMTMGhISgrqHUm7ZsnC2P3QoLF0KLVvCXXfB5ZfDPiofKFJjZDRU5+6fExanmQz8N3ALGidIJXeYMiWc/T/3XJgKetZZYe7/j34U6gCJSM1SYSIws/bABUAPYCXwFGEhe0mRr76C0aND//+sWWGZx1/+Mgz+tm2bdHQiUhmZXBGMIHz5n+XuS2OOR6qZGTPCl//o0WEm0NFHh0JwvXuHBeBFpObLZIygczYCkerj229Dt09+fugGysmBCy4Ig7/HHKPBX5HapsxEYGbj3L2XmX3MtncSZ7RCmdQ8hYVh4HfYMPj8c2jVCv74R7jsMmjSJOnoRCQu5V0RXBf97pqNQCQZ7mHK56BBoeLn1q1w9tnh7L9LFw3+iqRBeSuULYse9nf3m4o/Z2Z/BG7a/lVSU3z5Zbjp6+GHYfZs2HtvuOGGMPh70EFJRyci2ZTJegRnlLLv7KoORLLjo4/gqqvCCl/XXRfW+R01KnQL3X23koBIGpU3RnA10B84yMymF3uqAfBW3IFJ1dm4MRR8y88PBeByc+HCC0P3T16pRWlFJE3KGyN4EngZ+D0wsNj+r919VaxRSZVYtCiUfB4+HJYvh4MPhvvugz59QleQiAiUnwjc3T8zs2tKPmFmeysZVE9bt8Krr4az/xdeCIPBXbuGRV/OOAPqZLQ4qYikSUVXBF2B9wjTR4vPHndAvcnVyOrVMHJkGPz95BNo2hRuugn69QvTQEVEylLerKGu0e/W2QtHdtT774ez/yefhPXrQ7XPW28Ni73n5CQdnYjUBJnUGjoe+NDdvzGzi4GjgfvdfVHs0UmpNmyAp58OCeDtt0Oph4svhquvhqOOSjo6EalpMukxfhhYZ2ZHEorNfQqMjjUqKdWCBTBwILRoAZdeCqtWwf33w5Il4Y5gJQER2RmZFJ3b7O5uZt2Ah9z9ETO7PO7AJNi6FSZNCmf/f/tbqPPTrVuY+nnaaar7IyKVl0ki+NrMfg1cAvzQzOoA9eMNS1auhEcfDYO/8+fDvvvCzTeHwd8WLZKOTkRqk0wSwQXARcBl7v65mbUE7ok3rPSaOjWc/Y8dG8YCfvhD+N3v4Cc/CUtAiohUtUzKUH9uZk8A3zezrsC77v5Y/KGlx/r18NRTofDbtGlhkfc+fcLg7xGq8SoiMatwsNjMegHvAucT1i1+x8x6xh1YGnz6KQwYAM2bQ9++8M038Je/hPV/H35YSUBEsiOTrqGbge+7+3IAM2sKvAo8E2dgtdWWLfDyy+Hsf+LEUOa5e/cw+HvyyRr8FZHsyyQR1ClKApGVZDbtVIpZsSIs8Th4MCxcCPvvH278uvJKaNYs6ehEJM0ySQQTzWwSMCbavgB4Kb6Qag93eOedcPY/blxYAvLkk+Hee8MU0PqaeyUi1UAmg8UDzOwnwAnRrqHu/ly8YdVs69aFkg/5+fDBB9CgQTjz798fOnRIOjoRkW2Vtx5BW+Be4GDgY+C/3H1JtgKriebODYO8I0fCmjVw2GFh+6c/DclARKQ6Kq+vfwTwItCDUIH0Lzv65mbWxczmmNk8MxtYTrseZuZmVuOWSdm8GZ5/Hs48Ew45BB56CM46C6ZMgenTw9KPSgIiUp2V1zXUwN2HRY/nmNn7O/LGZlYXGERY6rIQmGpmE9x9Vol2DYDrgHd25P2T9sUXYcGXIUNg8eIwBfSOO+CKK2C//ZKOTkQkc+UlglwzO4r/rEOwa/Ftd68oMRwDzHP3+QBmNhboBswq
0e4O4I/AgB2MPevcw1KP+fnwzDOwaVOo9/PAA3DuuVAvk6F3EZFqpryvrmXAn4ptf15s24FTK3jvZsDiYtuFwLHFG5jZ0UALd/+bmZWZCMysH9APoGXLlhV8bNVbuxaeeCIkgOnToVGjMPD785/DoYdmPRwRkSpV3sI0p8T5wVHxuj8BfSpq6+5DgaEAeXl5HmdcxRUUhMHeUaPgq6/gyCNDueeLLgplIEREaoM4OzOWAMXrZDaP9hVpABwGvG7hdtr9gAlmdp67T4sxroy8+SacdFKY63/++eEKoHNn3fkrIrVPnIlgKtDWzFoTEkBvQhVTANz9S6BJ0baZvU6Yopp4EgB47bXwe8ECOOCAZGMREYlTbKUi3H0zcC0wCSgAxrn7TDO73czOi+tzq0pBARx4oJKAiNR+maxZbMBPgYPc/fZoPYL93P3dil7r7i9RohyFu99SRtuTM4o4S2bPhvbtk45CRCR+mVwR5AOdgQuj7a8J9wfUWlu2wJw5SgQikg6ZjBEc6+5Hm9kHAO6+2sxq9VpZCxeG1cE0NVRE0iCTK4JN0V3CDt+tR7A11qgSNnt2+K0rAhFJg0wSwYPAc8A+ZvY74J/AXbFGlbCCgvBbiUBE0iCTMtRPmNl7wGmE8hI/dveC2CNLUEEBNGkCjRsnHYmISPwymTXUElgHvFB8n7svijOwJGnGkIikSSaDxX8jjA8YkAu0BuYA34sxrsS4hyuCnj2TjkREJDsy6Ro6vPh2VCiuf2wRJezf/4ZVqzRjSETSY4fvLI7KTx9bYcMaSgPFIpI2mYwR3FBssw5wNLA0togSpkQgImmTyRhB8YUWNxPGDJ6NJ5zkzZ4Nu+0GLVpU3FZEpDYoNxFEN5I1cPf/ylI8iSsoCGsP14mtHJ+ISPVS5tedmdVz9y3A8VmMJ3EFBeoWEpF0Ke+K4F3CeMCHZjYBeBr4puhJd/9rzLFl3TffwKJFmjEkIumSyRhBLrCSsEZx0f0EDtS6RDBnTvitKwIRSZPyEsE+0YyhGfwnARTJ2rrB2VQ0Y0hXBCKSJuUlgrrAHmybAIrUykQwe3YYJG7bNulIRESyp7xEsMzdb89aJNVAQQEcfDDk5CQdiYhI9pQ3SbK0K4FaraBA3UIikj7lJYLTshZFNbB5M3zyiQaKRSR9ykwE7r4qm4Ekbf582LRJiUBE0kf3z0Y0Y0hE0kqJIKJ1ikUkrZQIIgUFsP/+0KhR0pGIiGSXEkFk9mx1C4lIOikR8J/lKdUtJCJppEQALFsGX32lRCAi6aREwH8GitU1JCJppESAlqcUkXRTIiAkggYN4IADko5ERCT7Yk0EZtbFzOaY2TwzG1jK8zeY2Swzm25m/zCzA+OMpyxFM4YsddWVRERiTATReseDgLOBDsCFZtahRLMPgDx3PwJ4Brg7rnjKoxlDIpJmcV4RHAPMc/f57v4tMBboVryBu09293XR5ttA8xjjKdWXX8LSpRooFpH0ijMRNAMWF9sujPaV5XLg5dKeMLN+ZjbNzKatWLGiCkPU8pQiItVisNjMLgbygHtKe97dh7p7nrvnNW3atEo/WzOGRCTtMlm8fmctAVoU224e7duGmZ0O3Ayc5O4bY4ynVAUFUK8eHHRQtj9ZRKR6iPOKYCrQ1sxam9kuQG9gQvEGZnYUMAQ4z92XxxhLmWbPDmsU16+fxKeLiCQvtkTg7puBa4FJQAEwzt1nmtntZnZe1OweYA/gaTP70MwmlPF2sdGMIRFJuzi7hnD3l4CXSuy7pdjj0+P8/Ip8+y18+in07JlkFCIiyaoWg8VJmTcPtmzRFYGIpFuqE4FmDImIpDwRFFUdPeSQZOMQEUlSqhNBQQG0aAF77JF0JCIiyUl9IlC3kIikXWoTwdatWqdYRARSnAgKC2HdOl0RiIikNhFoxpCISJDaRKB1ikVEgtQmgoIC2Gsv2GefpCMREUlWqhNB+/ZanlJEJLWJQDOGRESCVCaCVatg+XINFIuIQEoT
QdGMIV0RiIikNBEUzRjSFYGISEoTQUEB5ORAq1ZJRyIikrzUJoJ27aBu3aQjERFJXioTwezZ6hYSESmSukSwfj0sWKBEICJSJHWJYO5ccNeMIRGRIqlLBJoxJCKyrXpJB5BtBQWhrES7dklHIlL7bdq0icLCQjZs2JB0KKmRm5tL8+bNqV+/fsavSV0imD07TBvdddekIxGp/QoLC2nQoAGtWrXCVNgrdu7OypUrKSwspHXr1hm/LnVdQ1qeUiR7NmzYQOPGjZUEssTMaNy48Q5fgaUqEWzZAnPmKBGIZJOSQHbtzJ93qhLBwoWwcaNmDImIFJeqRKDlKUXS6fnnn8fMmF00bRB4/fXX6dq16zbt+vTpwzPPPAOEge6BAwfStm1bjj76aDp37szLL79c6Vh+//vf06ZNGw455BAmTZpUaht35+abb6Zdu3a0b9+eBx98EIB77rmHjh070rFjRw477DDq1q3LqlWrKh1TqgaLVXVUJJ3GjBnDCSecwJgxY/jtb3+b0Wv+93//l2XLljFjxgxycnL44osveOONNyoVx6xZsxg7diwzZ85k6dKlnH766cydO5e6JerdjBw5ksWLFzN79mzq1KnD8uXLARgwYAADBgwA4IUXXuDPf/4ze++9d6VigpQlgtmzoWlTaNw46UhE0udXv4IPP6za9+zYEe6/v/w2a9eu5Z///CeTJ0/m3HPPzSgRrFu3jmHDhrFgwQJycnIA2HfffenVq1el4h0/fjy9e/cmJyeH1q1b06ZNG9599106d+68TbuHH36YJ598kjp1QqfNPqWsqTtmzBguvPDCSsVTJHVdQ+oWEkmX8ePH06VLF9q1a0fjxo157733KnzNvHnzaNmyJQ0bNqyw7fXXX/9dd03xnz/84Q/btV2yZAktWrT4brt58+YsWbJku3affvopTz31FHl5eZx99tl88skn2zy/bt06Jk6cSI8ePSqMLxOpuSJwD4ng/POTjkQknSo6c4/LmDFjuO666wDo3bs3Y8aMoVOnTmXOrtnRWTd//vOfKx1jSRs3biQ3N5dp06bx17/+lcsuu4w333zzu+dfeOEFjj/++CrpFoKYE4GZdQEeAOoCw939DyWezwEeAzoBK4EL3P2zOGJZsQJWr9YVgUiarFq1itdee42PP/4YM2PLli2YGffccw+NGzdm9erV27Vv0qQJbdq0YdGiRXz11VcVXhVcf/31TJ48ebv9vXv3ZuDAgdvsa9asGYsXL/5uu7CwkGbNmm332ubNm/OTn/wEgO7du9O3b99tnh87dmyVdQsBYXQ6jh/Cl/+nwEHALsBHQIcSbfoDg6PHvYGnKnrfTp06+c54/XV3cJ84cadeLiI7YdasWYl+/pAhQ7xfv37b7DvxxBP9jTfe8A0bNnirVq2+i/Gzzz7zli1b+po1a9zdfcCAAd6nTx/fuHGju7svX77cx40bV6l4ZsyY4UcccYRv2LDB58+f761bt/bNmzdv1+6mm27yRx55xN3dJ0+e7Hl5ed89t2bNGt9rr7187dq1ZX5OaX/uwDQv43s1zjGCY4B57j7f3b8FxgLdSrTpBoyKHj8DnGYx3X2iGUMi6TNmzBi6d+++zb4ePXowZswYcnJyePzxx+nbty8dO3akZ8+eDB8+nEaNGgFw55130rRpUzp06MBhhx1G165dMxozKM/3vvc9evXqRYcOHejSpQuDBg36bsbQOeecw9KlSwEYOHAgzz77LIcffji//vWvGT58+Hfv8dxzz3HmmWey++67VyqW4iwkiqpnZj2BLu5+RbR9CXCsu19brM2MqE1htP1p1ObfJd6rH9APoGXLlp0WLly4w/GMHw+PPgp//SvUSdUQuUhyCgoKaK/+2Kwr7c/dzN5z97zS2teIr0R3H+ruee6e17Rp0516j27d4PnnlQREREqK82txCdCi2HbzaF+pbcysHtCIMGgsIiJZEmcimAq0NbPWZrYLYTB4Qok2E4CfRY97Aq95XH1V
IpII/ZfOrp35844tEbj7ZuBaYBJQAIxz95lmdruZnRc1ewRobGbzgBuAgaW/m4jURLm5uaxcuVLJIEs8Wo8gNzd3h14X22BxXPLy8nzatGlJhyEiGdAKZdlX1gpl5Q0Wp+bOYhHJvvr16+/QSlmSDM2hERFJOSUCEZGUUyIQEUm5GjdYbGYrgB2/tThoAvy7wla1i445HXTM6VCZYz7Q3Uu9I7fGJYLKMLNpZY2a11Y65nTQMadDXMesriERkZRTIhARSbm0JYKhSQeQAB1zOuiY0yGWY07VGIGIiGwvbVcEIiJSghKBiEjK1cpEYGZdzGyOmc0zs+0qmppZjpk9FT3/jpm1SiDMKpXBMd9gZrPMbLqZ/cPMDkwizqpU0TEXa9fDzNzMavxUw0yO2cx6RX/XM83syWzHWNUy+Lfd0swmm9kH0b/vc5KIs6qY2QgzWx6t4Fja82ZmD0Z/HtPN7OhKf2hZixnX1B+gLvApcBCwC/AR0KFEm/7A4Ohxb+CppOPOwjGfAuwWPb46DccctWsATAHeBvKSjjsLf89tgQ+AvaLtfZKOOwvHPBS4OnrcAfgs6bgrecwnAkcDM8p4/hzgZcCAHwDvVPYza+MVwTHAPHef7+7fAmOBbiXadANGRY+fAU4zM8tijFWtwmN298nuvi7afJuwYlxNlsnfM8AdwB+B2lAHOZNjvhIY5O6rAdx9eZZjrGqZHLMDRavKNwKWZjG+KufuU4BV5TTpBjzmwdvAnma2f2U+szYmgmbA4mLbhdG+Utt4WEDnS6BxVqKLRybHXNzlhDOKmqzCY44umVu4+9+yGViMMvl7bge0M7O3zOxtM+uStejikckx3wZcbGaFwEvAL7ITWmJ29P97hbQeQcqY2cVAHnBS0rHEyczqAH8C+iQcSrbVI3QPnUy46ptiZoe7+5okg4rZhcBId7/PzDoDo83sMHffmnRgNUVtvCJYArQott082ldqGzOrR7icXJmV6OKRyTFjZqcDNwPnufvGLMUWl4qOuQFwGPC6mX1G6EudUMMHjDP5ey4EJrj7JndfAMwlJIaaKpNjvhwYB+Du/wJyCcXZaquM/r/viNqYCKYCbc2stZntQhgMnlCizQTgZ9HjnsBrHo3C1FAVHrOZHQUMISSBmt5vDBUcs7t/6e5N3L2Vu7cijIuc5+41eZ3TTP5tP0+4GsDMmhC6iuZnMcaqlskxLwJOAzCz9oREsCKrUWbXBODSaPbQD4Av3X1ZZd6w1nUNuftmM7sWmESYcTDC3Wea2e3ANHefADxCuHycRxiU6Z1cxJWX4THfA+wBPB2Niy9y9/MSC7qSMjzmWiXDY54EnGlms4AtwAB3r7FXuxke843AMDO7njBw3Kcmn9iZ2RhCMm8SjXvcCtQHcPfBhHGQc4B5wDqgb6U/swb/eYmISBWojV1DIiKyA5QIRERSTolARCTllAhERFJOiUBEJOWUCKRaMrMtZvZhsZ9W5bRdWwWfN9LMFkSf9X50h+qOvsdwM+sQPf6fEs/9X2VjjN6n6M9lhpm9YGZ7VtC+Y02vxinx0/RRqZbMbK2771HVbct5j5HAi+7+jJmdCdzr7kdU4v0qHVNF72tmo4C57v67ctr3IVRdvbaqY5HaQ1cEUiOY2R7ROgrvm9nHZrZdpVEz29/MphQ7Y/5htP9MM/tX9NqnzayiL+gpQJvotTdE7zXDzH4V7dvdzP5mZh9F+y+I9r9uZnlm9gdg1yiOJ6Ln1ka/x5rZj4rFPNLMeppZXTO7x8ymRjXmr8rgj+VfRMXGzOyY6Bg/MLP/M7NDojtxbwcuiGK5IIp9hJm9G7UtrWKrpE3Stbf1o5/Sfgh3xX4Y/TxHuAu+YfRcE8JdlUVXtGuj3zcCN0eP6xLqDTUhfLHvHu2/CbillM8bCfSMHp8PvAN0Aj4GdifclT0TOAroAQwr9tpG0e/XidY8KIqpWJuiGLsDo6LHuxCqSO4K9AN+E+3PAaYBrUuJc22x43sa
6BJtNwTqRY9PB56NHvcBHir2+ruAi6PHexJqEe2e9N+3fpL9qXUlJqTWWO/uHYs2zKw+cJeZnQhsJZwJ7wt8Xuw1U4ERUdvn3f1DMzuJsFjJW1FpjV0IZ9KlucfMfkOoU3M5oX7Nc+7+TRTDX4EfAhOB+8zsj4TupDd34LheBh4wsxygCzDF3ddH3VFHmFnPqF0jQrG4BSVev6uZfRgdfwHwSrH2o8ysLaHMQv0yPv9M4Dwz+69oOxdoGb2XpJQSgdQUPwWaAp3cfZOFiqK5xRu4+5QoUfwIGGlmfwJWA6+4+4UZfMYAd3+maMPMTiutkbvPtbDWwTnAnWb2D3e/PZODcPcNZvY6cBZwAWGhFQirTf3C3SdV8Bbr3b2jme1GqL9zDfAgYQGeye7ePRpYf72M1xvQw93nZBKvpIPGCKSmaAQsj5LAKcB2ay5bWIf5C3cfBgwnLPf3NnC8mRX1+e9uZu0y/Mw3gR+b2W5mtjuhW+dNMzsAWOfujxOK+ZW2Zuym6MqkNE8RCoUVXV1A+FK/uug1ZtYu+sxSeVht7pfAjfafUupFpYj7FGv6NaGLrMgk4BcWXR5ZqEorKadEIDXFE0CemX0MXArMLqXNycBHZvYB4Wz7AXdfQfhiHGNm0wndQodm8oHu/j5h7OBdwpjBcHf/ADgceDfqorkVuLOUlw8FphcNFpfwd8LCQK96WH4RQuKaBbxvYdHyIVRwxR7FMp2wMMvdwO+jYy/+uslAh6LBYsKVQ/0otpnRtqScpo+KiKScrghERFJOiUBEJOWUCEREUk6JQEQk5ZQIRERSTolARCTllAhERFLu/wEt+DXz07Zq7gAAAABJRU5ErkJggg==\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "plt.title('ROC')\n",
    "plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)\n",
    "plt.legend(loc = 'lower right')\n",
    "plt.ylabel('True Positive Rate')\n",
    "plt.xlabel('False Positive Rate')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## SK Example 2: Telemanom"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "125/125 [==============================] - 1s 8ms/step - loss: 0.0112 - val_loss: 0.0046\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/validation.py:933: FutureWarning: Passing attributes to check_is_fitted is deprecated and will be removed in 0.23. The attributes argument is ignored.\n",
      "  \"argument is ignored.\", FutureWarning)\n",
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/validation.py:933: FutureWarning: Passing attributes to check_is_fitted is deprecated and will be removed in 0.23. The attributes argument is ignored.\n",
      "  \"argument is ignored.\", FutureWarning)\n"
     ]
    }
   ],
   "source": [
    "transformer = TelemanomSKI(l_s= 2, n_predictions= 1)\n",
    "transformer.fit(X_train)\n",
    "prediction_labels_train = transformer.predict(X_train)\n",
    "prediction_labels_test = transformer.predict(X_test)\n",
    "prediction_score = transformer.predict_score(X_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Primitive:  d3m.primitives.tods.detection_algorithm.telemanom(hyperparams=Hyperparams({'contamination': 0.1, 'window_size': 1, 'step_size': 1, 'return_subseq_inds': False, 'use_columns': (), 'exclude_columns': (), 'return_result': 'new', 'use_semantic_types': False, 'add_index_columns': False, 'error_on_no_input': True, 'return_semantic_type': 'https://metadata.datadrivendiscovery.org/types/Attribute', 'smoothing_perc': 0.05, 'window_size_': 100, 'error_buffer': 50, 'batch_size': 70, 'dropout': 0.3, 'validation_split': 0.2, 'optimizer': 'Adam', 'lstm_batch_size': 64, 'loss_metric': 'mean_squared_error', 'layers': [10, 10], 'epochs': 1, 'patience': 10, 'min_delta': 0.0003, 'l_s': 2, 'n_predictions': 1, 'p': 0.05}), random_seed=0)\n",
      "Prediction Labels\n",
      " [[1]\n",
      " [1]\n",
      " [1]\n",
      " ...\n",
      " [1]\n",
      " [1]\n",
      " [1]]\n",
      "Prediction Score\n",
      " [[0.08822848]\n",
      " [0.07965706]\n",
      " [0.05999164]\n",
      " ...\n",
      " [0.05911084]\n",
      " [0.05963569]\n",
      " [0.06003137]]\n"
     ]
    }
   ],
   "source": [
    "print(\"Primitive: \", transformer.primitive)\n",
    "print(\"Prediction Labels\\n\", prediction_labels_test)\n",
    "print(\"Prediction Score\\n\", prediction_score)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [],
   "source": [
    "y_true = prediction_labels_train\n",
    "y_pred = prediction_labels_test\n",
    "precision, recall, thresholds = precision_recall_curve(y_true, y_pred)\n",
    "f1_scores = 2*recall*precision/(recall+precision)\n",
    "fpr, tpr, threshold = metrics.roc_curve(y_true, y_pred)\n",
    "roc_auc = metrics.auc(fpr, tpr)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy Score:  0.1839551865559668\n"
     ]
    }
   ],
   "source": [
    "print('Accuracy Score: ', accuracy_score(y_true, y_pred))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[ 990, 8007],\n",
       "       [ 151,  849]])"
      ]
     },
     "execution_count": 34,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "confusion_matrix(y_true, y_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0       0.87      0.11      0.20      8997\n",
      "           1       0.10      0.85      0.17      1000\n",
      "\n",
      "    accuracy                           0.18      9997\n",
      "   macro avg       0.48      0.48      0.18      9997\n",
      "weighted avg       0.79      0.18      0.19      9997\n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(classification_report(y_true, y_pred))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Best threshold:  0\n",
      "Best F1-Score:  0.18186778212239701\n"
     ]
    }
   ],
   "source": [
    "print('Best threshold: ', thresholds[np.argmax(f1_scores)])\n",
    "print('Best F1-Score: ', np.max(f1_scores))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEWCAYAAABrDZDcAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAsEElEQVR4nO3debyWc/7H8ddHSoRQGUuaMkXFJHV+yDYZJEn21DBkxoQ09maasU52DWMrKpJlnGRLTJu0GJJW7eGQ6hBRpCRaPr8/vlfmOJ3lPp1z3de5z/1+Ph7341zXdX/v+/5cJ+7P+e7m7oiISPbaLukAREQkWUoEIiJZTolARCTLKRGIiGQ5JQIRkSynRCAikuWUCEREspwSgUgJzOwTM/vezNaa2edmNsTMdi7w/JFmNt7M1pjZajN71cyaF3qPXc3sfjNbGr3PR9F53fTfkcjWlAhESnequ+8MtAQOBf4GYGZtgLHAK8A+QCNgNvC2me0flakBvAEcBLQHdgXaACuBw9J6FyLFMM0sFimemX0CXOzu46Lze4CD3P0UM/svMNfdexR6zSjgS3e/wMwuBm4HfuXua9McvkhKVCMQSZGZ1QdOBvLMbCfgSOD5IooOA06Mjk8ARisJSGWmRCBSuuFmtgZYBqwAbgb2IPz/s7yI8suBLe3/dYopI1JpKBGIlO50d98FaAs0JXzJfw1sBvYuovzewFfR8cpiyohUGkoEIily90nAEOCf7v4d8A5wThFFOxM6iAHGASeZWa20BCmyDZQIRMrmfuBEMzsE6A1caGZXmNkuZra7md1GGBX0j6j804QmpRfNrKmZbWdmdczs72bWIZE7EClEiUCkDNz9S+Ap4CZ3fws4CTiT0A+whDC89Gh3/zAq/wOhw3gR8DrwLTCV0Lz0btpvQKQIGj4qIpLlVCMQEclySgQiIllOiUBEJMspEYiIZLntkw6grOrWresNGzZMOgwRkYwyY8aMr9y9XlHPZVwiaNiwIdOnT086DBGRjGJmS4p7Tk1DIiJZTolARCTLKRGIiGQ5JQIRkSynRCAikuViSwRmNtjMVpjZvGKeNzN70MzyzGyOmbWKKxYRESlenDWCIYTNuotzMtAkenQHHokxFhERKUZsicDd3wRWlVDkNOApD6YAu5mZdnISESlkwwa47jpYtiye90+yj2BfwoYdW+RH17ZiZt3NbLqZTf/yyy/TEpyISGWwdi2ceircey+MHBnPZ2REZ7G7D3T3HHfPqVevyBnSIiJVzhdfQNu2MG4cDBoEl1wSz+ckucTEp8B+Bc7rR9dERLLehx/CSSeFZPDKK3DKKfF9VpI1ghHABdHooSOA1e6+PMF4REQqhalT4cgjYc0amDAh3iQAMdYIzCwXaAvUNbN84GagOoC7PwqMBDoAecA64KK4YhERyRT/+Q907gx77QWjR0OTJvF/ZmyJwN27lvK8A5fH9fkiIpnmscfg0kuhZcuQEH7xi/R8bkZ0FouIVGXu8I9/wJ/+BCeeCBMnpi8JQAbuRyAiUpVs3AiXXRZqA926wcCBUL16emNQjUBEJCHffQdnnBGSwPXXw+DB6U8CoBqBiEgivvwyTBSbNg369w+1gqQoEYiIpNnHH0P79mHJiBdfhNNPTzYeJQIRkTSaMQM6dAh9A2+8EeYLJE19BCIiaTJmDPzmN7DjjvD225UjCYASgYhIWjz5JHTsCI0bw+TJ0LRp0hH9jxKBiEiM3OGOO8LQ0N/8Bt58E/bZJ+mofk6JQEQkJps2Qc+eYWjoeeeFZaR33TXpqLamRCAiEoPvv4ezzw5DQ//yF3jqKahRI+moiqZRQyIiFWzVqjBH4J134IEH4Iorko6oZEoEIiIVaMmSMEdg8WIYNizUCio7JQIRkQry3nthjsD338PYsXDssUlHlBr1EYiIVIA33ghf/NWqwVtvZU4SACUCEZFye/ZZOPlk+OUvQ7/AQQclHVHZKBGIiGwj
d/jnP8PQ0COPhP/+F+rXTzqqslMiEBHZBps3w9VXQ69eYWvJMWNgt92SjmrbKBGIiJTR+vXQpUsYGnrVVZCbCzvskHRU206jhkREyuCbb8Ky0ZMmhWaha69NOqLyUyIQEUnRsmWhU/iDD0IHcdeuSUdUMZQIRERSMG9emCi2Zg2MHg2//W3SEVUc9RGIiJRi0iQ4+ujQQfzmm1UrCYASgYhIsTZvhkGDoF27sHT0O+/AIYckHVXFUyIQESnC/PnQti107x7mCLz1VpgwVhUpEYiIFLBuHfz979CyZUgGjz0Wlo/YY4+kI4uPOotFRCKjRsHll4eVQy+8EPr2hXr1ko4qfqoRiEjW++wzOOecsHJojRowYQIMGZIdSQCUCEQki23aBA89FDaSf/VVuPVWmD079A1kEzUNiUhWmjEDLrkk/GzXDvr1g8aNk44qGaoRiEhW+fZbuPJKOOwwyM8P6wSNHp29SQBiTgRm1t7M3jezPDPrXcTzDcxsgpnNMrM5ZtYhznhEJHu5wwsvQLNmoTno0kth0aKweJxZ0tElK7ZEYGbVgH7AyUBzoKuZNS9U7AZgmLsfCnQB+scVj4hkr8WLoWPH0CG8555hYli/fpm7bHRFi7NGcBiQ5+4fu/uPwFDgtEJlHNg1Oq4NfBZjPCKSZX78Ee66K+wYNmkS3HcfTJsGhx+edGSVS5ydxfsCywqc5wOFf/23AGPN7M9ALeCEot7IzLoD3QEaNGhQ4YGKSNXz1luh+Wf+fDjjjLB3wH77JR1V5ZR0Z3FXYIi71wc6AE+b2VYxuftAd89x95x62TKwV0S2ycqVcPHFcMwxYaXQESPgpZeUBEoSZyL4FCj4q68fXSvoj8AwAHd/B6gJ1I0xJhGpotzhySfDnIAhQ8IWkgsWwKmnJh1Z5RdnIpgGNDGzRmZWg9AZPKJQmaXA8QBm1oyQCL6MMSYRqYIWLoTjjoNu3aBJE5g5E+65B2rVSjqyzBBbInD3jUBPYAywkDA6aL6Z9TGzTlGxa4E/mdlsIBfo5u4eV0wiUrV8/z3ccENYGnr2bBgwIPQNtGiRdGSZJdaZxe4+EhhZ6NpNBY4XAEfFGYOIVE1jx0KPHvDRR3D++XDvvWFoqJRd0p3FIiJlsnx5mAR20kmw3XYwbhw8/bSSQHkoEYhIRti0Cfr3D53BL78Mt9wCc+bA8ccnHVnm06JzIlLpzZoV5gRMnRq++Pv3hwMOSDqqqkM1AhGptNasgauvhpwc+OQTeOYZeP11JYGKphqBiFQ67jB8OFxxRVgh9JJL4M47Yffdk46salKNQEQqlSVLoFMnOPPMsE/w5Mnw6KNKAnFSIhCRSmHDhrBHcPPmMH58OJ4+Hdq0STqyqk9NQyKSuMmTQ2fw3LmhNvDQQ6D1JdNHNQIRScyqVdC9Oxx1FHzzTegXeOUVJYF0UyIQkbRzD5PAmjaFwYPhmmvCAnGnFd6xRNIi5aYhM9vJ3dfFGYyIVH3vvx+Whhg/PmwQM3YstGyZdFTZrdQagZkdaWYLgEXR+SFmpi0lRaRM1q+Hm28OC8LNmBEmhb39tpJAZZBKjeBfwElES0i7+2wzOzbWqESkShk3LtQCPvwQunYNW0butVfSUckWKfURuPuyQpc2xRCLiFQxX3wB550HJ54ImzfDmDHw7LNKApVNKolgmZkdCbiZVTez6wj7C4iIFGnz5jAJ7MAD4fnn4cYbw9DQdu2SjkyKkkrT0KXAA4TN6D8FxgI94gxKRDLX7NlhTsCUKdC2LTzySBgdJJVXKjWCA939PHf/hbvv6e7nA83iDkxEMsvatXDdddC6NeTlwVNPhZFBSgKVXyqJ4KEUr4lIlhoxIiwNce+9cNFFYYjo738PZklHJqkotmnIzNoARwL1zOyaAk/tClSLOzARqfyWLg0rhL7yChx8MOTmhlnCkllKqhHUAHYmJItdCjy+Bc6OPzQRqaw2bgx//TdvHiaE3X03zJypJJCpiq0RuPskYJKZ
DXH3JWmMSUQqsSlTQmfw7Nlwyinw8MPQsGHSUUl5pDJqaJ2Z9QUOAmpuuejuv40tKhGpdL75Bv72NxgwAPbZB158Ec44Q/0AVUEqncX/Jiwv0Qj4B/AJMC3GmESkEnEPbf9Nm8LAgaFPYOHCsHGMkkDVkEoiqOPujwMb3H2Su/8BUG1AJAt8+CGcdBL87new334wbRrcfz/sskvSkUlFSiURbIh+LjezU8zsUGCPGGMSkYT98AP06QO//jW8+27oB5gyBVq1SjoyiUMqfQS3mVlt4FrC/IFdgaviDEpEkjNhQugM/uADOPfcsEDcPvskHZXEqdQagbu/5u6r3X2eux/n7q2BVWmITUTSaMUKuOAC+O1vw/DQUaNg6FAlgWxQbCIws2pm1tXMrjOzg6NrHc1sMvBw2iIUkVht3gyDBoXO4KFD4frrYd48aN8+6cgkXUpqGnoc2A+YCjxoZp8BOUBvdx+ehthEJGZz54ZmoMmT4dhjwwJxzZsnHZWkW0mJIAdo4e6bzawm8DnwK3dfmZ7QRCQu330XOoPvuw9q14YnnoALL9Rw0GxVUh/Bj+6+GcDd1wMflzUJmFl7M3vfzPLMrHcxZTqb2QIzm29mz5bl/UWk7F57DQ46CO65J/QJLFoE3bopCWSzkmoETc1sTnRswK+icwPc3VuU9MZmVg3oB5wI5APTzGyEuy8oUKYJ8DfgKHf/2sz2LMe9iEgJ8vPhyivhpZegWTOYNCk0B4mUlAjKu+fAYUCeu38MYGZDgdOABQXK/Ano5+5fA7j7inJ+pogUsnFjmAdw443h+I474NproUaNpCOTyqKkRefKu9DcvkDBvY7zgcMLlTkAwMzeJixtfYu7jy78RmbWHegO0KBBg3KGJZI9pk4NncGzZoVRQP36wf77Jx2VVDYpbV4fo+2BJkBboCswyMx2K1zI3Qe6e46759SrVy+9EYpkoNWroWdPOOII+PxzGDYMRo5UEpCixZkIPiUMP92ifnStoHxghLtvcPfFwAeExCAi28AdnnsuzAno3z8kg0WL4Jxz1BksxUspEZjZjmZ2YBnfexrQxMwamVkNoAswolCZ4YTaAGZWl9BU9HEZP0dEgI8+gpNPhi5dwmzgqVPhwQdh112Tjkwqu1ITgZmdCrwHjI7OW5pZ4S/0rbj7RqAnMAZYCAxz9/lm1sfMOkXFxgArzWwBMAHopXkKImXz449w++1hq8jJk+GBB0ISyMlJOjLJFObuJRcwm0FYdnqiux8aXZvr7r9OQ3xbycnJ8enTpyfx0SKVzqRJcNllYX+As84KSWDffZOOSiojM5vh7kX+eZDSMtTuvrrQtZKzh4jE6quv4KKLoG1b+P77MEnshReUBGTbpJII5pvZ74BqZtbEzB4CJsccl4gUYfNmGDwYDjwQnnkGeveG+fPD3sEi2yqVRPBnwn7FPwDPAqvRfgQiaTd/fqgB/PGPYWbwrFlw552w005JRyaZLpWNaZq6+/XA9XEHIyJbW7cObrsN+vYNI4Aeeyw0C22X9CwgqTJSSQT3mtlewAvAc+4+L+aYRCQyahRcfjksXhxWB+3bFzSnUipaKjuUHQccB3wJDDCzuWZ2Q+yRiWSxzz6Dzp2hQ4ewJtCECTBkiJKAxCOlyqW7f+7uDwKXEuYU3BRnUCLZatMmeOihMDN4xAi49VaYPTv0DYjEpdSmITNrBpwLnAWsBJ4jbGQvIhVoxgy45JLws127sEBc48ZJRyXZIJU+gsGEL/+T3P2zmOMRyTrffhuWiH744dD0k5sL556rtYEkfUpNBO7eJh2BiGQbd3jxxbBZzPLlYYbw7bfDbrslHZlkm2ITgZkNc/fOZjaXn88kTmmHMhEp3uLFYWXQkSOhZcuwa9jhhXfrEEmTkmoEV0Y/O6YjEJFs8OOPYcP4Pn3CPID77oM//xm2T6WRViQmxY4acvfl0WEPd19S8AH0SE94IlXHW29Bq1bwt7+F3cIWLoSrr1YSkOSlMnz0
xCKunVzRgYhUVStXwsUXwzHHwJo1YVjoSy/BfvuV/lqRdCipj+Aywl/++5vZnAJP7QK8HXdgIpnOHZ56Cq67Dr7+Gnr1gptvhlq1ko5M5OdKqpQ+C4wC7gR6F7i+xt1XxRqVSIZbuDCMApo0Cdq0gUcfhRYaXiGVVElNQ+7unwCXA2sKPDCzPeIPTSTzfP893HADHHJImBE8YEDoG1ASkMqstBpBR2AGYfhowektDuwfY1wiGWfsWOjRI+wdfP75cO+9sOeeSUclUrpiE4G7d4x+NkpfOCKZZ/nyMPrnueegSRMYNw6OPz7pqERSl8rm9UeZWa3o+Hwzu8/MGsQfmkjltmkT9O8fFoh7+WW45RaYM0dJQDJPKsNHHwHWmdkhhMXmPgKejjUqkUpu1iw48siwV8D//R/MnRtGBNWsmXRkImWXSiLY6O4OnAY87O79CENIRbLOmjVwzTWQkwOffBL2DX79dTjggKQjE9l2qcxpXGNmfwN+DxxjZtsB1eMNS6RycYfhw+GKKyA/PywXfeedsPvuSUcmUn6p1AjOJWxc/wd3/xyoD/SNNSqRSmTJEujUCc48E/bYAyZPDvMClASkqkhlq8rPgX8Dtc2sI7De3Z+KPTKRhG3YEPYIbt4cxo8Px9OnhwliIlVJKqOGOgNTgXOAzsC7ZnZ23IGJJGnyZGjdGv7yFzjhhDBT+LrroLoaRaUKSqWP4Hrg/9x9BYCZ1QPGAS/EGZhIElatCquDDhwYFoUbPhxOOy3pqETilUofwXZbkkBkZYqvE8kY7vD002FOwOOPh5FBCxYoCUh2SKVGMNrMxgC50fm5wMj4QhJJr/ffD0tDjB8fdgkbOzbsGiaSLVLZs7iXmZ0JHB1dGujuL8cblkj81q8PQ0Dvugt23DHMEu7eHapVSzoykfQqaT+CJsA/gV8Bc4Hr3P3TdAUmEqdx40It4MMPoWvXsGXkXnslHZVIMkpq6x8MvAacRViB9KGyvrmZtTez980sz8x6l1DuLDNzM8sp62eIlMUXX8B558GJJ4Z+gbFj4dlnlQQku5XUNLSLuw+Kjt83s5lleWMzqwb0I2x1mQ9MM7MR7r6gULldgCuBd8vy/iJlsXlzGAnUu3fYM+Cmm8LoIK0NJFJyIqhpZofyv30Idix47u6lJYbDgDx3/xjAzIYS1itaUKjcrcDdQK8yxi6Sktmz4dJLYcoUaNsWHnkkjA4SkaCkRLAcuK/A+ecFzh34bSnvvS+wrMB5PnB4wQJm1grYz93/Y2bFJgIz6w50B2jQQCtgS2rWrg1LQ99/f1gO4qmnwoYxZqW9UiS7lLQxzXFxfnC0eN19QLfSyrr7QGAgQE5OjscZl1QNI0ZAz56wbBlcfDHcfXdYJ0hEthbnxLBPgf0KnNePrm2xC3AwMNHMPgGOAEaow1jKY+lSOP30MBGsdu2wX/CgQUoCIiWJMxFMA5qYWSMzqwF0AUZsedLdV7t7XXdv6O4NgSlAJ3efHmNMUkVt3Bj2CG7ePIwEuvtumDkTjjoq6chEKr9UZhZvE3ffaGY9gTFANWCwu883sz7AdHcfUfI7iKRmypTQGTx7NpxyCjz8MDRsmHRUIpmj1ERgZgacB+zv7n2i/Yr3cveppb3W3UdSaDkKd7+pmLJtU4pYJPLNN2EI6IABsM8+8OKLcMYZ6gwWKatUmob6A22ArtH5GsL8AJFEuENubhgCOnBg2DVs4cKwcYySgEjZpdI0dLi7tzKzWQDu/nXU5i+Sdnl5YWmI118P+waPHAmtWiUdlUhmS6VGsCGaJezw034Em2ONSqSQH36APn3g4IPh3XdDP8CUKUoCIhUhlRrBg8DLwJ5mdjtwNnBDrFGJFDBhQugM/uADOPfcsEDcPvskHZVI1ZHKMtT/NrMZwPGE5SVOd/eFsUcmWW/FirA95NNPw/77w6hR0L590lGJVD2pjBpqAKwDXi14zd2XxhmYZK/Nm8MuYX/9a1gm4vrrw2PHHZOOTKRq
SqVp6D+E/gEDagKNgPeBg2KMS7LUvHmhGejtt+HYY8MCcc2bJx2VSNWWStPQrwueRwvF9YgtIslK330XOoPvuy8sDfHEE3DhhRoOKpIOZZ5Z7O4zzezw0kuKpOa118ICcUuWwB/+EJaHqFs36ahEskcqfQTXFDjdDmgFfBZbRJI18vPhyivhpZegWTOYNCk0B4lIeqUyj2CXAo8dCH0Gp8UZlFRtGzeGPQKaNQsTwu64A957T0lAJCkl1giiiWS7uPt1aYpHqrhp0+CSS2DWrDAUtF+/MDRURJJTbI3AzLZ3902AFvKVclu9OvQDHH44fP45DBsWagNKAiLJK6lGMJXQH/CemY0Ange+2/Kku78Uc2xSBbiHL/2rroIvvgjJ4LbbYNddk45MRLZIZdRQTWAlYY/iLfMJHFAikBJ99BFcfjmMGRPWBHr11bBQnIhULiUlgj2jEUPz+F8C2EL7BkuxfvwR+vYNf/lXrw4PPBASQrVqSUcmIkUpKRFUA3bm5wlgCyUCKdKkSXDZZWF/gLPOCklg332TjkpESlJSIlju7n3SFolktK++gl69YMiQsE3ka6+FbSNFpPIraR6BJvdLqTZvhsGD4cAD4ZlnoHdvmD9fSUAkk5RUIzg+bVFIRpo/PzQD/fe/cNRR8OijYeMYEcksxdYI3H1VOgORzLFuHfz979CyZUgGjz0Gb76pJCCSqcq86Jxkt1GjwgigxYvD6qB9+0K9eklHJSLlkcpaQyJ89hl07gwdOkCNGmH7yCFDlAREqgIlAinRpk3w0EPQtCmMGAG33gqzZ0PbtklHJiIVRU1DUqwZM8ICcTNmQLt2YYG4xo2TjkpEKppqBLKVb78N+wQcdljYMyA3F0aPVhIQqapUI5CfuMOLL4YksHx5GBp6++2w225JRyYicVKNQIAwCqhjRzjnHNhzT3jnndAUpCQgUvUpEWS5H3+Eu+6Cgw4K6wTdd1/YPOZw7UotkjXUNJTF3noLLr00TAo744ywQNx++yUdlYikW6w1AjNrb2bvm1memfUu4vlrzGyBmc0xszfM7JdxxiPBypVw8cVwzDGwZk0YFvrSS0oCItkqtkQQ7XfcDzgZaA50NbPmhYrNAnLcvQXwAnBPXPFI6Ax+8skwJ2DIkLBa6IIFcOqpSUcmIkmKs0ZwGJDn7h+7+4/AUOC0ggXcfYK7r4tOpwD1Y4wnqy1aBMcdB926QZMmMHMm3HMP1KqVdGQikrQ4E8G+wLIC5/nRteL8ERhV1BNm1t3MppvZ9C+//LICQ6z6vv8ebrwRWrQIM4IHDgx9Ay1aJB2ZiFQWlaKz2MzOB3KA3xT1vLsPBAYC5OTkaHe0FI0dCz16hL2Dzz8f7r03DA0VESkozhrBp0DB7sf60bWfMbMTgOuBTu7+Q4zxZI3ly6FLFzjpJNhuOxg3Dp5+WklARIoWZyKYBjQxs0ZmVgPoAowoWMDMDgUGEJLAihhjyQqbNkH//qEz+OWX4ZZbYM4cOF5bDIlICWJrGnL3jWbWExgDVAMGu/t8M+sDTHf3EUBfYGfgeTMDWOruneKKqSqbNSvMCZg6NXzx9+8PBxyQdFQikgli7SNw95HAyELXbipwfEKcn58N1qyBm28Ok8Hq1g37Bv/ud2DacVpEUlQpOoul7Nxh+HC44oqwQugll8Cdd8LuuycdmYhkGq01lIGWLIFOneDMM2GPPWDy5LBxvJKAiGwLJYIMsmFD2CO4eXMYPz4cT58ObdokHZmIZDI1DWWIyZNDZ/DcuaE28NBD0KBB0lGJSFWgGkElt2pVaP8/6ij45pvQL/DKK0oCIlJxlAgqKfcwCaxpU3j8cbjmmrBA3Gmnlf5aEZGyUNNQJfT++2FpiPHjwwYxY8dCy5ZJRyUiVZVqBJXI+vVhTkCLFjBjRpgU9vbbSgIiEi/VCCqJceNCLeDDD6Fr17Bl5F57JR2ViGQD1QgS9sUXYWXQE08M/QJjx8KzzyoJiEj6KBEkZPNmGDAg
dAY//zzcdFMYGnriiUlHJiLZRk1DCZg9O8wJmDIF2raFRx4JCUFEJAmqEaTR2rVw3XXQujXk5cFTT4WRQUoCIpIk1QjSZMQI6NkTli2Diy+Gu+8O6wSJiCRNNYKYLVsGp58eJoLVrh32Cx40SElARCoPJYKYbNwYhoA2axZGAt19N8ycGZaKEBGpTNQ0FIMpU0Jn8OzZcMop8PDD0LBh0lGJiBRNNYIK9M03cNllcOSR8NVX8OKL8OqrSgIiUrkpEVQAd8jNDaN/Bg4Mu4YtXBg2jtGWkSJS2alpqJzy8sLSEK+/Djk5MHIktGqVdFQiIqlTjWAb/fAD9OkDBx8M774b+gGmTFESEJHMoxrBNpgwIXQGf/ABnHtuGB20zz5JRyVS+WzYsIH8/HzWr1+fdChZo2bNmtSvX5/q1aun/BolgjJYsSLMDH76adh/fxg1Ctq3TzoqkcorPz+fXXbZhYYNG2LqMIudu7Ny5Ury8/Np1KhRyq9T01AKNm8Ok8CaNoWhQ+H662HePCUBkdKsX7+eOnXqKAmkiZlRp06dMtfAVCMoxbx5oRno7bfh2GPDAnHNmycdlUjmUBJIr235fatGUIzvvoO//hUOPRQWLYInnoCJE5UERKTqUSIowmuvwUEHwT33wAUXhETQrZvmBIhkquHDh2NmLFq06KdrEydOpGPHjj8r161bN1544QUgdHT37t2bJk2a0KpVK9q0acOoUaPKHcudd95J48aNOfDAAxkzZkyJZa+44gp23nnnn86XLl3Kcccdx6GHHkqLFi0YOXJkueMBJYKfyc+Hs86CU0+FnXaCSZPg8cehbt2kIxOR8sjNzeXoo48mNzc35dfceOONLF++nHnz5jFz5kyGDx/OmjVryhXHggULGDp0KPPnz2f06NH06NGDTZs2FVl2+vTpfP311z+7dtttt9G5c2dmzZrF0KFD6dGjR7ni2UJ9BIQF4h5+GG68MRzfcQdcey3UqJF0ZCJVx1VXwXvvVex7tmwJ999fcpm1a9fy1ltvMWHCBE499VT+8Y9/lPq+69atY9CgQSxevJgddtgBgF/84hd07ty5XPG+8sordOnShR122IFGjRrRuHFjpk6dSps2bX5WbtOmTfTq1Ytnn32Wl19++afrZsa3334LwOrVq9mngsatZ30imDYNLrkEZs0Ko4D69QtDQ0WkanjllVdo3749BxxwAHXq1GHGjBm0bt26xNfk5eXRoEEDdt1111Lf/+qrr2bChAlbXe/SpQu9e/f+2bVPP/2UI4444qfz+vXr8+mnn2712ocffphOnTqx9957/+z6LbfcQrt27XjooYf47rvvGDduXKnxpSJrE8Hq1WEYaP/+YaP4YcPg7LPVDyASl9L+co9Lbm4uV155JRC+nHNzc2ndunWxo2vKOurmX//6V7ljLOizzz7j+eefZ+LEiVs9l5ubS7du3bj22mt55513+P3vf8+8efPYbrvytfLHmgjMrD3wAFANeMzd7yr0/A7AU0BrYCVwrrt/EmdM7uFL/6qr4Isvwq5ht90GKSR+Eckwq1atYvz48cydOxczY9OmTZgZffv2pU6dOlu1wa9atYq6devSuHFjli5dyrfffltqraAsNYJ9992XZcuW/XSen5/Pvvvu+7Mys2bNIi8vj8aNGwOhmapx48bk5eXx+OOPM3r0aADatGnD+vXr+eqrr9hzzz1T/6UUxd1jeRC+/D8C9gdqALOB5oXK9AAejY67AM+V9r6tW7f2bZWX537SSe7g3qqV+7Rp2/xWIpKCBQsWJPr5AwYM8O7du//s2rHHHuuTJk3y9evXe8OGDX+K8ZNPPvEGDRr4N9984+7uvXr18m7duvkPP/zg7u4rVqzwYcOGlSueefPmeYsWLXz9+vX+8ccfe6NGjXzjxo0lvqZWrVo/Hbdv396feOIJdw+/27333ts3b9681WuK+r0D072Y79U4Rw0dBuS5+8fu/iMwFDitUJnTgCej4xeA4y2m
2SeDB4cF4iZPhgcegKlTw2qhIlJ15ebmcsYZZ/zs2llnnUVubi477LADzzzzDBdddBEtW7bk7LPP5rHHHqN27dpAGKFTr149mjdvzsEHH0zHjh1T6jMoyUEHHUTnzp1p3rw57du3p1+/flSrVg2ADh068Nlnn5X4+nvvvZdBgwZxyCGH0LVrV4YMGVIhE/YsJIqKZ2ZnA+3d/eLo/PfA4e7es0CZeVGZ/Oj8o6jMV4XeqzvQHaBBgwatlyxZUuZ43norJID774dCNTERicnChQtp1qxZ0mFknaJ+72Y2w92L/PM3IzqL3X0gMBAgJydnmzLX0UeHh4iI/FycTUOfAvsVOK8fXSuyjJltD9QmdBqLiEiaxJkIpgFNzKyRmdUgdAaPKFRmBHBhdHw2MN7jaqsSkUTof+n02pbfd2yJwN03Aj2BMcBCYJi7zzezPmbWKSr2OFDHzPKAa4DeRb+biGSimjVrsnLlSiWDNPFoP4KaNWuW6XWxdRbHJScnx6dPn550GCKSAu1Qln7F7VCW8Z3FIpKZqlevXqadsiQZWn1URCTLKRGIiGQ5JQIRkSyXcZ3FZvYlUPapxUFd4KtSS1UtuufsoHvODuW551+6e72insi4RFAeZja9uF7zqkr3nB10z9khrntW05CISJZTIhARyXLZlggGJh1AAnTP2UH3nB1iuees6iMQEZGtZVuNQEREClEiEBHJclUyEZhZezN738zyzGyrFU3NbAczey56/l0za5hAmBUqhXu+xswWmNkcM3vDzH6ZRJwVqbR7LlDuLDNzM8v4oYap3LOZdY7+reeb2bPpjrGipfDfdgMzm2Bms6L/vjskEWdFMbPBZrYi2sGxqOfNzB6Mfh9zzKxVuT+0uM2MM/UBVAM+AvYHagCzgeaFyvQAHo2OuwDPJR13Gu75OGCn6PiybLjnqNwuwJvAFCAn6bjT8O/cBJgF7B6d75l03Gm454HAZdFxc+CTpOMu5z0fC7QC5hXzfAdgFGDAEcC75f3MqlgjOAzIc/eP3f1HYChwWqEypwFPRscvAMdbRewAnZxS79ndJ7j7uuh0CmHHuEyWyr8zwK3A3UBVWAc5lXv+E9DP3b8GcPcVaY6xoqVyzw5s2VW+NlDyDvCVnLu/CawqochpwFMeTAF2M7O9y/OZVTER7AssK3CeH10rsoyHDXRWA3XSEl08Urnngv5I+Isik5V6z1GVeT93/086A4tRKv/OBwAHmNnbZjbFzNqnLbp4pHLPtwDnm1k+MBL4c3pCS0xZ/38vlfYjyDJmdj6QA/wm6VjiZGbbAfcB3RIOJd22JzQPtSXU+t40s1+7+zdJBhWzrsAQd7/XzNoAT5vZwe6+OenAMkVVrBF8CuxX4Lx+dK3IMma2PaE6uTIt0cUjlXvGzE4Argc6ufsPaYotLqXd8y7AwcBEM/uE0JY6IsM7jFP5d84HRrj7BndfDHxASAyZKpV7/iMwDMDd3wFqEhZnq6pS+v+9LKpiIpgGNDGzRmZWg9AZPKJQmRHAhdHx2cB4j3phMlSp92xmhwIDCEkg09uNoZR7dvfV7l7X3Ru6e0NCv0gnd8/kfU5T+W97OKE2gJnVJTQVfZzGGCtaKve8FDgewMyaERLBl2mNMr1GABdEo4eOAFa7+/LyvGGVaxpy941m1hMYQxhxMNjd55tZH2C6u48AHidUH/MInTJdkou4/FK8577AzsDzUb/4UnfvlFjQ5ZTiPVcpKd7zGKCdmS0ANgG93D1ja7sp3vO1wCAzu5rQcdwtk/+wM7NcQjKvG/V73AxUB3D3Rwn9IB2APGAdcFG5PzODf18iIlIBqmLTkIiIlIESgYhIllMiEBHJckoEIiJZTolARCTLKRFIpWRmm8zsvQKPhiWUXVsBnzfEzBZHnzUzmqFa1vd4zMyaR8d/L/Tc5PLGGL3Plt/LPDN71cx2K6V8y0xfjVPip+GjUimZ2Vp337miy5bwHkOA19z9BTNrB/zT3VuU4/3K
HVNp72tmTwIfuPvtJZTvRlh1tWdFxyJVh2oEkhHMbOdoH4WZZjbXzLZaadTM9jazNwv8xXxMdL2dmb0TvfZ5MyvtC/pNoHH02mui95pnZldF12qZ2X/MbHZ0/dzo+kQzyzGzu4Adozj+HT23Nvo51MxOKRDzEDM728yqmVlfM5sWrTF/SQq/lneIFhszs8Oie5xlZpPN7MBoJm4f4NwolnOj2Aeb2dSobFErtkq2SXrtbT30KOpBmBX7XvR4mTALftfoubqEWZVbarRro5/XAtdHx9UI6w3VJXyx14qu/xW4qYjPGwKcHR2fA7wLtAbmArUIs7LnA4cCZwGDCry2dvRzItGeB1tiKlBmS4xnAE9GxzUIq0juCHQHboiu7wBMBxoVEefaAvf3PNA+Ot8V2D46PgF4MTruBjxc4PV3AOdHx7sR1iKqlfS/tx7JPqrcEhNSZXzv7i23nJhZdeAOMzsW2Ez4S/gXwOcFXjMNGByVHe7u75nZbwiblbwdLa1Rg/CXdFH6mtkNhHVq/khYv+Zld/8uiuEl4BhgNHCvmd1NaE76bxnuaxTwgJntALQH3nT376PmqBZmdnZUrjZhsbjFhV6/o5m9F93/QuD1AuWfNLMmhGUWqhfz+e2ATmZ2XXReE2gQvZdkKSUCyRTnAfWA1u6+wcKKojULFnD3N6NEcQowxMzuA74GXnf3ril8Ri93f2HLiZkdX1Qhd//Awl4HHYDbzOwNd++Tyk24+3ozmwicBJxL2GgFwm5Tf3b3MaW8xffu3tLMdiKsv3M58CBhA54J7n5G1LE+sZjXG3CWu7+fSrySHdRHIJmiNrAiSgLHAVvtuWxhH+Yv3H0Q8Bhhu78pwFFmtqXNv5aZHZDiZ/4XON3MdjKzWoRmnf+a2T7AOnd/hrCYX1F7xm6IaiZFeY6wUNiW2gWEL/XLtrzGzA6IPrNIHnabuwK41v63lPqWpYi7FSi6htBEtsUY4M8WVY8srEorWU6JQDLFv4EcM5sLXAAsKqJMW2C2mc0i/LX9gLt/SfhizDWzOYRmoaapfKC7zyT0HUwl9Bk85u6zgF8DU6MmmpuB24p4+UBgzpbO4kLGEjYGGudh+0UIiWsBMNPCpuUDKKXGHsUyh7Axyz3AndG9F3zdBKD5ls5iQs2hehTb/OhcspyGj4qIZDnVCEREspwSgYhIllMiEBHJckoEIiJZTolARCTLKRGIiGQ5JQIRkSz3/5eisarLeuiQAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "plt.title('ROC')\n",
    "plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)\n",
    "plt.legend(loc = 'lower right')\n",
    "plt.ylabel('True Positive Rate')\n",
    "plt.xlabel('False Positive Rate')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Pipeline Example: AutoEncoder"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Build Pipeline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'inputs.0'"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Creating pipeline\n",
    "pipeline_description = Pipeline()\n",
    "pipeline_description.add_input(name='inputs')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "While loading primitive 'tods.data_processing.dataset_to_dataframe', an error has been detected: (networkx 2.5 (/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages), Requirement.parse('networkx==2.4'), {'tamu-axolotl'})\n",
      "Attempting to load primitive 'tods.data_processing.dataset_to_dataframe' without checking requirements.\n"
     ]
    }
   ],
   "source": [
    "# Step 0: dataset_to_dataframe\n",
    "step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))\n",
    "step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')\n",
    "step_0.add_output('produce')\n",
    "pipeline_description.add_step(step_0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "While loading primitive 'tods.data_processing.column_parser', an error has been detected: (networkx 2.5 (/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages), Requirement.parse('networkx==2.4'), {'tamu-axolotl'})\n",
      "Attempting to load primitive 'tods.data_processing.column_parser' without checking requirements.\n"
     ]
    }
   ],
   "source": [
    "# Step 1: column_parser\n",
    "step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))\n",
    "step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')\n",
    "step_1.add_output('produce')\n",
    "pipeline_description.add_step(step_1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "While loading primitive 'tods.data_processing.extract_columns_by_semantic_types', an error has been detected: (networkx 2.5 (/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages), Requirement.parse('networkx==2.4'), {'tamu-axolotl'})\n",
      "Attempting to load primitive 'tods.data_processing.extract_columns_by_semantic_types' without checking requirements.\n"
     ]
    }
   ],
   "source": [
    "# Step 2: extract_columns_by_semantic_types(attributes)\n",
    "step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))\n",
    "step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')\n",
    "step_2.add_output('produce')\n",
    "step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,\n",
    "\t\t\t\t\t\t\t  data=['https://metadata.datadrivendiscovery.org/types/Attribute'])\n",
    "pipeline_description.add_step(step_2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Step 3: extract_columns_by_semantic_types(targets)\n",
    "step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))\n",
    "step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')\n",
    "step_3.add_output('produce')\n",
    "step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,\n",
    "\t\t\t\t\t\t\tdata=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])\n",
    "pipeline_description.add_step(step_3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "attributes = 'steps.2.produce'\n",
    "targets = 'steps.3.produce'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "While loading primitive 'tods.feature_analysis.statistical_maximum', an error has been detected: (networkx 2.5 (/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages), Requirement.parse('networkx==2.4'), {'tamu-axolotl'})\n",
      "Attempting to load primitive 'tods.feature_analysis.statistical_maximum' without checking requirements.\n"
     ]
    }
   ],
   "source": [
    "# Step 4: processing\n",
    "step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_maximum'))\n",
    "step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)\n",
    "step_4.add_output('produce')\n",
    "pipeline_description.add_step(step_4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "While loading primitive 'tods.detection_algorithm.pyod_ae', an error has been detected: (networkx 2.5 (/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages), Requirement.parse('networkx==2.4'), {'tamu-axolotl'})\n",
      "Attempting to load primitive 'tods.detection_algorithm.pyod_ae' without checking requirements.\n"
     ]
    }
   ],
   "source": [
    "# Step 5: algorithm\n",
    "step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_ae'))\n",
    "step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')\n",
    "step_5.add_output('produce')\n",
    "pipeline_description.add_step(step_5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "While loading primitive 'tods.data_processing.construct_predictions', an error has been detected: (networkx 2.5 (/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages), Requirement.parse('networkx==2.4'), {'tamu-axolotl'})\n",
      "Attempting to load primitive 'tods.data_processing.construct_predictions' without checking requirements.\n"
     ]
    }
   ],
   "source": [
    "# Step 6: Predictions\n",
    "step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.construct_predictions'))\n",
    "step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce')\n",
    "step_6.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')\n",
    "step_6.add_output('produce')\n",
    "pipeline_description.add_step(step_6)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'outputs.0'"
      ]
     },
     "execution_count": 48,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Final Output\n",
    "pipeline_description.add_output(name='output predictions', data_reference='steps.6.produce')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{\"id\": \"5ea6f8e5-e938-43e3-9dd4-4c9451bb8821\", \"schema\": \"https://metadata.datadrivendiscovery.org/schemas/v0/pipeline.json\", \"created\": \"2021-04-14T16:15:48.973138Z\", \"inputs\": [{\"name\": \"inputs\"}], \"outputs\": [{\"data\": \"steps.6.produce\", \"name\": \"output predictions\"}], \"steps\": [{\"type\": \"PRIMITIVE\", \"primitive\": {\"id\": \"4b42ce1e-9b98-4a25-b68e-fad13311eb65\", \"version\": \"0.3.0\", \"python_path\": \"d3m.primitives.tods.data_processing.dataset_to_dataframe\", \"name\": \"Extract a DataFrame from a Dataset\", \"digest\": \"fb5cd27ebf69b9587b23940618071ba9ffe9f47ebd7772797d61ae0521f92515\"}, \"arguments\": {\"inputs\": {\"type\": \"CONTAINER\", \"data\": \"inputs.0\"}}, \"outputs\": [{\"id\": \"produce\"}]}, {\"type\": \"PRIMITIVE\", \"primitive\": {\"id\": \"d510cb7a-1782-4f51-b44c-58f0236e47c7\", \"version\": \"0.6.0\", \"python_path\": \"d3m.primitives.tods.data_processing.column_parser\", \"name\": \"Parses strings into their types\", \"digest\": \"62af3e97e2535681a0b1320e4ac97edeba15895862a46244ab079c47ce56958d\"}, \"arguments\": {\"inputs\": {\"type\": \"CONTAINER\", \"data\": \"steps.0.produce\"}}, \"outputs\": [{\"id\": \"produce\"}]}, {\"type\": \"PRIMITIVE\", \"primitive\": {\"id\": \"4503a4c6-42f7-45a1-a1d4-ed69699cf5e1\", \"version\": \"0.4.0\", \"python_path\": \"d3m.primitives.tods.data_processing.extract_columns_by_semantic_types\", \"name\": \"Extracts columns by semantic type\", \"digest\": \"d4c8204514d840de1b5acad9831f9d5581b41f425df3d14051336abdeacdf1b2\"}, \"arguments\": {\"inputs\": {\"type\": \"CONTAINER\", \"data\": \"steps.1.produce\"}}, \"outputs\": [{\"id\": \"produce\"}], \"hyperparams\": {\"semantic_types\": {\"type\": \"VALUE\", \"data\": [\"https://metadata.datadrivendiscovery.org/types/Attribute\"]}}}, {\"type\": \"PRIMITIVE\", \"primitive\": {\"id\": \"4503a4c6-42f7-45a1-a1d4-ed69699cf5e1\", \"version\": \"0.4.0\", \"python_path\": 
\"d3m.primitives.tods.data_processing.extract_columns_by_semantic_types\", \"name\": \"Extracts columns by semantic type\", \"digest\": \"d4c8204514d840de1b5acad9831f9d5581b41f425df3d14051336abdeacdf1b2\"}, \"arguments\": {\"inputs\": {\"type\": \"CONTAINER\", \"data\": \"steps.0.produce\"}}, \"outputs\": [{\"id\": \"produce\"}], \"hyperparams\": {\"semantic_types\": {\"type\": \"VALUE\", \"data\": [\"https://metadata.datadrivendiscovery.org/types/TrueTarget\"]}}}, {\"type\": \"PRIMITIVE\", \"primitive\": {\"id\": \"3b448057-ac26-4f1b-96b6-141782f16a54\", \"version\": \"0.1.0\", \"python_path\": \"d3m.primitives.tods.feature_analysis.statistical_maximum\", \"name\": \"Time Series Decompostional\", \"digest\": \"922b594bd6c0894d57f6ebf5a54ccae6d69dab67326bd591c8c25e3a3dea6781\"}, \"arguments\": {\"inputs\": {\"type\": \"CONTAINER\", \"data\": \"steps.2.produce\"}}, \"outputs\": [{\"id\": \"produce\"}]}, {\"type\": \"PRIMITIVE\", \"primitive\": {\"id\": \"67e7fcdf-d645-3417-9aa4-85cd369487d9\", \"version\": \"0.0.1\", \"python_path\": \"d3m.primitives.tods.detection_algorithm.pyod_ae\", \"name\": \"TODS.anomaly_detection_primitives.AutoEncoder\"}, \"arguments\": {\"inputs\": {\"type\": \"CONTAINER\", \"data\": \"steps.4.produce\"}}, \"outputs\": [{\"id\": \"produce\"}]}, {\"type\": \"PRIMITIVE\", \"primitive\": {\"id\": \"8d38b340-f83f-4877-baaa-162f8e551736\", \"version\": \"0.3.0\", \"python_path\": \"d3m.primitives.tods.data_processing.construct_predictions\", \"name\": \"Construct pipeline predictions output\", \"digest\": \"33d90bfb7f97f47a6de5372c5f912c26fca8da2d2777661651c69687ad6f9950\"}, \"arguments\": {\"inputs\": {\"type\": \"CONTAINER\", \"data\": \"steps.5.produce\"}, \"reference\": {\"type\": \"CONTAINER\", \"data\": \"steps.1.produce\"}}, \"outputs\": [{\"id\": \"produce\"}]}], \"digest\": \"fc87321fbbe0b4faa956958d39d41d2cafd02700a3ed7ba80b01e80cace8d07e\"}\n"
     ]
    }
   ],
   "source": [
    "# Output to json\n",
    "data = pipeline_description.to_json()\n",
    "with open('autoencoder_pipeline.json', 'w') as f:\n",
    "    f.write(data)\n",
    "    print(data)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Run Pipeline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [],
   "source": [
    "this_path = os.path.dirname(os.path.abspath(\"__file__\"))\n",
    "default_data_path = os.path.join(this_path, '../../datasets/anomaly/raw_data/yahoo_sub_5.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "_StoreAction(option_strings=['--pipeline_path'], dest='pipeline_path', nargs=None, const=None, default='/Users/wangyanghe/Desktop/Research/Tods Notebook/src/tods/examples/sk_examples/autoencoder_pipeline.json', type=None, choices=None, help='Input the path of the pre-built pipeline description', metavar=None)"
      ]
     },
     "execution_count": 64,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "parser = argparse.ArgumentParser(description='Arguments for running predefined pipeline.')\n",
    "parser.add_argument('--table_path', type=str, default=default_data_path,\n",
    "                    help='Input the path of the input data table')\n",
    "parser.add_argument('--target_index', type=int, default=6,\n",
    "                    help='Index of the ground truth (for evaluation)')\n",
    "parser.add_argument('--metric',type=str, default='F1_MACRO',\n",
    "                    help='Evaluation Metric (F1, F1_MACRO)')\n",
    "parser.add_argument('--pipeline_path', \n",
    "                    default=os.path.join(this_path, 'autoencoder_pipeline.json'),\n",
    "                    help='Input the path of the pre-built pipeline description')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [],
   "source": [
    "args, unknown = parser.parse_known_args()\n",
    "table_path = args.table_path \n",
    "target_index = args.target_index # what column is the target\n",
    "pipeline_path = args.pipeline_path\n",
    "metric = args.metric # F1 on both label 0 and 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Read data and generate dataset\n",
    "df = pd.read_csv(table_path)\n",
    "dataset = generate_dataset(df, target_index)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the default pipeline\n",
    "pipeline = load_pipeline(pipeline_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Not all provided hyper-parameters for the data preparation pipeline 79ce71bd-db96-494b-a455-14f2e2ac5040 were used: ['method', 'number_of_folds', 'randomSeed', 'shuffle', 'stratified']\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential_2\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "dense_2 (Dense)              (None, 12)                156       \n",
      "_________________________________________________________________\n",
      "dropout_2 (Dropout)          (None, 12)                0         \n",
      "_________________________________________________________________\n",
      "dense_3 (Dense)              (None, 12)                156       \n",
      "_________________________________________________________________\n",
      "dropout_3 (Dropout)          (None, 12)                0         \n",
      "_________________________________________________________________\n",
      "dense_4 (Dense)              (None, 1)                 13        \n",
      "_________________________________________________________________\n",
      "dropout_4 (Dropout)          (None, 1)                 0         \n",
      "_________________________________________________________________\n",
      "dense_5 (Dense)              (None, 4)                 8         \n",
      "_________________________________________________________________\n",
      "dropout_5 (Dropout)          (None, 4)                 0         \n",
      "_________________________________________________________________\n",
      "dense_6 (Dense)              (None, 1)                 5         \n",
      "_________________________________________________________________\n",
      "dropout_6 (Dropout)          (None, 1)                 0         \n",
      "_________________________________________________________________\n",
      "dense_7 (Dense)              (None, 12)                24        \n",
      "=================================================================\n",
      "Total params: 362\n",
      "Trainable params: 362\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n",
      "None\n",
      "Epoch 1/100\n",
      "40/40 [==============================] - 0s 6ms/step - loss: 2.1020 - val_loss: 1.3966\n",
      "Epoch 2/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.8250 - val_loss: 1.2834\n",
      "Epoch 3/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.7095 - val_loss: 1.2056\n",
      "Epoch 4/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.6036 - val_loss: 1.1504\n",
      "Epoch 5/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.5416 - val_loss: 1.1075\n",
      "Epoch 6/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.4905 - val_loss: 1.0713\n",
      "Epoch 7/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.4248 - val_loss: 1.0404\n",
      "Epoch 8/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.4080 - val_loss: 1.0133\n",
      "Epoch 9/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.3664 - val_loss: 0.9888\n",
      "Epoch 10/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.3319 - val_loss: 0.9664\n",
      "Epoch 11/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.2825 - val_loss: 0.9456\n",
      "Epoch 12/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.2695 - val_loss: 0.9260\n",
      "Epoch 13/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.2545 - val_loss: 0.9075\n",
      "Epoch 14/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.2153 - val_loss: 0.8899\n",
      "Epoch 15/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.2071 - val_loss: 0.8733\n",
      "Epoch 16/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.1693 - val_loss: 0.8575\n",
      "Epoch 17/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.1569 - val_loss: 0.8424\n",
      "Epoch 18/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.1470 - val_loss: 0.8280\n",
      "Epoch 19/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.1229 - val_loss: 0.8143\n",
      "Epoch 20/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.1088 - val_loss: 0.8011\n",
      "Epoch 21/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.0923 - val_loss: 0.7885\n",
      "Epoch 22/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.0745 - val_loss: 0.7764\n",
      "Epoch 23/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.0592 - val_loss: 0.7648\n",
      "Epoch 24/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.0476 - val_loss: 0.7537\n",
      "Epoch 25/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.0341 - val_loss: 0.7430\n",
      "Epoch 26/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.0216 - val_loss: 0.7328\n",
      "Epoch 27/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.0110 - val_loss: 0.7230\n",
      "Epoch 28/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9972 - val_loss: 0.7136\n",
      "Epoch 29/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.9867 - val_loss: 0.7046\n",
      "Epoch 30/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.9769 - val_loss: 0.6959\n",
      "Epoch 31/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.9668 - val_loss: 0.6876\n",
      "Epoch 32/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9543 - val_loss: 0.6797\n",
      "Epoch 33/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9452 - val_loss: 0.6720\n",
      "Epoch 34/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.9357 - val_loss: 0.6647\n",
      "Epoch 35/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.9327 - val_loss: 0.6576\n",
      "Epoch 36/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.9219 - val_loss: 0.6509\n",
      "Epoch 37/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9144 - val_loss: 0.6443\n",
      "Epoch 38/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.9061 - val_loss: 0.6381\n",
      "Epoch 39/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8996 - val_loss: 0.6321\n",
      "Epoch 40/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8924 - val_loss: 0.6263\n",
      "Epoch 41/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8858 - val_loss: 0.6207\n",
      "Epoch 42/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8793 - val_loss: 0.6153\n",
      "Epoch 43/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8744 - val_loss: 0.6102\n",
      "Epoch 44/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8671 - val_loss: 0.6052\n",
      "Epoch 45/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8610 - val_loss: 0.6004\n",
      "Epoch 46/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8574 - val_loss: 0.5958\n",
      "Epoch 47/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8517 - val_loss: 0.5913\n",
      "Epoch 48/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8469 - val_loss: 0.5870\n",
      "Epoch 49/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8431 - val_loss: 0.5829\n",
      "Epoch 50/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8371 - val_loss: 0.5789\n",
      "Epoch 51/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8338 - val_loss: 0.5750\n",
      "Epoch 52/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8288 - val_loss: 0.5713\n",
      "Epoch 53/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8245 - val_loss: 0.5677\n",
      "Epoch 54/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8207 - val_loss: 0.5642\n",
      "Epoch 55/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8179 - val_loss: 0.5608\n",
      "Epoch 56/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8163 - val_loss: 0.5576\n",
      "Epoch 57/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8105 - val_loss: 0.5545\n",
      "Epoch 58/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8069 - val_loss: 0.5514\n",
      "Epoch 59/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8037 - val_loss: 0.5485\n",
      "Epoch 60/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8004 - val_loss: 0.5457\n",
      "Epoch 61/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7974 - val_loss: 0.5429\n",
      "Epoch 62/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7941 - val_loss: 0.5403\n",
      "Epoch 63/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7915 - val_loss: 0.5377\n",
      "Epoch 64/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7891 - val_loss: 0.5352\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 65/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7860 - val_loss: 0.5328\n",
      "Epoch 66/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7836 - val_loss: 0.5305\n",
      "Epoch 67/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7811 - val_loss: 0.5282\n",
      "Epoch 68/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7792 - val_loss: 0.5260\n",
      "Epoch 69/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7770 - val_loss: 0.5238\n",
      "Epoch 70/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7745 - val_loss: 0.5218\n",
      "Epoch 71/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7725 - val_loss: 0.5197\n",
      "Epoch 72/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7704 - val_loss: 0.5178\n",
      "Epoch 73/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7683 - val_loss: 0.5159\n",
      "Epoch 74/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7663 - val_loss: 0.5141\n",
      "Epoch 75/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7644 - val_loss: 0.5123\n",
      "Epoch 76/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7627 - val_loss: 0.5106\n",
      "Epoch 77/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7608 - val_loss: 0.5089\n",
      "Epoch 78/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7591 - val_loss: 0.5073\n",
      "Epoch 79/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7576 - val_loss: 0.5057\n",
      "Epoch 80/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7559 - val_loss: 0.5041\n",
      "Epoch 81/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7544 - val_loss: 0.5026\n",
      "Epoch 82/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7529 - val_loss: 0.5011\n",
      "Epoch 83/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7514 - val_loss: 0.4997\n",
      "Epoch 84/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7500 - val_loss: 0.4983\n",
      "Epoch 85/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7486 - val_loss: 0.4970\n",
      "Epoch 86/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7473 - val_loss: 0.4957\n",
      "Epoch 87/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7460 - val_loss: 0.4944\n",
      "Epoch 88/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7448 - val_loss: 0.4931\n",
      "Epoch 89/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7436 - val_loss: 0.4919\n",
      "Epoch 90/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7424 - val_loss: 0.4907\n",
      "Epoch 91/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7412 - val_loss: 0.4896\n",
      "Epoch 92/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7402 - val_loss: 0.4885\n",
      "Epoch 93/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7390 - val_loss: 0.4874\n",
      "Epoch 94/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7380 - val_loss: 0.4863\n",
      "Epoch 95/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7370 - val_loss: 0.4853\n",
      "Epoch 96/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7360 - val_loss: 0.4842\n",
      "Epoch 97/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7350 - val_loss: 0.4833\n",
      "Epoch 98/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7341 - val_loss: 0.4823\n",
      "Epoch 99/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7332 - val_loss: 0.4814\n",
      "Epoch 100/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7323 - val_loss: 0.4804\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/validation.py:933: FutureWarning: Passing attributes to check_is_fitted is deprecated and will be removed in 0.23. The attributes argument is ignored.\n",
      "  \"argument is ignored.\", FutureWarning)\n",
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/validation.py:933: FutureWarning: Passing attributes to check_is_fitted is deprecated and will be removed in 0.23. The attributes argument is ignored.\n",
      "  \"argument is ignored.\", FutureWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'method_called': 'evaluate',\n",
      " 'outputs': \"[{'outputs.0':      d3mIndex  anomaly\"\n",
      "            '0           0        1'\n",
      "            '1           1        0'\n",
      "            '2           2        1'\n",
      "            '3           3        1'\n",
      "            '4           4        1'\n",
      "            '...       ...      ...'\n",
      "            '1395     1395        1'\n",
      "            '1396     1396        0'\n",
      "            '1397     1397        1'\n",
      "            '1398     1398        1'\n",
      "            '1399     1399        1'\n",
      "            ''\n",
      "            \"[1400 rows x 2 columns]}, {'outputs.0':      d3mIndex  anomaly\"\n",
      "            '0           0        1'\n",
      "            '1           1        0'\n",
      "            '2           2        1'\n",
      "            '3           3        1'\n",
      "            '4           4        1'\n",
      "            '...       ...      ...'\n",
      "            '1395     1395        1'\n",
      "            '1396     1396        0'\n",
      "            '1397     1397        1'\n",
      "            '1398     1398        1'\n",
      "            '1399     1399        1'\n",
      "            ''\n",
      "            '[1400 rows x 2 columns]}]',\n",
      " 'pipeline': '<d3m.metadata.pipeline.Pipeline object at 0x156c41358>',\n",
      " 'scores': '     metric     value  normalized  randomSeed  fold'\n",
      "           '0  F1_MACRO  0.509059    0.509059           0     0',\n",
      " 'status': 'COMPLETED'}\n"
     ]
    }
   ],
   "source": [
    "# Run the pipeline\n",
    "pipeline_result = evaluate_pipeline(dataset, pipeline, metric)\n",
    "print(pipeline_result)\n",
    "#raise pipeline_result.error[0]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Searcher Example:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [],
   "source": [
    "table_path = '../../datasets/anomaly/raw_data/yahoo_sub_5.csv'\n",
    "target_index = 6 # what column is the target\n",
    "time_limit = 30 # How many seconds you wanna search"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {},
   "outputs": [],
   "source": [
    "metric = 'F1_MACRO' # F1 on both label 0 and 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Read data and generate dataset and problem\n",
    "df = pd.read_csv(table_path)\n",
    "dataset = generate_dataset(df, target_index=target_index)\n",
    "problem_description = generate_problem(dataset, metric)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Start backend\n",
    "backend = SimpleRunner(random_seed=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Start search algorithm\n",
    "search = BruteForceSearch(problem_description=problem_description,\n",
    "                          backend=backend)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Not all provided hyper-parameters for the data preparation pipeline 79ce71bd-db96-494b-a455-14f2e2ac5040 were used: ['method', 'number_of_folds', 'randomSeed', 'shuffle', 'stratified']\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential_3\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "dense_8 (Dense)              (None, 12)                156       \n",
      "_________________________________________________________________\n",
      "dropout_7 (Dropout)          (None, 12)                0         \n",
      "_________________________________________________________________\n",
      "dense_9 (Dense)              (None, 12)                156       \n",
      "_________________________________________________________________\n",
      "dropout_8 (Dropout)          (None, 12)                0         \n",
      "_________________________________________________________________\n",
      "dense_10 (Dense)             (None, 1)                 13        \n",
      "_________________________________________________________________\n",
      "dropout_9 (Dropout)          (None, 1)                 0         \n",
      "_________________________________________________________________\n",
      "dense_11 (Dense)             (None, 4)                 8         \n",
      "_________________________________________________________________\n",
      "dropout_10 (Dropout)         (None, 4)                 0         \n",
      "_________________________________________________________________\n",
      "dense_12 (Dense)             (None, 1)                 5         \n",
      "_________________________________________________________________\n",
      "dropout_11 (Dropout)         (None, 1)                 0         \n",
      "_________________________________________________________________\n",
      "dense_13 (Dense)             (None, 12)                24        \n",
      "=================================================================\n",
      "Total params: 362\n",
      "Trainable params: 362\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n",
      "None\n",
      "Epoch 1/100\n",
      "40/40 [==============================] - 0s 4ms/step - loss: 1.5944 - val_loss: 1.2184\n",
      "Epoch 2/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.3842 - val_loss: 1.1148\n",
      "Epoch 3/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.2952 - val_loss: 1.0463\n",
      "Epoch 4/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.2214 - val_loss: 0.9919\n",
      "Epoch 5/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.1586 - val_loss: 0.9538\n",
      "Epoch 6/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.1199 - val_loss: 0.9192\n",
      "Epoch 7/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.0858 - val_loss: 0.8896\n",
      "Epoch 8/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.0449 - val_loss: 0.8645\n",
      "Epoch 9/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.0290 - val_loss: 0.8419\n",
      "Epoch 10/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.0031 - val_loss: 0.8217\n",
      "Epoch 11/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9821 - val_loss: 0.8030\n",
      "Epoch 12/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9619 - val_loss: 0.7847\n",
      "Epoch 13/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9446 - val_loss: 0.7676\n",
      "Epoch 14/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9184 - val_loss: 0.7520\n",
      "Epoch 15/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9076 - val_loss: 0.7376\n",
      "Epoch 16/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8856 - val_loss: 0.7240\n",
      "Epoch 17/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8732 - val_loss: 0.7110\n",
      "Epoch 18/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8554 - val_loss: 0.6987\n",
      "Epoch 19/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8449 - val_loss: 0.6868\n",
      "Epoch 20/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8318 - val_loss: 0.6762\n",
      "Epoch 21/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8222 - val_loss: 0.6654\n",
      "Epoch 22/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8087 - val_loss: 0.6556\n",
      "Epoch 23/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7972 - val_loss: 0.6465\n",
      "Epoch 24/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7934 - val_loss: 0.6375\n",
      "Epoch 25/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7774 - val_loss: 0.6290\n",
      "Epoch 26/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7675 - val_loss: 0.6209\n",
      "Epoch 27/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7598 - val_loss: 0.6133\n",
      "Epoch 28/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7520 - val_loss: 0.6057\n",
      "Epoch 29/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7446 - val_loss: 0.5991\n",
      "Epoch 30/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7358 - val_loss: 0.5924\n",
      "Epoch 31/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7289 - val_loss: 0.5861\n",
      "Epoch 32/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7225 - val_loss: 0.5800\n",
      "Epoch 33/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7172 - val_loss: 0.5745\n",
      "Epoch 34/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7099 - val_loss: 0.5689\n",
      "Epoch 35/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7043 - val_loss: 0.5637\n",
      "Epoch 36/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6980 - val_loss: 0.5589\n",
      "Epoch 37/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6935 - val_loss: 0.5542\n",
      "Epoch 38/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6877 - val_loss: 0.5498\n",
      "Epoch 39/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6828 - val_loss: 0.5454\n",
      "Epoch 40/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6788 - val_loss: 0.5413\n",
      "Epoch 41/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6742 - val_loss: 0.5376\n",
      "Epoch 42/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6695 - val_loss: 0.5338\n",
      "Epoch 43/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6655 - val_loss: 0.5303\n",
      "Epoch 44/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6616 - val_loss: 0.5269\n",
      "Epoch 45/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6578 - val_loss: 0.5238\n",
      "Epoch 46/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6542 - val_loss: 0.5207\n",
      "Epoch 47/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6505 - val_loss: 0.5178\n",
      "Epoch 48/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6471 - val_loss: 0.5150\n",
      "Epoch 49/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6442 - val_loss: 0.5124\n",
      "Epoch 50/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6410 - val_loss: 0.5098\n",
      "Epoch 51/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6386 - val_loss: 0.5073\n",
      "Epoch 52/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6354 - val_loss: 0.5050\n",
      "Epoch 53/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6326 - val_loss: 0.5028\n",
      "Epoch 54/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6302 - val_loss: 0.5006\n",
      "Epoch 55/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6287 - val_loss: 0.4986\n",
      "Epoch 56/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6254 - val_loss: 0.4966\n",
      "Epoch 57/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6231 - val_loss: 0.4947\n",
      "Epoch 58/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6210 - val_loss: 0.4929\n",
      "Epoch 59/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6195 - val_loss: 0.4911\n",
      "Epoch 60/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6169 - val_loss: 0.4894\n",
      "Epoch 61/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6153 - val_loss: 0.4878\n",
      "Epoch 62/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6132 - val_loss: 0.4863\n",
      "Epoch 63/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6114 - val_loss: 0.4848\n",
      "Epoch 64/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6097 - val_loss: 0.4834\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 65/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6080 - val_loss: 0.4820\n",
      "Epoch 66/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6065 - val_loss: 0.4806\n",
      "Epoch 67/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6050 - val_loss: 0.4794\n",
      "Epoch 68/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6035 - val_loss: 0.4781\n",
      "Epoch 69/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6020 - val_loss: 0.4770\n",
      "Epoch 70/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6007 - val_loss: 0.4758\n",
      "Epoch 71/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5994 - val_loss: 0.4747\n",
      "Epoch 72/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5981 - val_loss: 0.4736\n",
      "Epoch 73/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5969 - val_loss: 0.4726\n",
      "Epoch 74/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5958 - val_loss: 0.4716\n",
      "Epoch 75/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5947 - val_loss: 0.4706\n",
      "Epoch 76/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5937 - val_loss: 0.4697\n",
      "Epoch 77/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5926 - val_loss: 0.4688\n",
      "Epoch 78/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5918 - val_loss: 0.4679\n",
      "Epoch 79/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5907 - val_loss: 0.4671\n",
      "Epoch 80/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5897 - val_loss: 0.4663\n",
      "Epoch 81/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5890 - val_loss: 0.4655\n",
      "Epoch 82/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5880 - val_loss: 0.4647\n",
      "Epoch 83/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5872 - val_loss: 0.4640\n",
      "Epoch 84/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5864 - val_loss: 0.4632\n",
      "Epoch 85/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5856 - val_loss: 0.4626\n",
      "Epoch 86/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5849 - val_loss: 0.4619\n",
      "Epoch 87/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5841 - val_loss: 0.4612\n",
      "Epoch 88/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5834 - val_loss: 0.4606\n",
      "Epoch 89/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5828 - val_loss: 0.4600\n",
      "Epoch 90/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5821 - val_loss: 0.4594\n",
      "Epoch 91/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5815 - val_loss: 0.4588\n",
      "Epoch 92/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5809 - val_loss: 0.4582\n",
      "Epoch 93/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5803 - val_loss: 0.4577\n",
      "Epoch 94/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5797 - val_loss: 0.4572\n",
      "Epoch 95/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5792 - val_loss: 0.4567\n",
      "Epoch 96/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5786 - val_loss: 0.4562\n",
      "Epoch 97/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5781 - val_loss: 0.4557\n",
      "Epoch 98/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5776 - val_loss: 0.4552\n",
      "Epoch 99/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5771 - val_loss: 0.4548\n",
      "Epoch 100/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5766 - val_loss: 0.4543\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/validation.py:933: FutureWarning: Passing attributes to check_is_fitted is deprecated and will be removed in 0.23. The attributes argument is ignored.\n",
      "  \"argument is ignored.\", FutureWarning)\n",
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/validation.py:933: FutureWarning: Passing attributes to check_is_fitted is deprecated and will be removed in 0.23. The attributes argument is ignored.\n",
      "  \"argument is ignored.\", FutureWarning)\n",
      "Traceback (most recent call last):\n",
      "  File \"/Users/wangyanghe/Desktop/Research/tods/tods/searcher/brute_force_search.py\", line 62, in _search\n",
      "    for error in pipeline_result.error:\n",
      "TypeError: 'NoneType' object is not iterable\n",
      "Not all provided hyper-parameters for the data preparation pipeline 79ce71bd-db96-494b-a455-14f2e2ac5040 were used: ['method', 'number_of_folds', 'randomSeed', 'shuffle', 'stratified']\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential_4\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "dense_14 (Dense)             (None, 12)                156       \n",
      "_________________________________________________________________\n",
      "dropout_12 (Dropout)         (None, 12)                0         \n",
      "_________________________________________________________________\n",
      "dense_15 (Dense)             (None, 12)                156       \n",
      "_________________________________________________________________\n",
      "dropout_13 (Dropout)         (None, 12)                0         \n",
      "_________________________________________________________________\n",
      "dense_16 (Dense)             (None, 1)                 13        \n",
      "_________________________________________________________________\n",
      "dropout_14 (Dropout)         (None, 1)                 0         \n",
      "_________________________________________________________________\n",
      "dense_17 (Dense)             (None, 4)                 8         \n",
      "_________________________________________________________________\n",
      "dropout_15 (Dropout)         (None, 4)                 0         \n",
      "_________________________________________________________________\n",
      "dense_18 (Dense)             (None, 1)                 5         \n",
      "_________________________________________________________________\n",
      "dropout_16 (Dropout)         (None, 1)                 0         \n",
      "_________________________________________________________________\n",
      "dense_19 (Dense)             (None, 12)                24        \n",
      "=================================================================\n",
      "Total params: 362\n",
      "Trainable params: 362\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n",
      "None\n",
      "Epoch 1/100\n",
      "40/40 [==============================] - 0s 5ms/step - loss: 1.6224 - val_loss: 1.0535\n",
      "Epoch 2/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.4768 - val_loss: 0.9671\n",
      "Epoch 3/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.3657 - val_loss: 0.9039\n",
      "Epoch 4/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.2853 - val_loss: 0.8548\n",
      "Epoch 5/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.2478 - val_loss: 0.8155\n",
      "Epoch 6/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.1850 - val_loss: 0.7841\n",
      "Epoch 7/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.1566 - val_loss: 0.7577\n",
      "Epoch 8/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.1255 - val_loss: 0.7338\n",
      "Epoch 9/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.0796 - val_loss: 0.7136\n",
      "Epoch 10/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.0545 - val_loss: 0.6954\n",
      "Epoch 11/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.0291 - val_loss: 0.6783\n",
      "Epoch 12/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.0136 - val_loss: 0.6627\n",
      "Epoch 13/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9984 - val_loss: 0.6483\n",
      "Epoch 14/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.9704 - val_loss: 0.6347\n",
      "Epoch 15/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9495 - val_loss: 0.6222\n",
      "Epoch 16/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9450 - val_loss: 0.6098\n",
      "Epoch 17/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.9221 - val_loss: 0.5983\n",
      "Epoch 18/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.9060 - val_loss: 0.5875\n",
      "Epoch 19/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8961 - val_loss: 0.5772\n",
      "Epoch 20/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8758 - val_loss: 0.5674\n",
      "Epoch 21/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8635 - val_loss: 0.5580\n",
      "Epoch 22/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8520 - val_loss: 0.5492\n",
      "Epoch 23/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8367 - val_loss: 0.5407\n",
      "Epoch 24/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8265 - val_loss: 0.5328\n",
      "Epoch 25/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8198 - val_loss: 0.5251\n",
      "Epoch 26/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8084 - val_loss: 0.5180\n",
      "Epoch 27/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7999 - val_loss: 0.5108\n",
      "Epoch 28/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7929 - val_loss: 0.5042\n",
      "Epoch 29/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7812 - val_loss: 0.4979\n",
      "Epoch 30/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7739 - val_loss: 0.4918\n",
      "Epoch 31/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7629 - val_loss: 0.4861\n",
      "Epoch 32/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7554 - val_loss: 0.4807\n",
      "Epoch 33/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7480 - val_loss: 0.4754\n",
      "Epoch 34/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7435 - val_loss: 0.4704\n",
      "Epoch 35/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7358 - val_loss: 0.4656\n",
      "Epoch 36/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7309 - val_loss: 0.4610\n",
      "Epoch 37/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7260 - val_loss: 0.4567\n",
      "Epoch 38/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7171 - val_loss: 0.4525\n",
      "Epoch 39/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7124 - val_loss: 0.4485\n",
      "Epoch 40/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7065 - val_loss: 0.4447\n",
      "Epoch 41/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7011 - val_loss: 0.4412\n",
      "Epoch 42/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6965 - val_loss: 0.4377\n",
      "Epoch 43/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6914 - val_loss: 0.4343\n",
      "Epoch 44/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6886 - val_loss: 0.4311\n",
      "Epoch 45/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6872 - val_loss: 0.4280\n",
      "Epoch 46/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6799 - val_loss: 0.4251\n",
      "Epoch 47/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6760 - val_loss: 0.4223\n",
      "Epoch 48/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6714 - val_loss: 0.4196\n",
      "Epoch 49/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6685 - val_loss: 0.4171\n",
      "Epoch 50/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6642 - val_loss: 0.4146\n",
      "Epoch 51/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6605 - val_loss: 0.4123\n",
      "Epoch 52/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6584 - val_loss: 0.4100\n",
      "Epoch 53/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6540 - val_loss: 0.4078\n",
      "Epoch 54/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6517 - val_loss: 0.4057\n",
      "Epoch 55/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6498 - val_loss: 0.4037\n",
      "Epoch 56/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6457 - val_loss: 0.4018\n",
      "Epoch 57/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6431 - val_loss: 0.3999\n",
      "Epoch 58/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6410 - val_loss: 0.3982\n",
      "Epoch 59/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6376 - val_loss: 0.3964\n",
      "Epoch 60/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6357 - val_loss: 0.3948\n",
      "Epoch 61/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6334 - val_loss: 0.3932\n",
      "Epoch 62/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6313 - val_loss: 0.3917\n",
      "Epoch 63/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6290 - val_loss: 0.3902\n",
      "Epoch 64/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6271 - val_loss: 0.3888\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 65/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6252 - val_loss: 0.3874\n",
      "Epoch 66/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6233 - val_loss: 0.3861\n",
      "Epoch 67/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6213 - val_loss: 0.3848\n",
      "Epoch 68/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6195 - val_loss: 0.3836\n",
      "Epoch 69/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6180 - val_loss: 0.3824\n",
      "Epoch 70/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6162 - val_loss: 0.3813\n",
      "Epoch 71/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6150 - val_loss: 0.3802\n",
      "Epoch 72/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6135 - val_loss: 0.3791\n",
      "Epoch 73/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6118 - val_loss: 0.3781\n",
      "Epoch 74/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6104 - val_loss: 0.3771\n",
      "Epoch 75/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6094 - val_loss: 0.3761\n",
      "Epoch 76/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6080 - val_loss: 0.3752\n",
      "Epoch 77/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6068 - val_loss: 0.3743\n",
      "Epoch 78/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6055 - val_loss: 0.3734\n",
      "Epoch 79/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6045 - val_loss: 0.3726\n",
      "Epoch 80/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6033 - val_loss: 0.3717\n",
      "Epoch 81/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6023 - val_loss: 0.3710\n",
      "Epoch 82/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6012 - val_loss: 0.3702\n",
      "Epoch 83/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6002 - val_loss: 0.3694\n",
      "Epoch 84/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5992 - val_loss: 0.3687\n",
      "Epoch 85/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5983 - val_loss: 0.3680\n",
      "Epoch 86/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5973 - val_loss: 0.3674\n",
      "Epoch 87/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5964 - val_loss: 0.3667\n",
      "Epoch 88/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5956 - val_loss: 0.3661\n",
      "Epoch 89/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5948 - val_loss: 0.3655\n",
      "Epoch 90/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5941 - val_loss: 0.3649\n",
      "Epoch 91/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5933 - val_loss: 0.3643\n",
      "Epoch 92/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5926 - val_loss: 0.3637\n",
      "Epoch 93/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5920 - val_loss: 0.3632\n",
      "Epoch 94/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5913 - val_loss: 0.3626\n",
      "Epoch 95/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5906 - val_loss: 0.3621\n",
      "Epoch 96/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5900 - val_loss: 0.3616\n",
      "Epoch 97/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5894 - val_loss: 0.3611\n",
      "Epoch 98/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5888 - val_loss: 0.3607\n",
      "Epoch 99/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5883 - val_loss: 0.3602\n",
      "Epoch 100/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5878 - val_loss: 0.3598\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/validation.py:933: FutureWarning: Passing attributes to check_is_fitted is deprecated and will be removed in 0.23. The attributes argument is ignored.\n",
      "  \"argument is ignored.\", FutureWarning)\n",
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/validation.py:933: FutureWarning: Passing attributes to check_is_fitted is deprecated and will be removed in 0.23. The attributes argument is ignored.\n",
      "  \"argument is ignored.\", FutureWarning)\n",
      "Traceback (most recent call last):\n",
      "  File \"/Users/wangyanghe/Desktop/Research/tods/tods/searcher/brute_force_search.py\", line 62, in _search\n",
      "    for error in pipeline_result.error:\n",
      "TypeError: 'NoneType' object is not iterable\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential_5\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "dense_20 (Dense)             (None, 12)                156       \n",
      "_________________________________________________________________\n",
      "dropout_17 (Dropout)         (None, 12)                0         \n",
      "_________________________________________________________________\n",
      "dense_21 (Dense)             (None, 12)                156       \n",
      "_________________________________________________________________\n",
      "dropout_18 (Dropout)         (None, 12)                0         \n",
      "_________________________________________________________________\n",
      "dense_22 (Dense)             (None, 1)                 13        \n",
      "_________________________________________________________________\n",
      "dropout_19 (Dropout)         (None, 1)                 0         \n",
      "_________________________________________________________________\n",
      "dense_23 (Dense)             (None, 4)                 8         \n",
      "_________________________________________________________________\n",
      "dropout_20 (Dropout)         (None, 4)                 0         \n",
      "_________________________________________________________________\n",
      "dense_24 (Dense)             (None, 1)                 5         \n",
      "_________________________________________________________________\n",
      "dropout_21 (Dropout)         (None, 1)                 0         \n",
      "_________________________________________________________________\n",
      "dense_25 (Dense)             (None, 12)                24        \n",
      "=================================================================\n",
      "Total params: 362\n",
      "Trainable params: 362\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n",
      "None\n",
      "Epoch 1/100\n",
      "40/40 [==============================] - 0s 4ms/step - loss: 1.4693 - val_loss: 1.5144\n",
      "Epoch 2/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.3518 - val_loss: 1.4134\n",
      "Epoch 3/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.2985 - val_loss: 1.3370\n",
      "Epoch 4/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.2306 - val_loss: 1.2773\n",
      "Epoch 5/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.1788 - val_loss: 1.2243\n",
      "Epoch 6/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.1182 - val_loss: 1.1844\n",
      "Epoch 7/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.0755 - val_loss: 1.1497\n",
      "Epoch 8/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.0492 - val_loss: 1.1189\n",
      "Epoch 9/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.0257 - val_loss: 1.0919\n",
      "Epoch 10/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.0068 - val_loss: 1.0675\n",
      "Epoch 11/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9761 - val_loss: 1.0451\n",
      "Epoch 12/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9635 - val_loss: 1.0221\n",
      "Epoch 13/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9525 - val_loss: 1.0028\n",
      "Epoch 14/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9207 - val_loss: 0.9840\n",
      "Epoch 15/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9108 - val_loss: 0.9668\n",
      "Epoch 16/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8925 - val_loss: 0.9508\n",
      "Epoch 17/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8695 - val_loss: 0.9353\n",
      "Epoch 18/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.8591 - val_loss: 0.9214\n",
      "Epoch 19/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8465 - val_loss: 0.9071\n",
      "Epoch 20/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8336 - val_loss: 0.8959\n",
      "Epoch 21/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8194 - val_loss: 0.8821\n",
      "Epoch 22/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8122 - val_loss: 0.8705\n",
      "Epoch 23/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7998 - val_loss: 0.8596\n",
      "Epoch 24/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7871 - val_loss: 0.8494\n",
      "Epoch 25/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7770 - val_loss: 0.8404\n",
      "Epoch 26/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7678 - val_loss: 0.8301\n",
      "Epoch 27/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7575 - val_loss: 0.8213\n",
      "Epoch 28/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7487 - val_loss: 0.8130\n",
      "Epoch 29/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7393 - val_loss: 0.8051\n",
      "Epoch 30/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7329 - val_loss: 0.7975\n",
      "Epoch 31/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7236 - val_loss: 0.7904\n",
      "Epoch 32/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7174 - val_loss: 0.7836\n",
      "Epoch 33/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7104 - val_loss: 0.7772\n",
      "Epoch 34/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7031 - val_loss: 0.7711\n",
      "Epoch 35/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6954 - val_loss: 0.7651\n",
      "Epoch 36/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6895 - val_loss: 0.7599\n",
      "Epoch 37/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6836 - val_loss: 0.7544\n",
      "Epoch 38/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6809 - val_loss: 0.7494\n",
      "Epoch 39/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6726 - val_loss: 0.7447\n",
      "Epoch 40/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6698 - val_loss: 0.7402\n",
      "Epoch 41/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6631 - val_loss: 0.7359\n",
      "Epoch 42/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6581 - val_loss: 0.7320\n",
      "Epoch 43/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6547 - val_loss: 0.7279\n",
      "Epoch 44/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6490 - val_loss: 0.7241\n",
      "Epoch 45/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6449 - val_loss: 0.7206\n",
      "Epoch 46/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6427 - val_loss: 0.7173\n",
      "Epoch 47/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6378 - val_loss: 0.7140\n",
      "Epoch 48/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6354 - val_loss: 0.7109\n",
      "Epoch 49/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6302 - val_loss: 0.7080\n",
      "Epoch 50/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6279 - val_loss: 0.7052\n",
      "Epoch 51/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6244 - val_loss: 0.7025\n",
      "Epoch 52/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6204 - val_loss: 0.6999\n",
      "Epoch 53/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6177 - val_loss: 0.6976\n",
      "Epoch 54/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6146 - val_loss: 0.6953\n",
      "Epoch 55/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6117 - val_loss: 0.6929\n",
      "Epoch 56/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6094 - val_loss: 0.6909\n",
      "Epoch 57/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6072 - val_loss: 0.6888\n",
      "Epoch 58/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6045 - val_loss: 0.6868\n",
      "Epoch 59/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6026 - val_loss: 0.6850\n",
      "Epoch 60/100\n",
      "40/40 [==============================] - 0s 3ms/step - loss: 0.5997 - val_loss: 0.6833\n",
      "Epoch 61/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5977 - val_loss: 0.6815\n",
      "Epoch 62/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5954 - val_loss: 0.6798\n",
      "Epoch 63/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5939 - val_loss: 0.6782\n",
      "Epoch 64/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5917 - val_loss: 0.6767\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 65/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5898 - val_loss: 0.6753\n",
      "Epoch 66/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5880 - val_loss: 0.6739\n",
      "Epoch 67/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5865 - val_loss: 0.6726\n",
      "Epoch 68/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5848 - val_loss: 0.6713\n",
      "Epoch 69/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5832 - val_loss: 0.6700\n",
      "Epoch 70/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5817 - val_loss: 0.6689\n",
      "Epoch 71/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5804 - val_loss: 0.6677\n",
      "Epoch 72/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5790 - val_loss: 0.6666\n",
      "Epoch 73/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5777 - val_loss: 0.6655\n",
      "Epoch 74/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5764 - val_loss: 0.6645\n",
      "Epoch 75/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5750 - val_loss: 0.6635\n",
      "Epoch 76/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5739 - val_loss: 0.6626\n",
      "Epoch 77/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5728 - val_loss: 0.6617\n",
      "Epoch 78/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5716 - val_loss: 0.6608\n",
      "Epoch 79/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5706 - val_loss: 0.6599\n",
      "Epoch 80/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5696 - val_loss: 0.6591\n",
      "Epoch 81/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5686 - val_loss: 0.6584\n",
      "Epoch 82/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5676 - val_loss: 0.6576\n",
      "Epoch 83/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5667 - val_loss: 0.6569\n",
      "Epoch 84/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5659 - val_loss: 0.6561\n",
      "Epoch 85/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5651 - val_loss: 0.6554\n",
      "Epoch 86/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5642 - val_loss: 0.6548\n",
      "Epoch 87/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5635 - val_loss: 0.6541\n",
      "Epoch 88/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5627 - val_loss: 0.6535\n",
      "Epoch 89/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5620 - val_loss: 0.6529\n",
      "Epoch 90/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5613 - val_loss: 0.6523\n",
      "Epoch 91/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5606 - val_loss: 0.6518\n",
      "Epoch 92/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5599 - val_loss: 0.6512\n",
      "Epoch 93/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5593 - val_loss: 0.6507\n",
      "Epoch 94/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5587 - val_loss: 0.6502\n",
      "Epoch 95/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5581 - val_loss: 0.6497\n",
      "Epoch 96/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5575 - val_loss: 0.6492\n",
      "Epoch 97/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5570 - val_loss: 0.6487\n",
      "Epoch 98/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5564 - val_loss: 0.6483\n",
      "Epoch 99/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5559 - val_loss: 0.6478\n",
      "Epoch 100/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5554 - val_loss: 0.6474\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/validation.py:933: FutureWarning: Passing attributes to check_is_fitted is deprecated and will be removed in 0.23. The attributes argument is ignored.\n",
      "  \"argument is ignored.\", FutureWarning)\n"
     ]
    }
   ],
   "source": [
    "# Find the best pipeline\n",
    "best_runtime, best_pipeline_result = search.search_fit(input_data=[dataset], time_limit=time_limit)\n",
    "best_pipeline = best_runtime.pipeline\n",
    "best_output = best_pipeline_result.output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Not all provided hyper-parameters for the data preparation pipeline 79ce71bd-db96-494b-a455-14f2e2ac5040 were used: ['method', 'number_of_folds', 'randomSeed', 'shuffle', 'stratified']\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential_6\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "dense_26 (Dense)             (None, 12)                156       \n",
      "_________________________________________________________________\n",
      "dropout_22 (Dropout)         (None, 12)                0         \n",
      "_________________________________________________________________\n",
      "dense_27 (Dense)             (None, 12)                156       \n",
      "_________________________________________________________________\n",
      "dropout_23 (Dropout)         (None, 12)                0         \n",
      "_________________________________________________________________\n",
      "dense_28 (Dense)             (None, 1)                 13        \n",
      "_________________________________________________________________\n",
      "dropout_24 (Dropout)         (None, 1)                 0         \n",
      "_________________________________________________________________\n",
      "dense_29 (Dense)             (None, 4)                 8         \n",
      "_________________________________________________________________\n",
      "dropout_25 (Dropout)         (None, 4)                 0         \n",
      "_________________________________________________________________\n",
      "dense_30 (Dense)             (None, 1)                 5         \n",
      "_________________________________________________________________\n",
      "dropout_26 (Dropout)         (None, 1)                 0         \n",
      "_________________________________________________________________\n",
      "dense_31 (Dense)             (None, 12)                24        \n",
      "=================================================================\n",
      "Total params: 362\n",
      "Trainable params: 362\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n",
      "None\n",
      "Epoch 1/100\n",
      "40/40 [==============================] - 0s 4ms/step - loss: 1.5860 - val_loss: 1.0422\n",
      "Epoch 2/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.4206 - val_loss: 0.9430\n",
      "Epoch 3/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.3349 - val_loss: 0.8805\n",
      "Epoch 4/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.2650 - val_loss: 0.8352\n",
      "Epoch 5/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.1954 - val_loss: 0.7995\n",
      "Epoch 6/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.1571 - val_loss: 0.7708\n",
      "Epoch 7/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.1171 - val_loss: 0.7457\n",
      "Epoch 8/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.0848 - val_loss: 0.7238\n",
      "Epoch 9/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 1.0527 - val_loss: 0.7043\n",
      "Epoch 10/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.0312 - val_loss: 0.6868\n",
      "Epoch 11/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 1.0008 - val_loss: 0.6706\n",
      "Epoch 12/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9809 - val_loss: 0.6556\n",
      "Epoch 13/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9630 - val_loss: 0.6415\n",
      "Epoch 14/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9388 - val_loss: 0.6283\n",
      "Epoch 15/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.9264 - val_loss: 0.6162\n",
      "Epoch 16/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.9051 - val_loss: 0.6044\n",
      "Epoch 17/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8931 - val_loss: 0.5934\n",
      "Epoch 18/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8782 - val_loss: 0.5829\n",
      "Epoch 19/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8635 - val_loss: 0.5730\n",
      "Epoch 20/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8542 - val_loss: 0.5636\n",
      "Epoch 21/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8429 - val_loss: 0.5546\n",
      "Epoch 22/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8253 - val_loss: 0.5461\n",
      "Epoch 23/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8158 - val_loss: 0.5379\n",
      "Epoch 24/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.8028 - val_loss: 0.5302\n",
      "Epoch 25/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7972 - val_loss: 0.5228\n",
      "Epoch 26/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7844 - val_loss: 0.5158\n",
      "Epoch 27/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7754 - val_loss: 0.5091\n",
      "Epoch 28/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7669 - val_loss: 0.5026\n",
      "Epoch 29/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7594 - val_loss: 0.4966\n",
      "Epoch 30/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7521 - val_loss: 0.4907\n",
      "Epoch 31/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7435 - val_loss: 0.4852\n",
      "Epoch 32/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.7363 - val_loss: 0.4799\n",
      "Epoch 33/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7316 - val_loss: 0.4748\n",
      "Epoch 34/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7238 - val_loss: 0.4699\n",
      "Epoch 35/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7178 - val_loss: 0.4653\n",
      "Epoch 36/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7112 - val_loss: 0.4609\n",
      "Epoch 37/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7055 - val_loss: 0.4567\n",
      "Epoch 38/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.7002 - val_loss: 0.4526\n",
      "Epoch 39/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6961 - val_loss: 0.4487\n",
      "Epoch 40/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6912 - val_loss: 0.4450\n",
      "Epoch 41/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6860 - val_loss: 0.4415\n",
      "Epoch 42/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6834 - val_loss: 0.4381\n",
      "Epoch 43/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6771 - val_loss: 0.4348\n",
      "Epoch 44/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6734 - val_loss: 0.4317\n",
      "Epoch 45/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6708 - val_loss: 0.4287\n",
      "Epoch 46/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6658 - val_loss: 0.4258\n",
      "Epoch 47/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6623 - val_loss: 0.4230\n",
      "Epoch 48/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6587 - val_loss: 0.4204\n",
      "Epoch 49/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6559 - val_loss: 0.4179\n",
      "Epoch 50/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6526 - val_loss: 0.4154\n",
      "Epoch 51/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6512 - val_loss: 0.4131\n",
      "Epoch 52/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6469 - val_loss: 0.4109\n",
      "Epoch 53/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6446 - val_loss: 0.4087\n",
      "Epoch 54/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6413 - val_loss: 0.4067\n",
      "Epoch 55/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6389 - val_loss: 0.4047\n",
      "Epoch 56/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6367 - val_loss: 0.4027\n",
      "Epoch 57/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6341 - val_loss: 0.4009\n",
      "Epoch 58/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6321 - val_loss: 0.3991\n",
      "Epoch 59/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6300 - val_loss: 0.3974\n",
      "Epoch 60/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6279 - val_loss: 0.3957\n",
      "Epoch 61/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6264 - val_loss: 0.3941\n",
      "Epoch 62/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6243 - val_loss: 0.3926\n",
      "Epoch 63/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6226 - val_loss: 0.3911\n",
      "Epoch 64/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6214 - val_loss: 0.3897\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 65/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6192 - val_loss: 0.3883\n",
      "Epoch 66/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6176 - val_loss: 0.3870\n",
      "Epoch 67/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6158 - val_loss: 0.3857\n",
      "Epoch 68/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6143 - val_loss: 0.3845\n",
      "Epoch 69/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6130 - val_loss: 0.3833\n",
      "Epoch 70/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6116 - val_loss: 0.3821\n",
      "Epoch 71/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6103 - val_loss: 0.3810\n",
      "Epoch 72/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6090 - val_loss: 0.3799\n",
      "Epoch 73/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6079 - val_loss: 0.3789\n",
      "Epoch 74/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6068 - val_loss: 0.3779\n",
      "Epoch 75/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6058 - val_loss: 0.3769\n",
      "Epoch 76/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6044 - val_loss: 0.3760\n",
      "Epoch 77/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6034 - val_loss: 0.3751\n",
      "Epoch 78/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.6024 - val_loss: 0.3742\n",
      "Epoch 79/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6016 - val_loss: 0.3733\n",
      "Epoch 80/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.6005 - val_loss: 0.3725\n",
      "Epoch 81/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5996 - val_loss: 0.3717\n",
      "Epoch 82/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5987 - val_loss: 0.3709\n",
      "Epoch 83/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5979 - val_loss: 0.3702\n",
      "Epoch 84/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5971 - val_loss: 0.3694\n",
      "Epoch 85/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5963 - val_loss: 0.3687\n",
      "Epoch 86/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5956 - val_loss: 0.3680\n",
      "Epoch 87/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5948 - val_loss: 0.3674\n",
      "Epoch 88/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5941 - val_loss: 0.3667\n",
      "Epoch 89/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5934 - val_loss: 0.3661\n",
      "Epoch 90/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5928 - val_loss: 0.3655\n",
      "Epoch 91/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5922 - val_loss: 0.3649\n",
      "Epoch 92/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5915 - val_loss: 0.3644\n",
      "Epoch 93/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5910 - val_loss: 0.3638\n",
      "Epoch 94/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5903 - val_loss: 0.3633\n",
      "Epoch 95/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5898 - val_loss: 0.3627\n",
      "Epoch 96/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5892 - val_loss: 0.3622\n",
      "Epoch 97/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5887 - val_loss: 0.3617\n",
      "Epoch 98/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5882 - val_loss: 0.3613\n",
      "Epoch 99/100\n",
      "40/40 [==============================] - 0s 1ms/step - loss: 0.5877 - val_loss: 0.3608\n",
      "Epoch 100/100\n",
      "40/40 [==============================] - 0s 2ms/step - loss: 0.5872 - val_loss: 0.3603\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/validation.py:933: FutureWarning: Passing attributes to check_is_fitted is deprecated and will be removed in 0.23. The attributes argument is ignored.\n",
      "  \"argument is ignored.\", FutureWarning)\n",
      "/Users/wangyanghe/anaconda3/envs/tods2/lib/python3.6/site-packages/sklearn/utils/validation.py:933: FutureWarning: Passing attributes to check_is_fitted is deprecated and will be removed in 0.23. The attributes argument is ignored.\n",
      "  \"argument is ignored.\", FutureWarning)\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the best pipeline\n",
    "best_scores = search.evaluate(best_pipeline).scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Search History:\n",
      "----------------------------------------------------\n",
      "Pipeline id: 108e1dce-67e7-45f2-962c-1965d988710a\n",
      "     metric     value  normalized  randomSeed  fold\n",
      "0  F1_MACRO  0.708549    0.708549           0     0\n",
      "----------------------------------------------------\n",
      "Pipeline id: 2a42a07c-0263-427c-b6c8-d9ce45ac0b21\n",
      "     metric     value  normalized  randomSeed  fold\n",
      "0  F1_MACRO  0.616695    0.616695           0     0\n"
     ]
    }
   ],
   "source": [
    "print('Search History:')\n",
    "for pipeline_result in search.history:\n",
    "    print('-' * 52)\n",
    "    print('Pipeline id:', pipeline_result.pipeline.id)\n",
    "    print(pipeline_result.scores)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Best pipeline:\n",
      "----------------------------------------------------\n",
      "Pipeline id: 108e1dce-67e7-45f2-962c-1965d988710a\n",
      "Pipeline json: {\"id\": \"108e1dce-67e7-45f2-962c-1965d988710a\", \"schema\": \"https://metadata.datadrivendiscovery.org/schemas/v0/pipeline.json\", \"created\": \"2021-04-14T16:38:58.226503Z\", \"inputs\": [{\"name\": \"inputs\"}], \"outputs\": [{\"data\": \"steps.7.produce\", \"name\": \"output predictions\"}], \"steps\": [{\"type\": \"PRIMITIVE\", \"primitive\": {\"id\": \"4b42ce1e-9b98-4a25-b68e-fad13311eb65\", \"version\": \"0.3.0\", \"python_path\": \"d3m.primitives.tods.data_processing.dataset_to_dataframe\", \"name\": \"Extract a DataFrame from a Dataset\", \"digest\": \"fb5cd27ebf69b9587b23940618071ba9ffe9f47ebd7772797d61ae0521f92515\"}, \"arguments\": {\"inputs\": {\"type\": \"CONTAINER\", \"data\": \"inputs.0\"}}, \"outputs\": [{\"id\": \"produce\"}]}, {\"type\": \"PRIMITIVE\", \"primitive\": {\"id\": \"d510cb7a-1782-4f51-b44c-58f0236e47c7\", \"version\": \"0.6.0\", \"python_path\": \"d3m.primitives.tods.data_processing.column_parser\", \"name\": \"Parses strings into their types\", \"digest\": \"62af3e97e2535681a0b1320e4ac97edeba15895862a46244ab079c47ce56958d\"}, \"arguments\": {\"inputs\": {\"type\": \"CONTAINER\", \"data\": \"steps.0.produce\"}}, \"outputs\": [{\"id\": \"produce\"}]}, {\"type\": \"PRIMITIVE\", \"primitive\": {\"id\": \"4503a4c6-42f7-45a1-a1d4-ed69699cf5e1\", \"version\": \"0.4.0\", \"python_path\": \"d3m.primitives.tods.data_processing.extract_columns_by_semantic_types\", \"name\": \"Extracts columns by semantic type\", \"digest\": \"d4c8204514d840de1b5acad9831f9d5581b41f425df3d14051336abdeacdf1b2\"}, \"arguments\": {\"inputs\": {\"type\": \"CONTAINER\", \"data\": \"steps.1.produce\"}}, \"outputs\": [{\"id\": \"produce\"}], \"hyperparams\": {\"semantic_types\": {\"type\": \"VALUE\", \"data\": [\"https://metadata.datadrivendiscovery.org/types/Attribute\"]}}}, {\"type\": \"PRIMITIVE\", \"primitive\": {\"id\": \"4503a4c6-42f7-45a1-a1d4-ed69699cf5e1\", \"version\": \"0.4.0\", \"python_path\": 
\"d3m.primitives.tods.data_processing.extract_columns_by_semantic_types\", \"name\": \"Extracts columns by semantic type\", \"digest\": \"d4c8204514d840de1b5acad9831f9d5581b41f425df3d14051336abdeacdf1b2\"}, \"arguments\": {\"inputs\": {\"type\": \"CONTAINER\", \"data\": \"steps.0.produce\"}}, \"outputs\": [{\"id\": \"produce\"}], \"hyperparams\": {\"semantic_types\": {\"type\": \"VALUE\", \"data\": [\"https://metadata.datadrivendiscovery.org/types/TrueTarget\"]}}}, {\"type\": \"PRIMITIVE\", \"primitive\": {\"id\": \"642de2e7-5590-3cab-9266-2a53c326c461\", \"version\": \"0.0.1\", \"python_path\": \"d3m.primitives.tods.timeseries_processing.transformation.axiswise_scaler\", \"name\": \"Axis_wise_scale\"}, \"arguments\": {\"inputs\": {\"type\": \"CONTAINER\", \"data\": \"steps.2.produce\"}}, \"outputs\": [{\"id\": \"produce\"}]}, {\"type\": \"PRIMITIVE\", \"primitive\": {\"id\": \"eaff2f35-978c-4530-a12e-061a5f0beacd\", \"version\": \"0.1.0\", \"python_path\": \"d3m.primitives.tods.feature_analysis.statistical_mean\", \"name\": \"Time Series Decompostional\", \"digest\": \"86f8a7a74cc872b09ec7dbec5910f9613c918255ba618731aa7f1ff9b42e37ba\"}, \"arguments\": {\"inputs\": {\"type\": \"CONTAINER\", \"data\": \"steps.4.produce\"}}, \"outputs\": [{\"id\": \"produce\"}]}, {\"type\": \"PRIMITIVE\", \"primitive\": {\"id\": \"67e7fcdf-d645-3417-9aa4-85cd369487d9\", \"version\": \"0.0.1\", \"python_path\": \"d3m.primitives.tods.detection_algorithm.pyod_ae\", \"name\": \"TODS.anomaly_detection_primitives.AutoEncoder\"}, \"arguments\": {\"inputs\": {\"type\": \"CONTAINER\", \"data\": \"steps.5.produce\"}}, \"outputs\": [{\"id\": \"produce\"}], \"hyperparams\": {\"contamination\": {\"type\": \"VALUE\", \"data\": 0.01}}}, {\"type\": \"PRIMITIVE\", \"primitive\": {\"id\": \"8d38b340-f83f-4877-baaa-162f8e551736\", \"version\": \"0.3.0\", \"python_path\": \"d3m.primitives.tods.data_processing.construct_predictions\", \"name\": \"Construct pipeline predictions output\", \"digest\": 
\"33d90bfb7f97f47a6de5372c5f912c26fca8da2d2777661651c69687ad6f9950\"}, \"arguments\": {\"inputs\": {\"type\": \"CONTAINER\", \"data\": \"steps.6.produce\"}, \"reference\": {\"type\": \"CONTAINER\", \"data\": \"steps.1.produce\"}}, \"outputs\": [{\"id\": \"produce\"}]}], \"digest\": \"a4ba790aa8c5ad34057cd97135f67edc8ccdc79d0bec0c4660fea0d2dfc82eb3\"}\n",
      "Output:\n",
      "     d3mIndex  anomaly\n",
      "0           0        0\n",
      "1           1        0\n",
      "2           2        0\n",
      "3           3        0\n",
      "4           4        0\n",
      "...       ...      ...\n",
      "1395     1395        0\n",
      "1396     1396        0\n",
      "1397     1397        1\n",
      "1398     1398        1\n",
      "1399     1399        0\n",
      "\n",
      "[1400 rows x 2 columns]\n",
      "Scores:\n",
      "     metric     value  normalized  randomSeed  fold\n",
      "0  F1_MACRO  0.708549    0.708549           0     0\n"
     ]
    }
   ],
   "source": [
    "print('Best pipeline:')\n",
    "print('-' * 52)\n",
    "print('Pipeline id:', best_pipeline.id)\n",
    "print('Pipeline json:', best_pipeline.to_json())\n",
    "print('Output:')\n",
    "print(best_output)\n",
    "print('Scores:')\n",
    "print(best_scores)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
