{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Now write into a Python module"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## input.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile anomaly_detection_module/trainer/input.py\n",
    "import tensorflow as tf\n",
    "\n",
    "\n",
    "# Input function functions\n",
    "def split_and_convert_string(string_tensor):\n",
    "  \"\"\"Splits and converts string tensor into dense float tensor.\n",
    "\n",
    "  Splits the given scalar string tensor on the \";\" delimiter and converts\n",
    "  the resulting pieces into a dense tf.float64 vector.\n",
    "\n",
    "  Args:\n",
    "    string_tensor: tf.string tensor.\n",
    "\n",
    "  Returns:\n",
    "    tf.float64 tensor split along delimiter.\n",
    "  \"\"\"\n",
    "  # Expand to rank 1 and split on the delimiter, yielding a sparse tensor\n",
    "  sparse_split = tf.string_split(\n",
    "      source=tf.expand_dims(input=string_tensor, axis=0), delimiter=\";\")\n",
    "\n",
    "  # Convert each split string piece into a float\n",
    "  float_values = tf.string_to_number(\n",
    "      string_tensor=sparse_split.values,\n",
    "      out_type=tf.float64)\n",
    "\n",
    "  # Sparse tensor values are immutable, so build a new sparse tensor\n",
    "  # carrying the converted float values\n",
    "  float_sparse = tf.SparseTensor(\n",
    "      indices=sparse_split.indices,\n",
    "      values=float_values,\n",
    "      dense_shape=sparse_split.dense_shape)\n",
    "\n",
    "  # Densify, then drop the leading singleton dimension added above\n",
    "  dense_matrix = tf.sparse_tensor_to_dense(\n",
    "      sp_input=float_sparse, default_value=0.0)\n",
    "\n",
    "  return tf.squeeze(input=dense_matrix, axis=0)\n",
    "\n",
    "\n",
    "def convert_sequences_from_strings_to_floats(features, column_list, seq_len):\n",
    "  \"\"\"Converts sequences from single strings to a sequence of floats.\n",
    "\n",
    "  For each named column, replaces the single-string feature tensor with a\n",
    "  dense tf.float64 vector of length seq_len.\n",
    "\n",
    "  Args:\n",
    "    features: Dictionary of tensors of our features as tf.strings.\n",
    "    column_list: List of column names of our features.\n",
    "    seq_len: Number of timesteps in sequence.\n",
    "\n",
    "  Returns:\n",
    "    Dictionary of tensors of our features as tf.float64s.\n",
    "  \"\"\"\n",
    "  for name in column_list:\n",
    "    converted = split_and_convert_string(features[name])\n",
    "    # The sequence length is known, so pin the static shape explicitly\n",
    "    converted.set_shape([seq_len])\n",
    "    features[name] = converted\n",
    "\n",
    "  return features\n",
    "\n",
    "\n",
    "def decode_csv(value_column, mode, batch_size, params):\n",
    "  \"\"\"Decodes CSV file into tensors.\n",
    "\n",
    "  Given single string tensor, sequence length, and number of features,\n",
    "  returns features dictionary of tensors and labels tensor.\n",
    "\n",
    "  Args:\n",
    "    value_column: tf.string tensor of shape () comprising entire line of\n",
    "      CSV file.\n",
    "    mode: The estimator ModeKeys. Can be TRAIN or EVAL.\n",
    "    batch_size: Number of examples per batch. Unused here, but kept so the\n",
    "      input pipeline functions share a common signature.\n",
    "    params: Dictionary of user passed parameters.\n",
    "\n",
    "  Returns:\n",
    "    Features dictionary of tensors, plus a labels tensor when decoding\n",
    "    labeled threshold-tuning evaluation data.\n",
    "  \"\"\"\n",
    "  # Labels only exist when tuning anomaly thresholds on labeled data during\n",
    "  # EVAL. The original nested check (A != X or (A == X and not B)) reduces\n",
    "  # to (A != X or not B).\n",
    "  unlabeled = (mode == tf.estimator.ModeKeys.TRAIN or\n",
    "               (mode == tf.estimator.ModeKeys.EVAL and\n",
    "                (params[\"training_mode\"] != \"tune_anomaly_thresholds\" or\n",
    "                 not params[\"labeled_tune_thresh\"])))\n",
    "\n",
    "  if unlabeled:\n",
    "    # For subset of CSV files that do NOT have labels\n",
    "    columns = tf.decode_csv(\n",
    "        records=value_column,\n",
    "        record_defaults=params[\"feat_defaults\"],\n",
    "        field_delim=\",\")\n",
    "\n",
    "    features = dict(zip(params[\"feat_names\"], columns))\n",
    "    features = convert_sequences_from_strings_to_floats(\n",
    "        features=features,\n",
    "        column_list=params[\"feat_names\"],\n",
    "        seq_len=params[\"seq_len\"])\n",
    "\n",
    "    return features\n",
    "  else:\n",
    "    # For subset of CSV files that DO have labels\n",
    "    columns = tf.decode_csv(\n",
    "        records=value_column,\n",
    "        record_defaults=params[\"feat_defaults\"] + [[0.0]],  # add label default\n",
    "        field_delim=\",\")\n",
    "\n",
    "    features = dict(\n",
    "        zip(params[\"feat_names\"] + [\"anomalous_sequence_flag\"], columns))\n",
    "\n",
    "    # Pop the label column out of the features dictionary\n",
    "    labels = tf.cast(\n",
    "        x=features.pop(\"anomalous_sequence_flag\"), dtype=tf.float64)\n",
    "\n",
    "    features = convert_sequences_from_strings_to_floats(\n",
    "        features=features,\n",
    "        column_list=params[\"feat_names\"],\n",
    "        seq_len=params[\"seq_len\"])\n",
    "\n",
    "    return features, labels\n",
    "\n",
    "\n",
    "def read_dataset(filename, mode, batch_size, params):\n",
    "  \"\"\"Reads CSV time series dataset using tf.data, doing necessary preprocessing.\n",
    "\n",
    "  Given filename, mode, batch size and other parameters, read CSV dataset using\n",
    "  Dataset API, apply necessary preprocessing, and return an input function to\n",
    "  the Estimator API.\n",
    "\n",
    "  Args:\n",
    "    filename: The file pattern that we want to read into our tf.data dataset.\n",
    "    mode: The estimator ModeKeys. Can be TRAIN or EVAL.\n",
    "    batch_size: Number of examples per batch.\n",
    "    params: Dictionary of user passed parameters.\n",
    "\n",
    "  Returns:\n",
    "    An input function.\n",
    "  \"\"\"\n",
    "  def _input_fn():\n",
    "    \"\"\"Wrapper input function to be used by Estimator API to get data tensors.\n",
    "\n",
    "    Returns:\n",
    "      Batched dataset object of dictionary of feature tensors and label tensor.\n",
    "    \"\"\"\n",
    "    # Create list of files that match pattern\n",
    "    file_list = tf.gfile.Glob(filename=filename)\n",
    "\n",
    "    # Create dataset from file list\n",
    "    dataset = tf.data.TextLineDataset(filenames=file_list)  # Read text file\n",
    "\n",
    "    # Decode the CSV file into a features dictionary of tensors\n",
    "    dataset = dataset.map(\n",
    "        map_func=lambda x: decode_csv(\n",
    "            value_column=x, mode=mode, batch_size=batch_size, params=params))\n",
    "\n",
    "    if mode == tf.estimator.ModeKeys.TRAIN:\n",
    "      # Shuffle individual examples BEFORE batching so each batch is a\n",
    "      # random mix of examples; shuffling after batching would only permute\n",
    "      # whole batches relative to each other\n",
    "      dataset = dataset.shuffle(buffer_size=10 * batch_size)\n",
    "      num_epochs = None  # loop through the data indefinitely\n",
    "    else:\n",
    "      num_epochs = 1  # end-of-input after this\n",
    "\n",
    "    # Repeat files num_epoch times\n",
    "    dataset = dataset.repeat(count=num_epochs)\n",
    "\n",
    "    # Group the data into batches\n",
    "    dataset = dataset.batch(batch_size=batch_size)\n",
    "\n",
    "    # Create a iterator, then pull batch of features from the example queue\n",
    "    batched_dataset = dataset.make_one_shot_iterator().get_next()\n",
    "\n",
    "    return batched_dataset\n",
    "\n",
    "  return _input_fn"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## autoencoder_dense.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile anomaly_detection_module/trainer/autoencoder_dense.py\n",
    "import tensorflow as tf\n",
    "\n",
    "\n",
    "# Dense autoencoder model functions\n",
    "def dense_encoder(X, params):\n",
    "  \"\"\"Dense model encoder subgraph that produces latent matrix.\n",
    "\n",
    "  Maps each example in the batch through a stack of ReLU dense layers down to\n",
    "  a latent vector of size params[\"latent_vector_size\"].\n",
    "\n",
    "  Args:\n",
    "    X: tf.float64 matrix tensor of input data.\n",
    "    params: Dictionary of parameters.\n",
    "\n",
    "  Returns:\n",
    "    tf.float64 matrix tensor encoder latent vector for each example in batch.\n",
    "  \"\"\"\n",
    "  # Feed the input through each configured hidden layer in turn\n",
    "  hidden = X\n",
    "  for num_units in params[\"enc_dnn_hidden_units\"]:\n",
    "    hidden = tf.layers.dense(\n",
    "        inputs=hidden,\n",
    "        units=num_units,\n",
    "        activation=tf.nn.relu)\n",
    "\n",
    "  # Project the final hidden layer down to the latent representation\n",
    "  return tf.layers.dense(\n",
    "      inputs=hidden,\n",
    "      units=params[\"latent_vector_size\"],\n",
    "      activation=tf.nn.relu)\n",
    "\n",
    "\n",
    "def dense_decoder(latent_matrix, orig_dims, params):\n",
    "  \"\"\"Dense model decoder subgraph that produces output matrix.\n",
    "\n",
    "  Expands the encoder latent matrix back out through a stack of ReLU dense\n",
    "  layers to the original input dimensionality.\n",
    "\n",
    "  Args:\n",
    "    latent_matrix: tf.float64 matrix tensor of encoder latent matrix.\n",
    "    orig_dims: Original dimensions of input data.\n",
    "    params: Dictionary of parameters.\n",
    "\n",
    "  Returns:\n",
    "    tf.float64 matrix tensor decoder output vector for each example in batch.\n",
    "  \"\"\"\n",
    "  # Walk the hidden layer sizes in reverse order to mirror the encoder\n",
    "  hidden = latent_matrix\n",
    "  for num_units in reversed(params[\"dec_dnn_hidden_units\"]):\n",
    "    hidden = tf.layers.dense(\n",
    "        inputs=hidden,\n",
    "        units=num_units,\n",
    "        activation=tf.nn.relu)\n",
    "\n",
    "  # Map the final hidden layer back out to the original input dimensions\n",
    "  return tf.layers.dense(\n",
    "      inputs=hidden,\n",
    "      units=orig_dims,\n",
    "      activation=tf.nn.relu)\n",
    "\n",
    "\n",
    "def dense_autoencoder(X, orig_dims, params):\n",
    "  \"\"\"Dense model autoencoder using dense encoder and decoder networks.\n",
    "\n",
    "  Encodes the input down to a latent representation and then decodes it back\n",
    "  to the original dimensionality, producing the reconstruction.\n",
    "\n",
    "  Args:\n",
    "    X: tf.float64 matrix tensor of input data.\n",
    "    orig_dims: Original dimensions of input data.\n",
    "    params: Dictionary of parameters.\n",
    "\n",
    "  Returns:\n",
    "    tf.float64 matrix tensor decoder output vector for each example in batch\n",
    "    that is the reconstructed inputs.\n",
    "  \"\"\"\n",
    "  # Encode to the latent space, then decode straight back out\n",
    "  return dense_decoder(\n",
    "      latent_matrix=dense_encoder(X, params),\n",
    "      orig_dims=orig_dims,\n",
    "      params=params)\n",
    "\n",
    "\n",
    "def dense_autoencoder_model(\n",
    "    X, mode, params, cur_batch_size, dummy_var):\n",
    "  \"\"\"Dense autoencoder to reconstruct inputs and minimize reconstruction error.\n",
    "\n",
    "  Given data matrix tensor X, the current Estimator mode, the dictionary of\n",
    "  parameters, and the current batch size, process through dense model encoder\n",
    "  and decoder subgraphs and return reconstructed inputs as output.\n",
    "\n",
    "  Args:\n",
    "    X: tf.float64 matrix tensor of input data.\n",
    "    mode: Estimator ModeKeys. Can take values of TRAIN, EVAL, and PREDICT.\n",
    "    params: Dictionary of parameters.\n",
    "    cur_batch_size: Current batch size, could be partially filled.\n",
    "    dummy_var: Dummy variable used to allow training mode to happen since it\n",
    "      requires a gradient to tie back to the graph dependency.\n",
    "\n",
    "  Returns:\n",
    "    loss: Reconstruction loss (None unless training in reconstruction mode).\n",
    "    train_op: Train operation so that Estimator can correctly add to dependency\n",
    "      graph (None unless training in reconstruction mode).\n",
    "    X_time: 2D tensor representation of time major input data.\n",
    "    X_time_recon: 2D tensor reconstruction of time major input data.\n",
    "    X_feat: 2D tensor representation of feature major input data.\n",
    "    X_feat_recon: 2D tensor reconstruction of feature major input data.\n",
    "  \"\"\"\n",
    "  # Reshape into 2-D tensors\n",
    "  # Time based\n",
    "  # shape = (cur_batch_size * seq_len, num_feat)\n",
    "  X_time = tf.reshape(\n",
    "      tensor=X,\n",
    "      shape=[cur_batch_size * params[\"seq_len\"], params[\"num_feat\"]])\n",
    "\n",
    "  # shape = (cur_batch_size * seq_len, num_feat)\n",
    "  X_time_recon = dense_autoencoder(X_time, params[\"num_feat\"], params)\n",
    "\n",
    "  # Features based\n",
    "  # shape = (cur_batch_size, num_feat, seq_len)\n",
    "  X_transposed = tf.transpose(a=X, perm=[0, 2, 1])\n",
    "\n",
    "  # shape = (cur_batch_size * num_feat, seq_len)\n",
    "  X_feat = tf.reshape(\n",
    "      tensor=X_transposed,\n",
    "      shape=[cur_batch_size * params[\"num_feat\"], params[\"seq_len\"]])\n",
    "\n",
    "  # shape = (cur_batch_size * num_feat, seq_len)\n",
    "  X_feat_recon = dense_autoencoder(X_feat, params[\"seq_len\"], params)\n",
    "\n",
    "  if (mode == tf.estimator.ModeKeys.TRAIN and\n",
    "      params[\"training_mode\"] == \"reconstruction\"):\n",
    "    # Fold both reconstructions back to 3-D so they can be combined\n",
    "    X_time_recon_3d = tf.reshape(\n",
    "        tensor=X_time_recon,\n",
    "        shape=[cur_batch_size, params[\"seq_len\"], params[\"num_feat\"]])\n",
    "    X_feat_recon_3d = tf.transpose(\n",
    "        a=tf.reshape(\n",
    "            tensor=X_feat_recon,\n",
    "            shape=[cur_batch_size, params[\"num_feat\"], params[\"seq_len\"]]),\n",
    "        perm=[0, 2, 1])\n",
    "\n",
    "    # The prediction is the weighted average of the two reconstructions\n",
    "    X_time_recon_3d_weighted = X_time_recon_3d * params[\"time_loss_weight\"]\n",
    "    X_feat_recon_3d_weighted = X_feat_recon_3d * params[\"feat_loss_weight\"]\n",
    "\n",
    "    predictions = (X_time_recon_3d_weighted + X_feat_recon_3d_weighted) \\\n",
    "      / (params[\"time_loss_weight\"] + params[\"feat_loss_weight\"])\n",
    "\n",
    "    loss = tf.losses.mean_squared_error(labels=X, predictions=predictions)\n",
    "\n",
    "    train_op = tf.contrib.layers.optimize_loss(\n",
    "        loss=loss,\n",
    "        global_step=tf.train.get_global_step(),\n",
    "        learning_rate=params[\"learning_rate\"],\n",
    "        optimizer=\"Adam\")\n",
    "\n",
    "    return loss, train_op, None, None, None, None\n",
    "  else:\n",
    "    return None, None, X_time, X_time_recon, X_feat, X_feat_recon"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## autoencoder_lstm.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile anomaly_detection_module/trainer/autoencoder_lstm.py\n",
    "import tensorflow as tf\n",
    "\n",
    "\n",
    "# LSTM Encoder-decoder Autoencoder model functions\n",
    "def create_LSTM_stack(lstm_hidden_units, lstm_dropout_output_keep_probs):\n",
    "  \"\"\"Create LSTM stacked cells.\n",
    "\n",
    "  Builds one BasicLSTMCell per entry in lstm_hidden_units, wraps each with\n",
    "  output dropout, and stacks them into a single MultiRNNCell.\n",
    "\n",
    "  Args:\n",
    "    lstm_hidden_units: List of integers for the number of hidden units in each\n",
    "      layer.\n",
    "    lstm_dropout_output_keep_probs: List of floats for the dropout output keep\n",
    "      probabilities for each layer.\n",
    "\n",
    "  Returns:\n",
    "    MultiRNNCell object of stacked LSTM layers.\n",
    "  \"\"\"\n",
    "  # Build one LSTM cell per requested layer size\n",
    "  cells = [\n",
    "      tf.contrib.rnn.BasicLSTMCell(\n",
    "          num_units=num_units,\n",
    "          forget_bias=1.0,\n",
    "          state_is_tuple=True)\n",
    "      for num_units in lstm_hidden_units]\n",
    "\n",
    "  # Apply dropout to the outputs only; inputs and state are left untouched\n",
    "  wrapped_cells = [\n",
    "      tf.nn.rnn_cell.DropoutWrapper(\n",
    "          cell=cell,\n",
    "          input_keep_prob=1.0,\n",
    "          output_keep_prob=lstm_dropout_output_keep_probs[i],\n",
    "          state_keep_prob=1.0)\n",
    "      for i, cell in enumerate(cells)]\n",
    "\n",
    "  # Combine the wrapped layers into a single multi-layer RNN cell\n",
    "  return tf.contrib.rnn.MultiRNNCell(\n",
    "      cells=wrapped_cells,\n",
    "      state_is_tuple=True)\n",
    "\n",
    "\n",
    "# The rnn_decoder function takes labels during TRAIN/EVAL\n",
    "# and a start token followed by its previous predictions during PREDICT\n",
    "# Starts with an initial state of the final encoder states\n",
    "def rnn_decoder(dec_input, init_state, cell, infer, dnn_hidden_units, num_feat):\n",
    "  \"\"\"Decoder for RNN cell.\n",
    "\n",
    "  Runs the decoder cell over the input sequence, starting from the encoder's\n",
    "  final state. During training/evaluation each step consumes the provided\n",
    "  inputs; during inference each step instead consumes the DNN projection of\n",
    "  the previous step's output.\n",
    "\n",
    "  Args:\n",
    "    dec_input: List of tf.float64 current batch size by number of features\n",
    "      matrix tensors input to the decoder.\n",
    "    init_state: Initial state of the decoder cell. Final state from the\n",
    "      encoder cell.\n",
    "    cell: RNN Cell object.\n",
    "    infer: Boolean whether in inference mode or not.\n",
    "    dnn_hidden_units: Python list of integers of number of units per DNN layer.\n",
    "    num_feat: Python integer of the number of features.\n",
    "\n",
    "  Returns:\n",
    "    outputs: List, seq_len long, of the raw decoder cell outputs (before the\n",
    "      DNN projection), each of shape (cur_batch_size, lstm_hidden_units[-1]).\n",
    "    state: Final cell state of the decoder.\n",
    "  \"\"\"\n",
    "  # Create the decoder variable scope\n",
    "  with tf.variable_scope(\"decoder\"):\n",
    "    # Load in our initial state from our encoder\n",
    "    # Tuple of final encoder c_state and h_state of final encoder layer\n",
    "    state = init_state\n",
    "\n",
    "    # Create an empty list to store our hidden state output for every timestep\n",
    "    outputs = []\n",
    "\n",
    "    # Begin with no previous output\n",
    "    previous_output = None\n",
    "\n",
    "    # Loop over all of our dec_input which will be seq_len long\n",
    "    for index, decoder_input in enumerate(dec_input):\n",
    "      # If there has been a previous output, we will determine the next input\n",
    "      if previous_output is not None:\n",
    "        # Create the input layer to our DNN\n",
    "        # shape = (cur_batch_size, lstm_hidden_units[-1])\n",
    "        network = previous_output\n",
    "\n",
    "        # Create our dnn variable scope\n",
    "        with tf.variable_scope(name_or_scope=\"dnn\", reuse=tf.AUTO_REUSE):\n",
    "          # Add hidden layers with the given number of units/neurons per layer\n",
    "          # shape = (cur_batch_size, dnn_hidden_units[i])\n",
    "          for units in dnn_hidden_units:\n",
    "            network = tf.layers.dense(\n",
    "                inputs=network,\n",
    "                units=units,\n",
    "                activation=tf.nn.relu)\n",
    "\n",
    "          # Connect final hidden layer to linear layer to get the logits\n",
    "          # shape = (cur_batch_size, num_feat)\n",
    "          logits = tf.layers.dense(\n",
    "              inputs=network,\n",
    "              units=num_feat,\n",
    "              activation=None)\n",
    "\n",
    "        # If we are in inference then we will overwrite our next decoder_input\n",
    "        # with the logits we just calculated. Otherwise, we leave the decoder\n",
    "        # input as it was from the enumerated list. We have to calculate\n",
    "        # the logits even when not using them so that the correct DNN subgraph\n",
    "        # will be generated here and after the encoder-decoder for both\n",
    "        # training and inference\n",
    "        if infer:\n",
    "          # shape = (cur_batch_size, num_feat)\n",
    "          decoder_input = logits\n",
    "\n",
    "      # If this isn't our first time through the loop, just reuse(share) the\n",
    "      # same variables for each iteration within the current variable scope\n",
    "      if index > 0:\n",
    "        tf.get_variable_scope().reuse_variables()\n",
    "\n",
    "      # Run the decoder input through the decoder stack picking up from the\n",
    "      # previous state\n",
    "      # output_shape = (cur_batch_size, lstm_hidden_units[-1])\n",
    "      # state_shape = tuple of final decoder c_state and h_state\n",
    "      output, state = cell(decoder_input, state)\n",
    "\n",
    "      # Append the current decoder hidden state output to the outputs list\n",
    "      # List seq_len long of shape = (cur_batch_size, lstm_hidden_units[-1])\n",
    "      outputs.append(output)\n",
    "\n",
    "      # Set the previous output to the output just calculated\n",
    "      # shape = (cur_batch_size, lstm_hidden_units[-1])\n",
    "      previous_output = output\n",
    "  return outputs, state\n",
    "\n",
    "\n",
    "def lstm_enc_dec_autoencoder_model(\n",
    "    X, mode, params, cur_batch_size, dummy_var):\n",
    "  \"\"\"LSTM autoencoder to reconstruct inputs and minimize reconstruction error.\n",
    "\n",
    "  Given data matrix tensor X, the current Estimator mode, the dictionary of\n",
    "  parameters, current batch size, and the number of features, process through\n",
    "  LSTM model encoder, decoder, and DNN subgraphs and return reconstructed inputs\n",
    "  as output.\n",
    "\n",
    "  Args:\n",
    "    X: tf.float64 matrix tensor of input data.\n",
    "    mode: Estimator ModeKeys. Can take values of TRAIN, EVAL, and PREDICT.\n",
    "    params: Dictionary of parameters.\n",
    "    cur_batch_size: Current batch size, could be partially filled.\n",
    "    dummy_var: Dummy variable used to allow training mode to happen since it\n",
    "      requires a gradient to tie back to the graph dependency.\n",
    "\n",
    "  Returns:\n",
    "    loss: Reconstruction loss.\n",
    "    train_op: Train operation so that Estimator can correctly add to dependency\n",
    "      graph.\n",
    "    X_time: 2D tensor representation of time major input data.\n",
    "    X_time_recon: 2D tensor representation of time major input data.\n",
    "    X_feat: 2D tensor representation of feature major input data.\n",
    "    X_feat_recon: 2D tensor representation of feature major input data.\n",
    "  \"\"\"\n",
    "  # Unstack 3-D features tensor into a sequence(list) of 2-D tensors\n",
    "  # shape = (cur_batch_size, num_feat)\n",
    "  X_sequence = tf.unstack(value=X, num=params[\"seq_len\"], axis=1)\n",
    "\n",
    "  # Since this is an autoencoder, the features are the labels.\n",
    "  # It often works better though to have the labels in reverse order\n",
    "  # shape = (cur_batch_size, seq_len, num_feat)\n",
    "  if params[\"reverse_labels_sequence\"]:\n",
    "    Y = tf.reverse_sequence(\n",
    "        input=X,\n",
    "        seq_lengths=tf.tile(\n",
    "            input=tf.constant(value=[params[\"seq_len\"]], dtype=tf.int64),\n",
    "            multiples=tf.expand_dims(input=cur_batch_size, axis=0)),\n",
    "        seq_axis=1,\n",
    "        batch_axis=0)\n",
    "  else:\n",
    "    Y = X  # shape = (cur_batch_size, seq_len, num_feat)\n",
    "\n",
    "  ##############################################################################\n",
    "\n",
    "  # Create encoder of encoder-decoder LSTM stacks\n",
    "\n",
    "  # Create our decoder now\n",
    "  dec_stacked_lstm_cells = create_LSTM_stack(\n",
    "      params[\"dec_lstm_hidden_units\"],\n",
    "      params[\"lstm_dropout_output_keep_probs\"])\n",
    "\n",
    "  # Create the encoder variable scope\n",
    "  with tf.variable_scope(\"encoder\"):\n",
    "    # Create separate encoder cells with their own weights separate from decoder\n",
    "    enc_stacked_lstm_cells = create_LSTM_stack(\n",
    "        params[\"enc_lstm_hidden_units\"],\n",
    "        params[\"lstm_dropout_output_keep_probs\"])\n",
    "\n",
    "    # Encode the input sequence using our encoder stack of LSTMs\n",
    "    # enc_outputs = seq_len long of shape = (cur_batch_size, enc_lstm_hidden_units[-1])\n",
    "    # enc_states = tuple of final encoder c_state and h_state for each layer\n",
    "    _, enc_states = tf.nn.static_rnn(\n",
    "        cell=enc_stacked_lstm_cells,\n",
    "        inputs=X_sequence,\n",
    "        initial_state=enc_stacked_lstm_cells.zero_state(\n",
    "            batch_size=tf.cast(x=cur_batch_size, dtype=tf.int32),\n",
    "            dtype=tf.float64),\n",
    "        dtype=tf.float64)\n",
    "\n",
    "    # We just pass on the final c and h states of the encoder\"s last layer,\n",
    "    # so extract that and drop the others\n",
    "    # LSTMStateTuple shape = (cur_batch_size, lstm_hidden_units[-1])\n",
    "    enc_final_states = enc_states[-1]\n",
    "\n",
    "    # Extract the c and h states from the tuple\n",
    "    # both have shape = (cur_batch_size, lstm_hidden_units[-1])\n",
    "    enc_final_c, enc_final_h = enc_final_states\n",
    "\n",
    "    # In case the decoder\"s first layer's number of units is different than\n",
    "    # encoder's last layer's number of units, use a dense layer to map to the\n",
    "    # correct shape\n",
    "    # shape = (cur_batch_size, dec_lstm_hidden_units[0])\n",
    "    enc_final_c_dense = tf.layers.dense(\n",
    "        inputs=enc_final_c,\n",
    "        units=params[\"dec_lstm_hidden_units\"][0],\n",
    "        activation=None)\n",
    "\n",
    "    # shape = (cur_batch_size, dec_lstm_hidden_units[0])\n",
    "    enc_final_h_dense = tf.layers.dense(\n",
    "        inputs=enc_final_h,\n",
    "        units=params[\"dec_lstm_hidden_units\"][0],\n",
    "        activation=None)\n",
    "\n",
    "    # The decoder\"s first layer\"s state comes from the encoder,\n",
    "    # the rest of the layers\" initial states are zero\n",
    "    dec_init_states = tuple(\n",
    "        [tf.contrib.rnn.LSTMStateTuple(c=enc_final_c_dense,\n",
    "                                       h=enc_final_h_dense)] + \\\n",
    "        [tf.contrib.rnn.LSTMStateTuple(\n",
    "            c=tf.zeros(shape=[cur_batch_size, units], dtype=tf.float64),\n",
    "            h=tf.zeros(shape=[cur_batch_size, units], dtype=tf.float64))\n",
    "         for units in params[\"dec_lstm_hidden_units\"][1:]])\n",
    "\n",
    "  ##############################################################################\n",
    "\n",
    "  # Create decoder of encoder-decoder LSTM stacks\n",
    "\n",
    "  # Train our decoder now\n",
    "\n",
    "  # Encoder-decoders work differently during training, evaluation, and inference\n",
    "  # so we will have two separate subgraphs for each\n",
    "  if (mode == tf.estimator.ModeKeys.TRAIN and\n",
    "      params[\"training_mode\"] == \"reconstruction\"):\n",
    "    # Break 3-D labels tensor into a list of 2-D tensors\n",
    "    # shape = (cur_batch_size, num_feat)\n",
    "    unstacked_labels = tf.unstack(value=Y, num=params[\"seq_len\"], axis=1)\n",
    "\n",
    "    # Call our decoder using the labels as our inputs, the encoder final state\n",
    "    # as our initial state, our other LSTM stack as our cells, and inference\n",
    "    # set to false\n",
    "    dec_outputs, _ = rnn_decoder(\n",
    "        dec_input=unstacked_labels,\n",
    "        init_state=dec_init_states,\n",
    "        cell=dec_stacked_lstm_cells,\n",
    "        infer=False,\n",
    "        dnn_hidden_units=params[\"dnn_hidden_units\"],\n",
    "        num_feat=params[\"num_feat\"])\n",
    "  else:\n",
    "    # Since this is inference create fake labels. The list length needs to be\n",
    "    # the output sequence length even though only the first element is the only\n",
    "    # one actually used (as our go signal)\n",
    "    fake_labels = [tf.zeros(shape=[cur_batch_size, params[\"num_feat\"]],\n",
    "                            dtype=tf.float64)\n",
    "                   for _ in range(params[\"seq_len\"])]\n",
    "\n",
    "    # Call our decoder using fake labels as our inputs, the encoder final state\n",
    "    # as our initial state, our other LSTM stack as our cells, and inference\n",
    "    # set to true\n",
    "    # dec_outputs = seq_len long of shape = (cur_batch_size, dec_lstm_hidden_units[-1])\n",
    "    # decoder_states = tuple of final decoder c_state and h_state for each layer\n",
    "    dec_outputs, _ = rnn_decoder(\n",
    "        dec_input=fake_labels,\n",
    "        init_state=dec_init_states,\n",
    "        cell=dec_stacked_lstm_cells,\n",
    "        infer=True,\n",
    "        dnn_hidden_units=params[\"dnn_hidden_units\"],\n",
    "        num_feat=params[\"num_feat\"])\n",
    "\n",
    "  # Stack together list of rank 2 decoder output tensors into one rank 3 tensor\n",
    "  # shape = (cur_batch_size, seq_len, lstm_hidden_units[-1])\n",
    "  stacked_dec_outputs = tf.stack(values=dec_outputs, axis=1)\n",
    "\n",
    "  # Reshape rank 3 decoder outputs into rank 2 by folding sequence length into\n",
    "  # batch size\n",
    "  # shape = (cur_batch_size * seq_len, lstm_hidden_units[-1])\n",
    "  reshaped_stacked_dec_outputs = tf.reshape(\n",
    "      tensor=stacked_dec_outputs,\n",
    "      shape=[cur_batch_size * params[\"seq_len\"],\n",
    "             params[\"dec_lstm_hidden_units\"][-1]])\n",
    "\n",
    "  ##############################################################################\n",
    "\n",
    "  # Create the DNN structure now after the encoder-decoder LSTM stack\n",
    "  # Create the input layer to our DNN\n",
    "  # shape = (cur_batch_size * seq_len, lstm_hidden_units[-1])\n",
    "  network = reshaped_stacked_dec_outputs\n",
    "\n",
    "  # Reuse the same variable scope as we used within our decoder (for inference)\n",
    "  with tf.variable_scope(name_or_scope=\"dnn\", reuse=tf.AUTO_REUSE):\n",
    "    # Add hidden layers with the given number of units/neurons per layer\n",
    "    for units in params[\"dnn_hidden_units\"]:\n",
    "      # shape = (cur_batch_size * seq_len, dnn_hidden_units[i])\n",
    "      network = tf.layers.dense(\n",
    "          inputs=network,\n",
    "          units=units,\n",
    "          activation=tf.nn.relu)\n",
    "\n",
    "    # Connect the final hidden layer to a dense layer with no activation to\n",
    "    # get the logits\n",
    "    # shape = (cur_batch_size * seq_len, num_feat)\n",
    "    logits = tf.layers.dense(\n",
    "        inputs=network,\n",
    "        units=params[\"num_feat\"],\n",
    "        activation=None)\n",
    "\n",
    "  # Now that we are through the final DNN for each sequence element for\n",
    "  # each example in the batch, reshape the predictions to match our labels.\n",
    "  # shape = (cur_batch_size, seq_len, num_feat)\n",
    "  predictions = tf.reshape(\n",
    "      tensor=logits,\n",
    "      shape=[cur_batch_size, params[\"seq_len\"], params[\"num_feat\"]])\n",
    "\n",
    "  if (mode == tf.estimator.ModeKeys.TRAIN and\n",
    "      params[\"training_mode\"] == \"reconstruction\"):\n",
    "    loss = tf.losses.mean_squared_error(labels=Y, predictions=predictions)\n",
    "\n",
    "    train_op = tf.contrib.layers.optimize_loss(\n",
    "        loss=loss,\n",
    "        global_step=tf.train.get_global_step(),\n",
    "        learning_rate=params[\"learning_rate\"],\n",
    "        optimizer=\"Adam\")\n",
    "\n",
    "    return loss, train_op, None, None, None, None\n",
    "  else:\n",
    "    if params[\"reverse_labels_sequence\"]:\n",
    "      # shape=(cur_batch_size, seq_len, num_feat)\n",
    "      predictions = tf.reverse_sequence(\n",
    "          input=predictions,\n",
    "          seq_lengths=tf.tile(\n",
    "              input=tf.constant(value=[params[\"seq_len\"]], dtype=tf.int64),\n",
    "              multiples=tf.expand_dims(input=cur_batch_size, axis=0)),\n",
    "          seq_axis=1,\n",
    "          batch_axis=0)\n",
    "\n",
    "    # Reshape into 2-D tensors\n",
    "    # Time based\n",
    "    # shape = (cur_batch_size * seq_len, num_feat)\n",
    "    X_time = tf.reshape(\n",
    "        tensor=X,\n",
    "        shape=[cur_batch_size * params[\"seq_len\"], params[\"num_feat\"]])\n",
    "\n",
    "    X_time_recon = tf.reshape(\n",
    "        tensor=predictions,\n",
    "        shape=[cur_batch_size * params[\"seq_len\"], params[\"num_feat\"]])\n",
    "\n",
    "    # Features based\n",
    "    # shape = (cur_batch_size, num_feat, seq_len)\n",
    "    X_transposed = tf.transpose(a=X, perm=[0, 2, 1])\n",
    "\n",
    "    # shape = (cur_batch_size * num_feat, seq_len)\n",
    "    X_feat = tf.reshape(\n",
    "        tensor=X_transposed,\n",
    "        shape=[cur_batch_size * params[\"num_feat\"], params[\"seq_len\"]])\n",
    "\n",
    "    # shape = (cur_batch_size, num_feat, seq_len)\n",
    "    predictions_transposed = tf.transpose(a=predictions, perm=[0, 2, 1])\n",
    "\n",
    "    # shape = (cur_batch_size * num_feat, seq_len)\n",
    "    X_feat_recon = tf.reshape(\n",
    "        tensor=predictions_transposed,\n",
    "        shape=[cur_batch_size * params[\"num_feat\"], params[\"seq_len\"]])\n",
    "\n",
    "    return None, None, X_time, X_time_recon, X_feat, X_feat_recon"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## autoencoder_pca.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile anomaly_detection_module/trainer/autoencoder_pca.py\n",
    "import tensorflow as tf\n",
    "\n",
    "from .calculate_error_distribution_statistics import non_singleton_batch_cov_variable_updating\n",
    "from .calculate_error_distribution_statistics import singleton_batch_cov_variable_updating\n",
    "\n",
    "\n",
    "# PCA model functions\n",
    "def create_pca_vars(var_name, size):\n",
    "  \"\"\"Creates PCA variables.\n",
    "\n",
    "  Given variable name and size, create and return PCA variables for count,\n",
    "  mean, covariance, eigenvalues, eigenvectors, and k principal components.\n",
    "\n",
    "  Args:\n",
    "    var_name: String denoting which set of variables to create. Values are\n",
    "      \"time\" and \"feat\".\n",
    "    size: The size of the variable, either sequence length or number of\n",
    "      features.\n",
    "\n",
    "  Returns:\n",
    "    PCA variables for count, mean, covariance, eigenvalues,\n",
    "    eigenvectors, and k principal components.\n",
    "  \"\"\"\n",
    "  # AUTO_REUSE means repeated calls return the already-created variables\n",
    "  # instead of raising on re-creation\n",
    "  with tf.variable_scope(\n",
    "      name_or_scope=\"pca_vars\", reuse=tf.AUTO_REUSE):\n",
    "    # All of these are non-trainable state, updated manually via tf.assign\n",
    "    # rather than by an optimizer\n",
    "    count_var = tf.get_variable(\n",
    "        name=\"pca_{}_count_var\".format(var_name),\n",
    "        dtype=tf.int64,\n",
    "        initializer=tf.zeros(shape=[], dtype=tf.int64),\n",
    "        trainable=False)\n",
    "\n",
    "    mean_var = tf.get_variable(\n",
    "        name=\"pca_{}_mean_var\".format(var_name),\n",
    "        dtype=tf.float64,\n",
    "        initializer=tf.zeros(shape=[size], dtype=tf.float64),\n",
    "        trainable=False)\n",
    "\n",
    "    cov_var = tf.get_variable(\n",
    "        name=\"pca_{}_cov_var\".format(var_name),\n",
    "        dtype=tf.float64,\n",
    "        initializer=tf.zeros(shape=[size, size], dtype=tf.float64),\n",
    "        trainable=False)\n",
    "\n",
    "    eigval_var = tf.get_variable(\n",
    "        name=\"pca_{}_eigval_var\".format(var_name),\n",
    "        dtype=tf.float64,\n",
    "        initializer=tf.zeros(shape=[size], dtype=tf.float64),\n",
    "        trainable=False)\n",
    "\n",
    "    eigvec_var = tf.get_variable(\n",
    "        name=\"pca_{}_eigvec_var\".format(var_name),\n",
    "        dtype=tf.float64,\n",
    "        initializer=tf.zeros(shape=[size, size], dtype=tf.float64),\n",
    "        trainable=False)\n",
    "\n",
    "    # Initialized to one so that at least one principal component is kept\n",
    "    k_pc_var = tf.get_variable(\n",
    "        name=\"pca_{}_k_principal_components_var\".format(var_name),\n",
    "        dtype=tf.int64,\n",
    "        initializer=tf.ones(shape=[], dtype=tf.int64),\n",
    "        trainable=False)\n",
    "\n",
    "  return count_var, mean_var, cov_var, eigval_var, eigvec_var, k_pc_var\n",
    "\n",
    "\n",
    "def create_both_pca_vars(seq_len, num_feat):\n",
    "  \"\"\"Creates both time & feature major PCA variables.\n",
    "\n",
    "  Given dimensions of inputs, create and return PCA variables for count,\n",
    "  mean, covariance, eigenvalues, eigenvectors, and k principal components\n",
    "  for both time and feature major representations.\n",
    "\n",
    "  Args:\n",
    "    seq_len: Number of timesteps in sequence.\n",
    "    num_feat: Number of features.\n",
    "\n",
    "  Returns:\n",
    "    PCA variables for count, mean, covariance, eigenvalues,\n",
    "    eigenvectors, and k principal components for both time and feature\n",
    "    major representations.\n",
    "  \"\"\"\n",
    "  # Time based\n",
    "  # Sized by num_feat, matching the columns of time-major matrices\n",
    "  (pca_time_count_var,\n",
    "   pca_time_mean_var,\n",
    "   pca_time_cov_var,\n",
    "   pca_time_eigval_var,\n",
    "   pca_time_eigvec_var,\n",
    "   pca_time_k_pc_var) = create_pca_vars(\n",
    "       var_name=\"time\", size=num_feat)\n",
    "\n",
    "  # Features based\n",
    "  # Sized by seq_len, matching the columns of feature-major matrices\n",
    "  (pca_feat_count_var,\n",
    "   pca_feat_mean_var,\n",
    "   pca_feat_cov_var,\n",
    "   pca_feat_eigval_var,\n",
    "   pca_feat_eigvec_var,\n",
    "   pca_feat_k_pc_var) = create_pca_vars(\n",
    "       var_name=\"feat\", size=seq_len)\n",
    "\n",
    "  return (pca_time_count_var,\n",
    "          pca_time_mean_var,\n",
    "          pca_time_cov_var,\n",
    "          pca_time_eigval_var,\n",
    "          pca_time_eigvec_var,\n",
    "          pca_time_k_pc_var,\n",
    "          pca_feat_count_var,\n",
    "          pca_feat_mean_var,\n",
    "          pca_feat_cov_var,\n",
    "          pca_feat_eigval_var,\n",
    "          pca_feat_eigvec_var,\n",
    "          pca_feat_k_pc_var)\n",
    "\n",
    "\n",
    "def pca_reconstruction_k_pc(X_cen, pca_eigvec_var, k_pc):\n",
    "  \"\"\"PCA reconstruction with k principal components.\n",
    "\n",
    "  Given centered data matrix tensor X, the eigenvector variable, and the\n",
    "  number of principal components to keep, returns the reconstruction of\n",
    "  X centered.\n",
    "\n",
    "  Args:\n",
    "    X_cen: tf.float64 matrix tensor of centered input data.\n",
    "    pca_eigvec_var: tf.float64 matrix variable storing eigenvectors.\n",
    "    k_pc: Number of principal components to keep.\n",
    "\n",
    "  Returns:\n",
    "    X_cen_recon: 2D input data tensor reconstructed.\n",
    "  \"\"\"\n",
    "  # Project onto the span of the last k eigenvector columns. The variable is\n",
    "  # populated from tf.linalg.eigh, which orders eigenvalues ascending, so the\n",
    "  # last columns correspond to the largest eigenvalues.\n",
    "  # time_shape = (num_feat, num_feat)\n",
    "  # feat_shape = (seq_len, seq_len)\n",
    "  projection_matrix = tf.matmul(\n",
    "      a=pca_eigvec_var[:, -k_pc:],\n",
    "      b=pca_eigvec_var[:, -k_pc:],\n",
    "      transpose_b=True)\n",
    "\n",
    "  # time_shape = (cur_batch_size * seq_len, num_feat)\n",
    "  # feat_shape = (cur_batch_size * num_feat, seq_len)\n",
    "  X_cen_recon = tf.matmul(\n",
    "      a=X_cen,\n",
    "      b=projection_matrix)\n",
    "\n",
    "  return X_cen_recon\n",
    "\n",
    "\n",
    "def pca_reconstruction_k_pc_mse(X_cen, pca_eigvec_var, k_pc):\n",
    "  \"\"\"MSE of PCA reconstruction with k principal components.\n",
    "\n",
    "  Given centered data matrix tensor X, the eigenvector variable, and the\n",
    "  number of principal components to keep, returns the reconstruction\n",
    "  mean squared error.\n",
    "\n",
    "  Args:\n",
    "    X_cen: tf.float64 matrix tensor of centered input data.\n",
    "    pca_eigvec_var: tf.float64 matrix variable storing eigenvectors.\n",
    "    k_pc: Number of principal components to keep.\n",
    "\n",
    "  Returns:\n",
    "    mse: Reconstruction mean squared error.\n",
    "  \"\"\"\n",
    "  # time_shape = (cur_batch_size * seq_len, num_feat)\n",
    "  # feat_shape = (cur_batch_size * num_feat, seq_len)\n",
    "  X_cen_recon = pca_reconstruction_k_pc(\n",
    "      X_cen, pca_eigvec_var, k_pc)\n",
    "\n",
    "  # time_shape = (cur_batch_size * seq_len, num_feat)\n",
    "  # feat_shape = (cur_batch_size * num_feat, seq_len)\n",
    "  error = X_cen - X_cen_recon\n",
    "\n",
    "  # Sum the squared error over the last axis (columns), then average over\n",
    "  # the remaining (row) axis\n",
    "  # shape = ()\n",
    "  mse = tf.reduce_mean(\n",
    "      input_tensor=tf.reduce_sum(\n",
    "          input_tensor=tf.square(x=error), axis=-1))\n",
    "\n",
    "  return mse\n",
    "\n",
    "\n",
    "def find_best_k_principal_components(X_recon_mse, pca_k_pc_var):\n",
    "  \"\"\"Find best k principal components from reconstruction MSE.\n",
    "\n",
    "  Given reconstruction MSE, return number of principal components\n",
    "  with lowest MSE in variable.\n",
    "\n",
    "  Args:\n",
    "    X_recon_mse: tf.float64 vector tensor of reconstruction mean\n",
    "      squared error.\n",
    "    pca_k_pc_var: tf.int64 scalar variable to hold best number of\n",
    "      principal components.\n",
    "\n",
    "  Returns:\n",
    "    pca_k_pc_var: Updated scalar variable now with best number of\n",
    "      principal components.\n",
    "  \"\"\"\n",
    "  # argmin returns a zero-based index; add one to convert it into a count\n",
    "  # of principal components\n",
    "  best_pca_k_pc = tf.argmin(input=X_recon_mse) + 1\n",
    "\n",
    "  # Force the assign to run before the returned identity reads the variable\n",
    "  with tf.control_dependencies(\n",
    "      control_inputs=[tf.assign(ref=pca_k_pc_var,\n",
    "                                value=best_pca_k_pc)]):\n",
    "\n",
    "    return tf.identity(input=pca_k_pc_var)\n",
    "\n",
    "\n",
    "def set_k_principal_components(user_k_pc, pca_k_pc_var):\n",
    "  \"\"\"Set k principal components from user-defined value.\n",
    "\n",
    "  Given user-defined number of principal components, return\n",
    "  variable set to this value.\n",
    "\n",
    "  Args:\n",
    "    user_k_pc: User-defined python integer for number of principal\n",
    "      components.\n",
    "    pca_k_pc_var: tf.int64 scalar variable to hold chosen number of\n",
    "      principal components.\n",
    "\n",
    "  Returns:\n",
    "    pca_k_pc_var: Updated scalar variable now with chosen number of\n",
    "      principal components.\n",
    "  \"\"\"\n",
    "  # The control dependency guarantees the assign executes before the\n",
    "  # returned identity reads the variable\n",
    "  with tf.control_dependencies(\n",
    "      control_inputs=[tf.assign(ref=pca_k_pc_var,\n",
    "                                value=user_k_pc)]):\n",
    "\n",
    "    return tf.identity(input=pca_k_pc_var)\n",
    "\n",
    "\n",
    "def pca_model(X, mode, params, cur_batch_size, dummy_var):\n",
    "  \"\"\"PCA to reconstruct inputs and minimize reconstruction error.\n",
    "\n",
    "  Given data matrix tensor X, the current Estimator mode, the dictionary of\n",
    "  parameters, current batch size, and the number of features, process through\n",
    "  PCA model subgraph and return reconstructed inputs as output.\n",
    "\n",
    "  Args:\n",
    "    X: tf.float64 matrix tensor of input data.\n",
    "    mode: Estimator ModeKeys. Can take values of TRAIN, EVAL, and PREDICT.\n",
    "    params: Dictionary of parameters.\n",
    "    cur_batch_size: Current batch size, could be partially filled.\n",
    "    dummy_var: Dummy variable used to allow training mode to happen since it\n",
    "      requires a gradient to tie back to the graph dependency.\n",
    "\n",
    "  Returns:\n",
    "    loss: Reconstruction loss.\n",
    "    train_op: Train operation so that Estimator can correctly add to dependency\n",
    "      graph.\n",
    "    X_time: 2D tensor of time major input data (mean-centered in the\n",
    "      non-training branches).\n",
    "    X_time_recon: 2D tensor of time major reconstructed input data.\n",
    "    X_feat: 2D tensor of feature major input data (mean-centered in the\n",
    "      non-training branches).\n",
    "    X_feat_recon: 2D tensor of feature major reconstructed input data.\n",
    "  \"\"\"\n",
    "  # Reshape into 2-D tensors\n",
    "  # Time based\n",
    "  # shape = (cur_batch_size * seq_len, num_feat)\n",
    "  X_time = tf.reshape(\n",
    "      tensor=X,\n",
    "      shape=[cur_batch_size * params[\"seq_len\"], params[\"num_feat\"]])\n",
    "\n",
    "  # Features based\n",
    "  # shape = (cur_batch_size, num_feat, seq_len)\n",
    "  X_transposed = tf.transpose(a=X, perm=[0, 2, 1])\n",
    "\n",
    "  # shape = (cur_batch_size * num_feat, seq_len)\n",
    "  X_feat = tf.reshape(\n",
    "      tensor=X_transposed,\n",
    "      shape=[cur_batch_size * params[\"num_feat\"], params[\"seq_len\"]])\n",
    "\n",
    "  ##############################################################################\n",
    "\n",
    "  # Variables for calculating error distribution statistics\n",
    "  (pca_time_count_var,\n",
    "   pca_time_mean_var,\n",
    "   pca_time_cov_var,\n",
    "   pca_time_eigval_var,\n",
    "   pca_time_eigvec_var,\n",
    "   pca_time_k_pc_var,\n",
    "   pca_feat_count_var,\n",
    "   pca_feat_mean_var,\n",
    "   pca_feat_cov_var,\n",
    "   pca_feat_eigval_var,\n",
    "   pca_feat_eigvec_var,\n",
    "   pca_feat_k_pc_var) = create_both_pca_vars(\n",
    "      params[\"seq_len\"], params[\"num_feat\"])\n",
    "\n",
    "  # 3. Loss function, training/eval ops\n",
    "  if (mode == tf.estimator.ModeKeys.TRAIN and\n",
    "      params[\"training_mode\"] == \"reconstruction\"):\n",
    "    # Not autotuning: update the running covariance statistics and refresh\n",
    "    # the eigendecomposition variables from them\n",
    "    if not params[\"autotune_principal_components\"]:\n",
    "      with tf.variable_scope(name_or_scope=\"pca_vars\", reuse=tf.AUTO_REUSE):\n",
    "        # Check if batch is a singleton, very important for covariance math\n",
    "\n",
    "        # Time based\n",
    "        # shape = ()\n",
    "        singleton_condition = tf.equal(\n",
    "            x=cur_batch_size * params[\"seq_len\"], y=1)\n",
    "\n",
    "        pca_time_cov_var, pca_time_mean_var, pca_time_count_var = tf.cond(\n",
    "            pred=singleton_condition,\n",
    "            true_fn=lambda: singleton_batch_cov_variable_updating(\n",
    "                params[\"seq_len\"],\n",
    "                X_time,\n",
    "                pca_time_count_var,\n",
    "                pca_time_mean_var,\n",
    "                pca_time_cov_var),\n",
    "            false_fn=lambda: non_singleton_batch_cov_variable_updating(\n",
    "                cur_batch_size,\n",
    "                params[\"seq_len\"],\n",
    "                X_time,\n",
    "                pca_time_count_var,\n",
    "                pca_time_mean_var,\n",
    "                pca_time_cov_var))\n",
    "\n",
    "        # shape = (num_feat,) & (num_feat, num_feat)\n",
    "        pca_time_eigval_tensor, pca_time_eigvec_tensor = tf.linalg.eigh(\n",
    "            tensor=pca_time_cov_var)\n",
    "\n",
    "        if params[\"k_principal_components_time\"] is not None:\n",
    "          pca_time_k_pc = set_k_principal_components(\n",
    "              params[\"k_principal_components_time\"], pca_time_k_pc_var)\n",
    "        else:\n",
    "          # Placeholder op so the control-dependency list below stays valid\n",
    "          pca_time_k_pc = tf.zeros(shape=(), dtype=tf.float64)\n",
    "\n",
    "        # Features based\n",
    "        # shape = ()\n",
    "        singleton_features_condition = tf.equal(\n",
    "            x=cur_batch_size * params[\"num_feat\"], y=1)\n",
    "\n",
    "        pca_feat_cov_var, pca_feat_mean_var, pca_feat_count_var = tf.cond(\n",
    "            pred=singleton_features_condition,\n",
    "            true_fn=lambda: singleton_batch_cov_variable_updating(\n",
    "                params[\"num_feat\"],\n",
    "                X_feat,\n",
    "                pca_feat_count_var, pca_feat_mean_var,\n",
    "                pca_feat_cov_var),\n",
    "            false_fn=lambda: non_singleton_batch_cov_variable_updating(\n",
    "                cur_batch_size,\n",
    "                params[\"num_feat\"],\n",
    "                X_feat,\n",
    "                pca_feat_count_var,\n",
    "                pca_feat_mean_var,\n",
    "                pca_feat_cov_var))\n",
    "\n",
    "        # shape = (seq_len,) & (seq_len, seq_len)\n",
    "        pca_feat_eigval_tensor, pca_feat_eigvec_tensor = tf.linalg.eigh(\n",
    "            tensor=pca_feat_cov_var)\n",
    "\n",
    "        if params[\"k_principal_components_feat\"] is not None:\n",
    "          pca_feat_k_pc = set_k_principal_components(\n",
    "              params[\"k_principal_components_feat\"], pca_feat_k_pc_var)\n",
    "        else:\n",
    "          # Placeholder op so the control-dependency list below stays valid\n",
    "          pca_feat_k_pc = tf.zeros(shape=(), dtype=tf.float64)\n",
    "\n",
    "      # Lastly use control dependencies around loss to enforce the PCA\n",
    "      # variables to be assigned, the control order matters, hence the separate\n",
    "      # contexts\n",
    "      with tf.control_dependencies(\n",
    "          control_inputs=[pca_time_cov_var, pca_feat_cov_var]):\n",
    "        with tf.control_dependencies(\n",
    "            control_inputs=[pca_time_mean_var, pca_feat_mean_var]):\n",
    "          with tf.control_dependencies(\n",
    "              control_inputs=[pca_time_count_var, pca_feat_count_var]):\n",
    "            with tf.control_dependencies(\n",
    "                control_inputs=[tf.assign(ref=pca_time_eigval_var,\n",
    "                                          value=pca_time_eigval_tensor),\n",
    "                                tf.assign(ref=pca_time_eigvec_var,\n",
    "                                          value=pca_time_eigvec_tensor),\n",
    "                                tf.assign(ref=pca_feat_eigval_var,\n",
    "                                          value=pca_feat_eigval_tensor),\n",
    "                                tf.assign(ref=pca_feat_eigvec_var,\n",
    "                                          value=pca_feat_eigvec_tensor),\n",
    "                                pca_time_k_pc,\n",
    "                                pca_feat_k_pc]):\n",
    "\n",
    "\n",
    "              # Zero loss tied to dummy_var so a gradient exists for train_op\n",
    "              loss = tf.reduce_sum(\n",
    "                  input_tensor=tf.zeros(\n",
    "                      shape=(), dtype=tf.float64) * dummy_var)\n",
    "\n",
    "              train_op = tf.contrib.layers.optimize_loss(\n",
    "                  loss=loss,\n",
    "                  global_step=tf.train.get_global_step(),\n",
    "                  learning_rate=params[\"learning_rate\"],\n",
    "                  optimizer=\"SGD\")\n",
    "\n",
    "              return loss, train_op, None, None, None, None\n",
    "    else:\n",
    "      # Autotuning: choose k per representation, either by minimizing\n",
    "      # reconstruction MSE over candidate k values or from a user override\n",
    "      # Time based\n",
    "      if params[\"k_principal_components_time\"] is None:\n",
    "        # shape = (cur_batch_size * seq_len, num_feat)\n",
    "        X_time_cen = X_time - pca_time_mean_var\n",
    "\n",
    "        # shape = (num_feat - 1,)\n",
    "        X_time_recon_mse = tf.map_fn(\n",
    "            fn=lambda x: pca_reconstruction_k_pc_mse(\n",
    "                X_time_cen, pca_time_eigvec_var, x),\n",
    "            elems=tf.range(start=1,\n",
    "                           limit=params[\"num_feat\"],\n",
    "                           dtype=tf.int64),\n",
    "            dtype=tf.float64)\n",
    "\n",
    "        pca_time_k_pc = find_best_k_principal_components(\n",
    "            X_time_recon_mse, pca_time_k_pc_var)\n",
    "      else:\n",
    "        pca_time_k_pc = set_k_principal_components(\n",
    "            params[\"k_principal_components_time\"], pca_time_k_pc_var)\n",
    "\n",
    "      if params[\"k_principal_components_feat\"] is None:\n",
    "        # Features based\n",
    "        # shape = (cur_batch_size * num_feat, seq_len)\n",
    "        X_feat_cen = X_feat - pca_feat_mean_var\n",
    "\n",
    "        # shape = (seq_len - 1,)\n",
    "        X_feat_recon_mse = tf.map_fn(\n",
    "            fn=lambda x: pca_reconstruction_k_pc_mse(\n",
    "                X_feat_cen, pca_feat_eigvec_var, x),\n",
    "            elems=tf.range(start=1,\n",
    "                           limit=params[\"seq_len\"],\n",
    "                           dtype=tf.int64),\n",
    "            dtype=tf.float64)\n",
    "\n",
    "        pca_feat_k_pc = find_best_k_principal_components(\n",
    "            X_feat_recon_mse, pca_feat_k_pc_var)\n",
    "      else:\n",
    "        pca_feat_k_pc = set_k_principal_components(\n",
    "            params[\"k_principal_components_feat\"], pca_feat_k_pc_var)\n",
    "\n",
    "      with tf.control_dependencies(\n",
    "          control_inputs=[pca_time_k_pc, pca_feat_k_pc]):\n",
    "        loss = tf.reduce_sum(\n",
    "            input_tensor=tf.zeros(\n",
    "                shape=(), dtype=tf.float64) * dummy_var)\n",
    "\n",
    "        train_op = tf.contrib.layers.optimize_loss(\n",
    "            loss=loss,\n",
    "            global_step=tf.train.get_global_step(),\n",
    "            learning_rate=params[\"learning_rate\"],\n",
    "            optimizer=\"SGD\")\n",
    "\n",
    "        return loss, train_op, None, None, None, None\n",
    "\n",
    "  else:\n",
    "    # Not reconstruction training: center the inputs and reconstruct them\n",
    "    # from the retained principal components\n",
    "    # Time based\n",
    "    # shape = (cur_batch_size * seq_len, num_feat)\n",
    "    X_time_cen = X_time - pca_time_mean_var\n",
    "\n",
    "    # shape = (cur_batch_size * seq_len, num_feat)\n",
    "    if params[\"k_principal_components_time\"] is None:\n",
    "      X_time_recon = pca_reconstruction_k_pc(\n",
    "          X_time_cen,\n",
    "          pca_time_eigvec_var,\n",
    "          pca_time_k_pc_var)\n",
    "    else:\n",
    "      X_time_recon = pca_reconstruction_k_pc(\n",
    "          X_time_cen,\n",
    "          pca_time_eigvec_var,\n",
    "          params[\"k_principal_components_time\"])\n",
    "\n",
    "    # Features based\n",
    "    # shape = (cur_batch_size * num_feat, seq_len)\n",
    "    X_feat_cen = X_feat - pca_feat_mean_var\n",
    "\n",
    "    # shape = (cur_batch_size * num_feat, seq_len)\n",
    "    if params[\"k_principal_components_feat\"] is None:\n",
    "      X_feat_recon = pca_reconstruction_k_pc(\n",
    "          X_feat_cen,\n",
    "          pca_feat_eigvec_var,\n",
    "          pca_feat_k_pc_var)\n",
    "    else:\n",
    "      X_feat_recon = pca_reconstruction_k_pc(\n",
    "          X_feat_cen,\n",
    "          pca_feat_eigvec_var,\n",
    "          params[\"k_principal_components_feat\"])\n",
    "\n",
    "    return None, None, X_time_cen, X_time_recon, X_feat_cen, X_feat_recon"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## reconstruction.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile anomaly_detection_module/trainer/reconstruction.py\n",
    "import tensorflow as tf\n",
    "\n",
    "\n",
    "def reconstruction_evaluation(X_time_orig, X_time_recon, training_mode):\n",
    "  \"\"\"Reconstruction loss on evaluation set.\n",
    "\n",
    "  Given time major original and reconstructed features data and the training\n",
    "  mode, return loss and eval_metrics_ops.\n",
    "\n",
    "  Args:\n",
    "    X_time_orig: Time major original features data.\n",
    "    X_time_recon: Time major reconstructed features data.\n",
    "    training_mode: Current training mode.\n",
    "\n",
    "  Returns:\n",
    "    loss: Scalar reconstruction loss.\n",
    "    eval_metric_ops: Evaluation metrics of reconstruction, or None when not\n",
    "      in the \"reconstruction\" training mode.\n",
    "  \"\"\"\n",
    "  loss = tf.losses.mean_squared_error(\n",
    "      labels=X_time_orig, predictions=X_time_recon)\n",
    "\n",
    "  eval_metric_ops = None\n",
    "\n",
    "  # Metrics are only reported while evaluating the reconstruction mode\n",
    "  if training_mode == \"reconstruction\":\n",
    "    # Reconstruction eval metrics\n",
    "    eval_metric_ops = {\n",
    "        \"rmse\": tf.metrics.root_mean_squared_error(\n",
    "            labels=X_time_orig, predictions=X_time_recon),\n",
    "        \"mae\": tf.metrics.mean_absolute_error(\n",
    "            labels=X_time_orig, predictions=X_time_recon)\n",
    "    }\n",
    "\n",
    "  return loss, eval_metric_ops"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## error_distribution_vars.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile anomaly_detection_module/trainer/error_distribution_vars.py\n",
    "import tensorflow as tf\n",
    "\n",
    "\n",
    "def create_mahalanobis_dist_vars(var_name, size):\n",
    "  \"\"\"Creates mahalanobis distance variables.\n",
    "\n",
    "  Given variable name and size, create and return mahalanobis distance variables\n",
    "  for count, mean, covariance, and inverse covariance.\n",
    "\n",
    "  Args:\n",
    "    var_name: String denoting which set of variables to create. Values are\n",
    "      \"time\" and \"feat\".\n",
    "    size: The size of the variable, either sequence length or number of\n",
    "      features.\n",
    "\n",
    "  Returns:\n",
    "    Mahalanobis distance variables for count, mean, covariance, and inverse\n",
    "    covariance.\n",
    "  \"\"\"\n",
    "  # AUTO_REUSE means repeated calls return the already-created variables\n",
    "  with tf.variable_scope(\n",
    "      name_or_scope=\"mahalanobis_dist_vars\", reuse=tf.AUTO_REUSE):\n",
    "    # Non-trainable: these are updated manually, not by an optimizer\n",
    "    count_var = tf.get_variable(\n",
    "        name=\"abs_err_count_{0}_var\".format(var_name),\n",
    "        dtype=tf.int64,\n",
    "        initializer=tf.zeros(shape=[], dtype=tf.int64),\n",
    "        trainable=False)\n",
    "\n",
    "    mean_var = tf.get_variable(\n",
    "        name=\"abs_err_mean_{0}_var\".format(var_name),\n",
    "        dtype=tf.float64,\n",
    "        initializer=tf.zeros(shape=[size], dtype=tf.float64),\n",
    "        trainable=False)\n",
    "\n",
    "    cov_var = tf.get_variable(\n",
    "        name=\"abs_err_cov_{0}_var\".format(var_name),\n",
    "        dtype=tf.float64,\n",
    "        initializer=tf.zeros(shape=[size, size], dtype=tf.float64),\n",
    "        trainable=False)\n",
    "\n",
    "    inv_cov_var = tf.get_variable(\n",
    "        name=\"abs_err_inv_cov_{0}_var\".format(var_name),\n",
    "        dtype=tf.float64,\n",
    "        initializer=tf.zeros(shape=[size, size], dtype=tf.float64),\n",
    "        trainable=False)\n",
    "\n",
    "  return count_var, mean_var, cov_var, inv_cov_var\n",
    "\n",
    "\n",
    "def create_both_mahalanobis_dist_vars(seq_len, num_feat):\n",
    "  \"\"\"Creates both time & feature major mahalanobis distance variables.\n",
    "\n",
    "  Given dimensions of inputs, create and return mahalanobis distance variables\n",
    "  for count, mean, covariance, and inverse covariance for both time and\n",
    "  feature major representations.\n",
    "\n",
    "  Args:\n",
    "    seq_len: Number of timesteps in sequence.\n",
    "    num_feat: Number of features.\n",
    "\n",
    "  Returns:\n",
    "    Mahalanobis distance variables for count, mean, covariance, and inverse\n",
    "    covariance for both time and feature major representations.\n",
    "  \"\"\"\n",
    "  # Time based\n",
    "  # Sized by num_feat, matching the columns of time-major matrices\n",
    "  (abs_err_count_time_var,\n",
    "   abs_err_mean_time_var,\n",
    "   abs_err_cov_time_var,\n",
    "   abs_err_inv_cov_time_var) = create_mahalanobis_dist_vars(\n",
    "       var_name=\"time\", size=num_feat)\n",
    "\n",
    "  # Features based\n",
    "  # Sized by seq_len, matching the columns of feature-major matrices\n",
    "  (abs_err_count_feat_var,\n",
    "   abs_err_mean_feat_var,\n",
    "   abs_err_cov_feat_var,\n",
    "   abs_err_inv_cov_feat_var) = create_mahalanobis_dist_vars(\n",
    "       var_name=\"feat\", size=seq_len)\n",
    "\n",
    "  return (abs_err_count_time_var,\n",
    "          abs_err_mean_time_var,\n",
    "          abs_err_cov_time_var,\n",
    "          abs_err_inv_cov_time_var,\n",
    "          abs_err_count_feat_var,\n",
    "          abs_err_mean_feat_var,\n",
    "          abs_err_cov_feat_var,\n",
    "          abs_err_inv_cov_feat_var)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## calculate_error_distribution_statistics.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile anomaly_detection_module/trainer/calculate_error_distribution_statistics.py\n",
    "import tensorflow as tf\n",
    "\n",
    "\n",
    "# Running covariance updating functions for mahalanobis distance variables\n",
    "def update_record_count(count_a, count_b):\n",
    "  \"\"\"Updates the running number of records processed.\n",
    "\n",
    "  Given previous running total and current batch size, return new running total.\n",
    "\n",
    "  Args:\n",
    "    count_a: tf.int64 scalar tensor of previous running total of records.\n",
    "    count_b: tf.int64 scalar tensor of current batch size.\n",
    "\n",
    "  Returns:\n",
    "    A tf.int64 scalar tensor of new running total of records.\n",
    "  \"\"\"\n",
    "  # New running total is simply the previous total plus the batch count\n",
    "  return count_a + count_b\n",
    "\n",
    "\n",
    "# Incremental covariance updating functions for mahalanobis distance variables\n",
    "\n",
    "\n",
    "def update_mean_incremental(count_a, mean_a, value_b):\n",
    "  \"\"\"Updates the running mean vector incrementally.\n",
    "\n",
    "  Given previous running total, running column means, and single example's\n",
    "  column values, return new running column means.\n",
    "\n",
    "  Args:\n",
    "    count_a: tf.int64 scalar tensor of previous running total of records.\n",
    "    mean_a: tf.float64 vector tensor of previous running column means.\n",
    "    value_b: tf.float64 vector tensor of single example's column values.\n",
    "\n",
    "  Returns:\n",
    "    A tf.float64 vector tensor of new running column means.\n",
    "  \"\"\"\n",
    "  # Undo the averaging to recover the running column sums\n",
    "  umean_a = mean_a * tf.cast(x=count_a, dtype=tf.float64)\n",
    "  # value_b is expected to carry a leading singleton dimension (1, size);\n",
    "  # squeeze it to a vector before adding -- TODO confirm against callers\n",
    "  mean_ab_num = umean_a + tf.squeeze(input=value_b, axis=0)\n",
    "  # Re-average over the incremented record count\n",
    "  mean_ab = mean_ab_num / tf.cast(x=count_a + 1, dtype=tf.float64)\n",
    "\n",
    "  return mean_ab\n",
    "\n",
    "\n",
    "# This function updates the covariance matrix incrementally\n",
    "def update_cov_incremental(\n",
    "    count_a, mean_a, cov_a, value_b, mean_ab, sample_cov):\n",
    "  \"\"\"Updates the running covariance matrix incrementally.\n",
    "\n",
    "  Given previous running total, running column means, running covariance matrix,\n",
    "  single example's column values, new running column means, and whether to use\n",
    "  sample covariance or not, return new running covariance matrix.\n",
    "\n",
    "  Args:\n",
    "    count_a: tf.int64 scalar tensor of previous running total of records.\n",
    "    mean_a: tf.float64 vector tensor of previous running column means.\n",
    "    cov_a: tf.float64 matrix tensor of previous running covariance matrix.\n",
    "    value_b: tf.float64 vector tensor of single example's column values.\n",
    "    mean_ab: tf.float64 vector tensor of new running column means.\n",
    "    sample_cov: Bool flag on whether sample or population covariance is used.\n",
    "\n",
    "  Returns:\n",
    "    A tf.float64 matrix tensor of new covariance matrix.\n",
    "  \"\"\"\n",
    "  mean_diff = tf.matmul(\n",
    "      a=value_b - mean_a, b=value_b - mean_ab, transpose_a=True)\n",
    "\n",
    "  if sample_cov:\n",
    "    ucov_a = cov_a * tf.cast(x=count_a - 1, dtype=tf.float64)\n",
    "    cov_ab = (ucov_a + mean_diff) / tf.cast(x=count_a, dtype=tf.float64)\n",
    "  else:\n",
    "    ucov_a = cov_a * tf.cast(x=count_a, dtype=tf.float64)\n",
    "    cov_ab = (ucov_a + mean_diff) / tf.cast(x=count_a + 1, dtype=tf.float64)\n",
    "\n",
    "  return cov_ab\n",
    "\n",
    "\n",
    "def singleton_batch_cov_variable_updating(\n",
    "    inner_size, X, count_variable, mean_variable, cov_variable):\n",
    "  \"\"\"Updates mahalanobis variables incrementally when number_of_rows equals 1.\n",
    "\n",
    "  Given the inner size of the matrix, the data vector X, the variable tracking\n",
    "  running record counts, the variable tracking running column means, and the\n",
    "  variable tracking running covariance matrix, returns updated running\n",
    "  covariance matrix, running column means, and running record count variables.\n",
    "\n",
    "  Args:\n",
    "    inner_size: Inner size of matrix X.\n",
    "    X: tf.float64 matrix tensor of input data.\n",
    "    count_variable: tf.int64 scalar variable tracking running record counts.\n",
    "    mean_variable: tf.float64 vector variable tracking running column means.\n",
    "    cov_variable: tf.float64 matrix variable tracking running covariance matrix.\n",
    "\n",
    "  Returns:\n",
    "    Updated running covariance matrix, running column means, and running record\n",
    "      count variables.\n",
    "  \"\"\"\n",
    "  # Calculate new combined mean for incremental covariance matrix calculation\n",
    "  # time_shape = (num_feat,), features_shape = (seq_len,)\n",
    "  mean_ab = update_mean_incremental(\n",
    "      count_a=count_variable, mean_a=mean_variable, value_b=X)\n",
    "\n",
    "  # Update running variables from single example\n",
    "  # time_shape = (), features_shape = ()\n",
    "  count_tensor = update_record_count(count_a=count_variable, count_b=1)\n",
    "\n",
    "  # time_shape = (num_feat,), features_shape = (seq_len,)\n",
    "  mean_tensor = mean_ab\n",
    "\n",
    "  # Check if inner dimension is greater than 1 to calculate covariance matrix\n",
    "  # NOTE(review): inner_size is compared with Python ==, so this branch is\n",
    "  # resolved once at graph-construction time, not per-step with tf.cond.\n",
    "  if inner_size == 1:\n",
    "    # An inner dimension of 1 carries no covariance information; use zeros\n",
    "    cov_tensor = tf.zeros_like(tensor=cov_variable, dtype=tf.float64)\n",
    "  else:\n",
    "    # time_shape = (num_feat, num_feat)\n",
    "    # features_shape = (seq_len, seq_len)\n",
    "    cov_tensor = update_cov_incremental(\n",
    "        count_a=count_variable,\n",
    "        mean_a=mean_variable,\n",
    "        cov_a=cov_variable,\n",
    "        value_b=X,\n",
    "        mean_ab=mean_ab,\n",
    "        sample_cov=True)\n",
    "\n",
    "  # Assign values to variables, use control dependencies around return to\n",
    "  # enforce the mahalanobis variables to be assigned, the control order matters,\n",
    "  # hence the separate contexts.\n",
    "  # Nesting the contexts makes each inner tf.assign depend on the outer one,\n",
    "  # so the assignment order is: covariance, then mean, then count.\n",
    "  with tf.control_dependencies(\n",
    "      control_inputs=[tf.assign(ref=cov_variable, value=cov_tensor)]):\n",
    "    with tf.control_dependencies(\n",
    "        control_inputs=[tf.assign(ref=mean_variable, value=mean_tensor)]):\n",
    "      with tf.control_dependencies(\n",
    "          control_inputs=[tf.assign(ref=count_variable, value=count_tensor)]):\n",
    "\n",
    "        return (tf.identity(input=cov_variable),\n",
    "                tf.identity(input=mean_variable),\n",
    "                tf.identity(input=count_variable))\n",
    "\n",
    "\n",
    "def singleton_batch_var_variable_updating(\n",
    "    inner_size, x, count_variable, mean_variable, var_variable):\n",
    "  \"\"\"Updates mahalanobis thresh vars incrementally when number_of_rows equals 1.\n",
    "\n",
    "  Given the inner size of the matrix, the data scalar x, the variable tracking\n",
    "  running record counts, the variable tracking the running mean, and the\n",
    "  variable tracking the running variance, returns updated running variance,\n",
    "  running mean, and running record count variables.\n",
    "\n",
    "  Args:\n",
    "    inner_size: Inner size of matrix X.\n",
    "    x: tf.float64 scalar tensor of input data.\n",
    "    count_variable: tf.int64 scalar variable tracking running record counts.\n",
    "    mean_variable: tf.float64 scalar variable tracking running mean.\n",
    "    var_variable: tf.float64 scalar variable tracking running variance.\n",
    "\n",
    "  Returns:\n",
    "    Updated running variance, running mean, and running record count variables.\n",
    "  \"\"\"\n",
    "  # Calculate new combined mean for incremental covariance matrix calculation\n",
    "  # time_shape = (), features_shape = ()\n",
    "  # NOTE(review): update_mean_incremental squeezes axis=0 from value_b, so x\n",
    "  # is expected to carry a leading singleton dimension here — confirm callers.\n",
    "  mean_ab = update_mean_incremental(\n",
    "      count_a=count_variable, mean_a=mean_variable, value_b=x)\n",
    "\n",
    "  # Update running variables from single example\n",
    "  # time_shape = (), features_shape = ()\n",
    "  count_tensor = update_record_count(count_a=count_variable, count_b=1)\n",
    "\n",
    "  # time_shape = (), features_shape = ()\n",
    "  mean_tensor = mean_ab\n",
    "\n",
    "  # Check if inner dimension is greater than 1 to calculate covariance matrix\n",
    "  # NOTE(review): inner_size is compared with Python ==, so this branch is\n",
    "  # resolved once at graph-construction time, not per-step with tf.cond.\n",
    "  if inner_size == 1:\n",
    "    # An inner dimension of 1 carries no variance information; use zero\n",
    "    var_tensor = tf.zeros_like(tensor=var_variable, dtype=tf.float64)\n",
    "  else:\n",
    "    # Reuse the matrix-covariance updater by lifting the scalars to 1x1\n",
    "    # time_shape = (), features_shape = ()\n",
    "    var_tensor = update_cov_incremental(\n",
    "        count_a=count_variable,\n",
    "        mean_a=tf.reshape(tensor=mean_variable, shape=[1]),\n",
    "        cov_a=tf.reshape(tensor=var_variable, shape=[1, 1]),\n",
    "        value_b=tf.reshape(tensor=x, shape=[1, 1]),\n",
    "        mean_ab=tf.reshape(tensor=mean_ab, shape=[1]),\n",
    "        sample_cov=True)\n",
    "\n",
    "    # Collapse the (1, 1) matrix result back to a scalar\n",
    "    var_tensor = tf.squeeze(input=var_tensor)\n",
    "\n",
    "  # Assign values to variables, use control dependencies around return to\n",
    "  # enforce the mahalanobis variables to be assigned, the control order matters,\n",
    "  # hence the separate contexts.\n",
    "  # Nesting the contexts makes each inner tf.assign depend on the outer one,\n",
    "  # so the assignment order is: variance, then mean, then count.\n",
    "  with tf.control_dependencies(\n",
    "      control_inputs=[tf.assign(ref=var_variable, value=var_tensor)]):\n",
    "    with tf.control_dependencies(\n",
    "        control_inputs=[tf.assign(ref=mean_variable, value=mean_tensor)]):\n",
    "      with tf.control_dependencies(\n",
    "          control_inputs=[tf.assign(ref=count_variable, value=count_tensor)]):\n",
    "\n",
    "        return (tf.identity(input=var_variable),\n",
    "                tf.identity(input=mean_variable),\n",
    "                tf.identity(input=count_variable))\n",
    "\n",
    "\n",
    "# Batch covariance updating functions for mahalanobis distance variables\n",
    "\n",
    "\n",
    "def update_mean_batch(count_a, mean_a, count_b, mean_b):\n",
    "  \"\"\"Merges a batch's column means into the running column means.\n",
    "\n",
    "  Args:\n",
    "    count_a: tf.int64 scalar tensor of previous running total of records.\n",
    "    mean_a: tf.float64 vector tensor of previous running column means.\n",
    "    count_b: tf.int64 scalar tensor of current batch size.\n",
    "    mean_b: tf.float64 vector tensor of batch's column means.\n",
    "\n",
    "  Returns:\n",
    "    A tf.float64 vector tensor of new running column means.\n",
    "  \"\"\"\n",
    "  # Recover each side's column sums from its mean and record count\n",
    "  weighted_a = mean_a * tf.cast(x=count_a, dtype=tf.float64)\n",
    "  weighted_b = mean_b * tf.cast(x=count_b, dtype=tf.float64)\n",
    "\n",
    "  # Average the combined sums over the combined record count\n",
    "  return (weighted_a + weighted_b) / tf.cast(\n",
    "      x=count_a + count_b, dtype=tf.float64)\n",
    "\n",
    "\n",
    "def update_cov_batch(\n",
    "    count_a, mean_a, cov_a, count_b, mean_b, cov_b, sample_cov):\n",
    "  \"\"\"Merges a batch's covariance matrix into the running covariance matrix.\n",
    "\n",
    "  Args:\n",
    "    count_a: tf.int64 scalar tensor of previous running total of records.\n",
    "    mean_a: tf.float64 vector tensor of previous running column means.\n",
    "    cov_a: tf.float64 matrix tensor of previous running covariance matrix.\n",
    "    count_b: tf.int64 scalar tensor of current batch size.\n",
    "    mean_b: tf.float64 vector tensor of batch's column means.\n",
    "    cov_b: tf.float64 matrix tensor of batch's covariance matrix.\n",
    "    sample_cov: Bool flag on whether sample or population covariance is used.\n",
    "\n",
    "  Returns:\n",
    "    A tf.float64 matrix tensor of new running covariance matrix.\n",
    "  \"\"\"\n",
    "  # Difference of the two mean vectors, as a 1 x d row matrix\n",
    "  mean_delta = tf.expand_dims(input=mean_a - mean_b, axis=0)\n",
    "\n",
    "  # Sample covariance was normalized by n - 1, population covariance by n;\n",
    "  # un-normalize each side accordingly and pick the matching denominator\n",
    "  if sample_cov:\n",
    "    scatter_a = cov_a * tf.cast(x=count_a - 1, dtype=tf.float64)\n",
    "    scatter_b = cov_b * tf.cast(x=count_b - 1, dtype=tf.float64)\n",
    "    denominator = tf.cast(x=count_a + count_b - 1, dtype=tf.float64)\n",
    "  else:\n",
    "    scatter_a = cov_a * tf.cast(x=count_a, dtype=tf.float64)\n",
    "    scatter_b = cov_b * tf.cast(x=count_b, dtype=tf.float64)\n",
    "    denominator = tf.cast(x=count_a + count_b, dtype=tf.float64)\n",
    "\n",
    "  # Between-means correction: outer product of the mean difference scaled\n",
    "  # by n_a * n_b / (n_a + n_b)\n",
    "  mean_delta_outer = tf.matmul(a=mean_delta, b=mean_delta, transpose_a=True)\n",
    "  scaling_num = tf.cast(x=count_a * count_b, dtype=tf.float64)\n",
    "  scaling_den = tf.cast(x=count_a + count_b, dtype=tf.float64)\n",
    "  between_means = mean_delta_outer * (scaling_num / scaling_den)\n",
    "\n",
    "  return (scatter_a + scatter_b + between_means) / denominator\n",
    "\n",
    "\n",
    "def non_singleton_batch_cov_variable_updating(\n",
    "    cur_batch_size, inner_size, X, count_variable, mean_variable, cov_variable):\n",
    "  \"\"\"Updates mahalanobis variables when number_of_rows does NOT equal 1.\n",
    "\n",
    "  Given the current batch size, inner size of the matrix, the data matrix X,\n",
    "  the variable tracking running record counts, the variable tracking running\n",
    "  column means, and the variable tracking running covariance matrix, returns\n",
    "  updated running covariance matrix, running column means, and running record\n",
    "  count variables.\n",
    "\n",
    "  Args:\n",
    "    cur_batch_size: Number of examples in current batch (could be partial).\n",
    "    inner_size: Inner size of matrix X.\n",
    "    X: tf.float64 matrix tensor of input data.\n",
    "    count_variable: tf.int64 scalar variable tracking running record counts.\n",
    "    mean_variable: tf.float64 vector variable tracking running column means.\n",
    "    cov_variable: tf.float64 matrix variable tracking running covariance matrix.\n",
    "\n",
    "  Returns:\n",
    "    Updated running covariance matrix, running column means, and running record\n",
    "      count variables.\n",
    "  \"\"\"\n",
    "  # Find statistics of batch\n",
    "  # Total records contributed by this batch after flattening the inner axis\n",
    "  number_of_rows = cur_batch_size * inner_size\n",
    "\n",
    "  # time_shape = (num_feat,), features_shape = (seq_len,)\n",
    "  X_mean = tf.reduce_mean(input_tensor=X, axis=0)\n",
    "\n",
    "  # time_shape = (cur_batch_size * seq_len, num_feat)\n",
    "  # features_shape = (cur_batch_size * num_feat, seq_len)\n",
    "  X_centered = X - X_mean\n",
    "\n",
    "  # X_cov is only defined when inner_size > 1; the inner_size == 1 branch\n",
    "  # below substitutes a zero matrix, so it is never read undefined.\n",
    "  if inner_size > 1:\n",
    "    # Sample covariance of the batch (n - 1 denominator)\n",
    "    # time_shape = (num_feat, num_feat)\n",
    "    # features_shape = (seq_len, seq_len)\n",
    "    X_cov = tf.matmul(\n",
    "        a=X_centered,\n",
    "        b=X_centered,\n",
    "        transpose_a=True) / tf.cast(x=number_of_rows - 1, dtype=tf.float64)\n",
    "\n",
    "  # Update running variables from batch statistics\n",
    "  # time_shape = (), features_shape = ()\n",
    "  count_tensor = update_record_count(\n",
    "      count_a=count_variable, count_b=number_of_rows)\n",
    "\n",
    "  # time_shape = (num_feat,), features_shape = (seq_len,)\n",
    "  mean_tensor = update_mean_batch(\n",
    "      count_a=count_variable,\n",
    "      mean_a=mean_variable,\n",
    "      count_b=number_of_rows,\n",
    "      mean_b=X_mean)\n",
    "\n",
    "  # Check if inner dimension is greater than 1 to calculate covariance matrix\n",
    "  # NOTE(review): inner_size is compared with Python ==, so this branch is\n",
    "  # resolved once at graph-construction time, not per-step with tf.cond.\n",
    "  if inner_size == 1:\n",
    "    cov_tensor = tf.zeros_like(tensor=cov_variable, dtype=tf.float64)\n",
    "  else:\n",
    "    # time_shape = (num_feat, num_feat)\n",
    "    # features_shape = (seq_len, seq_len)\n",
    "    cov_tensor = update_cov_batch(\n",
    "        count_a=count_variable,\n",
    "        mean_a=mean_variable,\n",
    "        cov_a=cov_variable,\n",
    "        count_b=number_of_rows,\n",
    "        mean_b=X_mean,\n",
    "        cov_b=X_cov,\n",
    "        sample_cov=True)\n",
    "\n",
    "  # Assign values to variables, use control dependencies around return to\n",
    "  # enforce the mahalanobis variables to be assigned, the control order matters,\n",
    "  # hence the separate contexts.\n",
    "  # Nesting the contexts makes each inner tf.assign depend on the outer one,\n",
    "  # so the assignment order is: covariance, then mean, then count.\n",
    "  with tf.control_dependencies(\n",
    "      control_inputs=[tf.assign(ref=cov_variable, value=cov_tensor)]):\n",
    "    with tf.control_dependencies(\n",
    "        control_inputs=[tf.assign(ref=mean_variable, value=mean_tensor)]):\n",
    "      with tf.control_dependencies(\n",
    "          control_inputs=[tf.assign(ref=count_variable, value=count_tensor)]):\n",
    "\n",
    "        return (tf.identity(input=cov_variable),\n",
    "                tf.identity(input=mean_variable),\n",
    "                tf.identity(input=count_variable))\n",
    "\n",
    "\n",
    "def non_singleton_batch_var_variable_updating(\n",
    "    cur_batch_size, inner_size, x, count_variable, mean_variable, var_variable):\n",
    "  \"\"\"Updates mahalanobis thresh variables when number_of_rows does NOT equal 1.\n",
    "\n",
    "  Given the current batch size, inner size of the matrix, the data vector x,\n",
    "  the variable tracking the running record count, the variable tracking the\n",
    "  running mean, and the variable tracking the running variance, returns\n",
    "  updated running variance, running mean, and running record count variables.\n",
    "\n",
    "  Args:\n",
    "    cur_batch_size: Number of examples in current batch (could be partial).\n",
    "    inner_size: Inner size of matrix X.\n",
    "    x: tf.float64 vector tensor of mahalanobis distance.\n",
    "    count_variable: tf.int64 scalar variable tracking running record count.\n",
    "    mean_variable: tf.float64 scalar variable tracking running mean.\n",
    "    var_variable: tf.float64 scalar variable tracking running variance.\n",
    "\n",
    "  Returns:\n",
    "    Updated running variance, running mean, and running record count variables.\n",
    "  \"\"\"\n",
    "  # Find statistics of batch\n",
    "  # Total records contributed by this batch after flattening the inner axis\n",
    "  number_of_rows = cur_batch_size * inner_size\n",
    "\n",
    "  # time_shape = (), features_shape = ()\n",
    "  x_mean = tf.reduce_mean(input_tensor=x)\n",
    "\n",
    "  # time_shape = (cur_batch_size * seq_len,)\n",
    "  # features_shape = (cur_batch_size * num_feat,)\n",
    "  x_centered = x - x_mean\n",
    "\n",
    "  # x_var is only defined when inner_size > 1; the inner_size == 1 branch\n",
    "  # below substitutes zero, so it is never read undefined.\n",
    "  if inner_size > 1:\n",
    "    # Sample variance of the batch (n - 1 denominator)\n",
    "    # time_shape = (), features_shape = ()\n",
    "    x_var = tf.reduce_sum(input_tensor=tf.square(x=x_centered))\n",
    "    x_var /= tf.cast(x=number_of_rows - 1, dtype=tf.float64)\n",
    "\n",
    "  # Update running variables from batch statistics\n",
    "  # time_shape = (), features_shape = ()\n",
    "  count_tensor = update_record_count(\n",
    "      count_a=count_variable, count_b=number_of_rows)\n",
    "\n",
    "  # time_shape = (), features_shape = ()\n",
    "  mean_tensor = update_mean_batch(\n",
    "      count_a=count_variable,\n",
    "      mean_a=mean_variable,\n",
    "      count_b=number_of_rows,\n",
    "      mean_b=x_mean)\n",
    "\n",
    "  # Check if inner dimension is greater than 1 to calculate covariance matrix\n",
    "  # NOTE(review): inner_size is compared with Python ==, so this branch is\n",
    "  # resolved once at graph-construction time, not per-step with tf.cond.\n",
    "  if inner_size == 1:\n",
    "    var_tensor = tf.zeros_like(tensor=var_variable, dtype=tf.float64)\n",
    "  else:\n",
    "    # Reuse the matrix-covariance updater by lifting the scalars to 1x1;\n",
    "    # shape = (1, 1) before the squeeze below\n",
    "    var_tensor = update_cov_batch(\n",
    "        count_a=count_variable,\n",
    "        mean_a=mean_variable,\n",
    "        cov_a=var_variable,\n",
    "        count_b=number_of_rows,\n",
    "        mean_b=tf.expand_dims(input=x_mean, axis=0),\n",
    "        cov_b=tf.reshape(tensor=x_var, shape=[1, 1]),\n",
    "        sample_cov=True)\n",
    "\n",
    "    # Collapse the (1, 1) matrix result back to a scalar\n",
    "    var_tensor = tf.squeeze(input=var_tensor)\n",
    "\n",
    "  # Assign values to variables, use control dependencies around return to\n",
    "  # enforce the mahalanobis thresh variables to be assigned, the control order\n",
    "  # matters, hence the separate contexts.\n",
    "  # Nesting the contexts makes each inner tf.assign depend on the outer one,\n",
    "  # so the assignment order is: variance, then mean, then count.\n",
    "  with tf.control_dependencies(\n",
    "      control_inputs=[tf.assign(ref=var_variable, value=var_tensor)]):\n",
    "    with tf.control_dependencies(\n",
    "        control_inputs=[tf.assign(ref=mean_variable, value=mean_tensor)]):\n",
    "      with tf.control_dependencies(\n",
    "          control_inputs=[tf.assign(ref=count_variable, value=count_tensor)]):\n",
    "\n",
    "        return (tf.identity(input=var_variable),\n",
    "                tf.identity(input=mean_variable),\n",
    "                tf.identity(input=count_variable))\n",
    "\n",
    "\n",
    "def mahalanobis_dist(err_vec, mean_vec, inv_cov, final_shape):\n",
    "  \"\"\"Calculates mahalanobis distance from MLE.\n",
    "\n",
    "  Given reconstruction error vector, mean reconstruction error vector, inverse\n",
    "  covariance of reconstruction error, and mahalanobis distance tensor's final\n",
    "  shape, return mahalanobis distance.\n",
    "\n",
    "  Args:\n",
    "    err_vec: tf.float64 matrix tensor of reconstruction errors.\n",
    "    mean_vec: tf.float64 vector variable tracking running column means of\n",
    "      reconstruction errors.\n",
    "    inv_cov: tf.float64 matrix variable tracking running inverse covariance\n",
    "      matrix of reconstruction errors.\n",
    "    final_shape: Final shape of mahalanobis distance tensor.\n",
    "\n",
    "  Returns:\n",
    "    tf.float64 matrix tensor of mahalanobis distance.\n",
    "  \"\"\"\n",
    "  # time_shape = (cur_batch_size * seq_len, num_feat)\n",
    "  # features_shape = (cur_batch_size * num_feat, seq_len)\n",
    "  err_vec_cen = err_vec - mean_vec\n",
    "\n",
    "  # time_shape = (num_feat, cur_batch_size * seq_len)\n",
    "  # features_shape = (seq_len, cur_batch_size * num_feat)\n",
    "  mahalanobis_right_product = tf.matmul(\n",
    "      a=inv_cov, b=err_vec_cen, transpose_b=True)\n",
    "\n",
    "  # Only the diagonal of err_vec_cen @ right_product is needed, and\n",
    "  # diag(A @ B) equals the row-wise sum of A * B^T. Computing it directly\n",
    "  # avoids materializing the full (rows, rows) product that tf.diag_part\n",
    "  # would immediately discard.\n",
    "  # time_shape = (cur_batch_size * seq_len,)\n",
    "  # features_shape = (cur_batch_size * num_feat,)\n",
    "  mahalanobis_dist_flat = tf.reduce_sum(\n",
    "      input_tensor=err_vec_cen * tf.transpose(a=mahalanobis_right_product),\n",
    "      axis=1)\n",
    "\n",
    "  # time_shape = (cur_batch_size, seq_len)\n",
    "  # features_shape = (cur_batch_size, num_feat)\n",
    "  mahalanobis_dist_final_shaped = tf.reshape(\n",
    "      tensor=mahalanobis_dist_flat, shape=[-1, final_shape])\n",
    "\n",
    "  # Take sqrt to convert the squared quadratic form into a distance\n",
    "  mahalanobis_dist_final_shaped_sqrt = tf.sqrt(x=mahalanobis_dist_final_shaped)\n",
    "\n",
    "  return mahalanobis_dist_final_shaped_sqrt\n",
    "\n",
    "\n",
    "def calculate_error_distribution_statistics_training(\n",
    "    cur_batch_size,\n",
    "    X_time_abs_recon_err,\n",
    "    abs_err_count_time_var,\n",
    "    abs_err_mean_time_var,\n",
    "    abs_err_cov_time_var,\n",
    "    abs_err_inv_cov_time_var,\n",
    "    X_feat_abs_recon_err,\n",
    "    abs_err_count_feat_var,\n",
    "    abs_err_mean_feat_var,\n",
    "    abs_err_cov_feat_var,\n",
    "    abs_err_inv_cov_feat_var,\n",
    "    params,\n",
    "    dummy_var):\n",
    "  \"\"\"Calculates error distribution statistics during training mode.\n",
    "\n",
    "  Given dimensions of inputs, reconstructed inputs' absolute errors, and\n",
    "  variables tracking counts, means, and covariances of error distribution,\n",
    "  returns loss and train_op.\n",
    "\n",
    "  Args:\n",
    "    cur_batch_size: Current batch size, could be partially filled.\n",
    "    X_time_abs_recon_err: Time major reconstructed input data's absolute\n",
    "      reconstruction error.\n",
    "    abs_err_count_time_var: Time major running count of number of records.\n",
    "    abs_err_mean_time_var: Time major running column means of absolute error.\n",
    "    abs_err_cov_time_var: Time major running covariance matrix of absolute\n",
    "      error.\n",
    "    abs_err_inv_cov_time_var: Time major running inverse covariance matrix of\n",
    "      absolute error.\n",
    "    X_feat_abs_recon_err: Feature major reconstructed input data's absolute\n",
    "      reconstruction error.\n",
    "    abs_err_count_feat_var: Feature major running count of number of records.\n",
    "    abs_err_mean_feat_var: Feature major running column means of absolute error.\n",
    "    abs_err_cov_feat_var: Feature major running covariance matrix of absolute\n",
    "      error.\n",
    "    abs_err_inv_cov_feat_var: Feature major running inverse covariance matrix of\n",
    "      absolute error.\n",
    "    params: Dictionary of parameters.\n",
    "    dummy_var: Dummy variable used to allow training mode to happen since it\n",
    "      requires a gradient to tie back to the graph dependency.\n",
    "\n",
    "  Returns:\n",
    "    loss: The scalar loss to tie our updates back to Estimator graph.\n",
    "    train_op: The train operation to tie our updates back to Estimator graph.\n",
    "  \"\"\"\n",
    "  with tf.variable_scope(\n",
    "      name_or_scope=\"mahalanobis_dist_vars\", reuse=tf.AUTO_REUSE):\n",
    "    # Time based\n",
    "    singleton_time_condition = tf.equal(\n",
    "        x=cur_batch_size * params[\"seq_len\"], y=1)\n",
    "\n",
    "    # pred is a tensor, so tf.cond chooses between the singleton and batch\n",
    "    # update paths at run time each step\n",
    "    cov_time_var, mean_time_var, count_time_var = tf.cond(\n",
    "        pred=singleton_time_condition,\n",
    "        true_fn=lambda: singleton_batch_cov_variable_updating(\n",
    "            params[\"seq_len\"],\n",
    "            X_time_abs_recon_err,\n",
    "            abs_err_count_time_var,\n",
    "            abs_err_mean_time_var,\n",
    "            abs_err_cov_time_var),\n",
    "        false_fn=lambda: non_singleton_batch_cov_variable_updating(\n",
    "            cur_batch_size,\n",
    "            params[\"seq_len\"],\n",
    "            X_time_abs_recon_err,\n",
    "            abs_err_count_time_var,\n",
    "            abs_err_mean_time_var,\n",
    "            abs_err_cov_time_var))\n",
    "\n",
    "    # Features based\n",
    "    singleton_feat_condition = tf.equal(\n",
    "        x=cur_batch_size * params[\"num_feat\"], y=1)\n",
    "\n",
    "    cov_feat_var, mean_feat_var, count_feat_var = tf.cond(\n",
    "        pred=singleton_feat_condition,\n",
    "        true_fn=lambda: singleton_batch_cov_variable_updating(\n",
    "            params[\"num_feat\"],\n",
    "            X_feat_abs_recon_err,\n",
    "            abs_err_count_feat_var,\n",
    "            abs_err_mean_feat_var,\n",
    "            abs_err_cov_feat_var),\n",
    "        false_fn=lambda: non_singleton_batch_cov_variable_updating(\n",
    "            cur_batch_size,\n",
    "            params[\"num_feat\"],\n",
    "            X_feat_abs_recon_err,\n",
    "            abs_err_count_feat_var,\n",
    "            abs_err_mean_feat_var,\n",
    "            abs_err_cov_feat_var))\n",
    "\n",
    "  # Lastly use control dependencies around loss to enforce the mahalanobis\n",
    "  # variables to be assigned, the control order matters, hence the separate\n",
    "  # contexts\n",
    "  with tf.control_dependencies(\n",
    "      control_inputs=[cov_time_var, cov_feat_var]):\n",
    "    with tf.control_dependencies(\n",
    "        control_inputs=[mean_time_var, mean_feat_var]):\n",
    "      with tf.control_dependencies(\n",
    "          control_inputs=[count_time_var, count_feat_var]):\n",
    "        # Add eps-scaled identity before inverting, regularizing a possibly\n",
    "        # ill-conditioned covariance matrix\n",
    "        # Time based\n",
    "        # shape = (num_feat, num_feat)\n",
    "        abs_err_inv_cov_time_tensor = \\\n",
    "          tf.matrix_inverse(input=cov_time_var + \\\n",
    "            tf.eye(num_rows=tf.shape(input=cov_time_var)[0],\n",
    "                   dtype=tf.float64) * params[\"eps\"])\n",
    "        # Features based\n",
    "        # shape = (seq_len, seq_len)\n",
    "        abs_err_inv_cov_feat_tensor = \\\n",
    "          tf.matrix_inverse(input=cov_feat_var + \\\n",
    "            tf.eye(num_rows=tf.shape(input=cov_feat_var)[0],\n",
    "                   dtype=tf.float64) * params[\"eps\"])\n",
    "\n",
    "        with tf.control_dependencies(\n",
    "            control_inputs=[tf.assign(ref=abs_err_inv_cov_time_var,\n",
    "                                      value=abs_err_inv_cov_time_tensor),\n",
    "                            tf.assign(ref=abs_err_inv_cov_feat_var,\n",
    "                                      value=abs_err_inv_cov_feat_tensor)]):\n",
    "          # Zero-valued loss that still references dummy_var, giving the\n",
    "          # optimizer a gradient path; the real work is the variable\n",
    "          # assignments forced by the control dependencies above\n",
    "          loss = tf.reduce_sum(\n",
    "              input_tensor=tf.zeros(shape=(), dtype=tf.float64) * dummy_var)\n",
    "\n",
    "          train_op = tf.contrib.layers.optimize_loss(\n",
    "              loss=loss,\n",
    "              global_step=tf.train.get_global_step(),\n",
    "              learning_rate=params[\"learning_rate\"],\n",
    "              optimizer=\"SGD\")\n",
    "\n",
    "  return loss, train_op"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## tune_anomaly_threshold_vars.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile anomaly_detection_module/trainer/tune_anomaly_threshold_vars.py\n",
    "import tensorflow as tf\n",
    "\n",
    "\n",
    "def create_confusion_matrix_thresh_vars(scope, var_name, size):\n",
    "  \"\"\"Creates confusion matrix threshold variables.\n",
    "\n",
    "  Given variable scope, name, and size, create and return confusion matrix\n",
    "  threshold variables for true positives, false negatives, false positives,\n",
    "  true negatives.\n",
    "\n",
    "  Args:\n",
    "    scope: String of variable scope name.\n",
    "    var_name: String denoting which set of variables to create. Values are\n",
    "      \"time\" and \"feat\".\n",
    "    size: The size of the variable, number of time/feature thresholds.\n",
    "\n",
    "  Returns:\n",
    "    Confusion matrix threshold variables for true positives, false negatives,\n",
    "    false positives, true negatives.\n",
    "  \"\"\"\n",
    "  with tf.variable_scope(\n",
    "      name_or_scope=scope, reuse=tf.AUTO_REUSE):\n",
    "    # One zero-initialized tf.int64 count variable per confusion matrix\n",
    "    # quadrant; the loop replaces four copy-pasted tf.get_variable calls\n",
    "    # while keeping the variable names and creation order identical.\n",
    "    return tuple(\n",
    "        tf.get_variable(\n",
    "            name=\"{0}_thresh_{1}_var\".format(quadrant, var_name),\n",
    "            dtype=tf.int64,\n",
    "            initializer=tf.zeros(\n",
    "                shape=size, dtype=tf.int64),\n",
    "            trainable=False)\n",
    "        for quadrant in (\"tp\", \"fn\", \"fp\", \"tn\"))\n",
    "\n",
    "\n",
    "def create_both_confusion_matrix_thresh_vars(\n",
    "    scope, time_thresh_size, feat_thresh_size):\n",
    "  \"\"\"Creates both time & feature major confusion matrix threshold variables.\n",
    "\n",
    "  Given variable scope and sizes, create and return confusion\n",
    "  matrix threshold variables for true positives, false negatives, false\n",
    "  positives, and true negatives for both time and feature major\n",
    "  representations.\n",
    "\n",
    "  Args:\n",
    "    scope: String of variable scope name.\n",
    "    time_thresh_size: Variable size of number of time major thresholds.\n",
    "    feat_thresh_size: Variable size of number of feature major thresholds.\n",
    "\n",
    "  Returns:\n",
    "    Confusion matrix threshold variables for true positives, false negatives,\n",
    "    false positives, true negatives for both time and feature major\n",
    "    representations.\n",
    "  \"\"\"\n",
    "  # Each call yields the 4-tuple (tp, fn, fp, tn) for one representation\n",
    "  time_major_vars = create_confusion_matrix_thresh_vars(\n",
    "      scope=scope, var_name=\"time\", size=time_thresh_size)\n",
    "\n",
    "  feat_major_vars = create_confusion_matrix_thresh_vars(\n",
    "      scope=scope, var_name=\"feat\", size=feat_thresh_size)\n",
    "\n",
    "  # Time major quadruple first, then the feature major quadruple\n",
    "  return time_major_vars + feat_major_vars\n",
    "\n",
    "\n",
    "def create_mahalanobis_unsupervised_thresh_vars(scope, var_name):\n",
    "  \"\"\"Creates mahalanobis unsupervised threshold variables.\n",
    "\n",
    "  Given variable scope and name, create and return mahalanobis unsupervised\n",
    "  threshold variables of mean and standard deviation.\n",
    "\n",
    "  Args:\n",
    "    scope: String of variable scope name.\n",
    "    var_name: String denoting which set of variables to create. Values are\n",
    "      \"time\" and \"feat\".\n",
    "\n",
    "  Returns:\n",
    "    Mahalanobis unsupervised threshold variables of count, mean, and standard\n",
    "    deviation.\n",
    "  \"\"\"\n",
    "  with tf.variable_scope(\n",
    "      name_or_scope=scope, reuse=tf.AUTO_REUSE):\n",
    "    # (name prefix, dtype) for each scalar running statistic variable\n",
    "    variable_specs = ((\"count\", tf.int64),\n",
    "                      (\"mean\", tf.float64),\n",
    "                      (\"var\", tf.float64))\n",
    "\n",
    "    return tuple(\n",
    "        tf.get_variable(\n",
    "            name=\"{0}_thresh_{1}_var\".format(prefix, var_name),\n",
    "            dtype=dtype,\n",
    "            initializer=tf.zeros(\n",
    "                shape=[], dtype=dtype),\n",
    "            trainable=False)\n",
    "        for prefix, dtype in variable_specs)\n",
    "\n",
    "\n",
    "def create_both_mahalanobis_unsupervised_thresh_vars(scope):\n",
    "  \"\"\"Creates time & feature mahalanobis unsupervised threshold variables.\n",
    "\n",
    "  Given variable scope, create and return the mahalanobis unsupervised\n",
    "  threshold variables (running count, mean, and variance of the mahalanobis\n",
    "  distance) for both the time and feature major representations.\n",
    "\n",
    "  Args:\n",
    "    scope: String of variable scope name.\n",
    "\n",
    "  Returns:\n",
    "    6-tuple of threshold variables: (count, mean, variance) for the time\n",
    "    major representation followed by (count, mean, variance) for the\n",
    "    feature major representation.\n",
    "  \"\"\"\n",
    "  # Build one (count, mean, variance) triple per representation and\n",
    "  # flatten them into a single 6-tuple.\n",
    "  time_vars = create_mahalanobis_unsupervised_thresh_vars(\n",
    "      scope=scope, var_name=\"time\")\n",
    "\n",
    "  feat_vars = create_mahalanobis_unsupervised_thresh_vars(\n",
    "      scope=scope, var_name=\"feat\")\n",
    "\n",
    "  return time_vars + feat_vars"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## tune_anomaly_thresholds_supervised.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile anomaly_detection_module/trainer/tune_anomaly_thresholds_supervised.py\n",
    "import tensorflow as tf\n",
    "\n",
    "\n",
    "def calculate_threshold_confusion_matrix(labels_mask, preds, num_thresh):\n",
    "  \"\"\"Calculates confusion matrix based on thresholds.\n",
    "\n",
    "  Given labels mask, predictions, and number of thresholds, returns count\n",
    "  for cell in confusion matrix.\n",
    "\n",
    "  Args:\n",
    "    labels_mask: tf.bool vector tensor when label was normal or\n",
    "      anomalous.\n",
    "    preds: Predicted anomaly labels.\n",
    "    num_thresh: Number of anomaly thresholds to try in parallel grid search.\n",
    "\n",
    "  Returns:\n",
    "    Count for cell in confusion matrix.\n",
    "  \"\"\"\n",
    "  threshold_indices = tf.range(start=0, limit=num_thresh, dtype=tf.int64)\n",
    "\n",
    "  # For each candidate threshold, flag the examples where the label mask\n",
    "  # and the prediction row agree.\n",
    "  matches = tf.map_fn(\n",
    "      fn=lambda threshold: tf.logical_and(\n",
    "          x=labels_mask,\n",
    "          y=preds[threshold, :]),\n",
    "      elems=threshold_indices,\n",
    "      dtype=tf.bool)\n",
    "\n",
    "  # Sum the matches over the batch dimension, one count per threshold.\n",
    "  return tf.reduce_sum(\n",
    "      input_tensor=tf.cast(x=matches, dtype=tf.int64), axis=1)\n",
    "\n",
    "\n",
    "def update_anom_thresh_vars(\n",
    "    labels_norm_mask,\n",
    "    labels_anom_mask,\n",
    "    num_thresh,\n",
    "    anom_thresh,\n",
    "    mahalanobis_dist,\n",
    "    tp_at_thresh_var,\n",
    "    fn_at_thresh_var,\n",
    "    fp_at_thresh_var,\n",
    "    tn_at_thresh_var,\n",
    "    mode):\n",
    "  \"\"\"Updates anomaly threshold confusion matrix variables.\n",
    "\n",
    "  Given masks for when labels are normal and anomalous, the number of anomaly\n",
    "  thresholds and the thresholds themselves, the mahalanobis distance,\n",
    "  variables for the confusion matrix, and the current Estimator mode,\n",
    "  returns the updated variables for the confusion matrix.\n",
    "\n",
    "  Args:\n",
    "    labels_norm_mask: tf.bool vector tensor that is true when label was normal.\n",
    "    labels_anom_mask: tf.bool vector tensor that is true when label was\n",
    "      anomalous.\n",
    "    num_thresh: Number of anomaly thresholds to try in parallel grid search.\n",
    "    anom_thresh: tf.float64 vector tensor of grid of anomaly thresholds to try.\n",
    "    mahalanobis_dist: tf.float64 matrix tensor of mahalanobis distances across\n",
    "      batch.\n",
    "    tp_at_thresh_var: tf.int64 variable tracking number of true positives at\n",
    "      each possible anomaly threshold.\n",
    "    fn_at_thresh_var: tf.int64 variable tracking number of false negatives at\n",
    "      each possible anomaly threshold.\n",
    "    fp_at_thresh_var: tf.int64 variable tracking number of false positives at\n",
    "      each possible anomaly threshold.\n",
    "    tn_at_thresh_var: tf.int64 variable tracking number of true negatives at\n",
    "      each possible anomaly threshold.\n",
    "    mode: Estimator ModeKeys, can take values of TRAIN and EVAL.\n",
    "\n",
    "  Returns:\n",
    "    Updated confusion matrix variables.\n",
    "  \"\"\"\n",
    "  if mode == tf.estimator.ModeKeys.TRAIN:\n",
    "    # Grid search: compare against every candidate threshold at once.\n",
    "    # time_shape = (num_time_anom_thresh, cur_batch_size, seq_len)\n",
    "    # feat_shape = (num_feat_anom_thresh, cur_batch_size, num_feat)\n",
    "    over_thresh = tf.map_fn(\n",
    "        fn=lambda anom_threshold: mahalanobis_dist > anom_threshold,\n",
    "        elems=anom_thresh,\n",
    "        dtype=tf.bool)\n",
    "  else:\n",
    "    # Single fixed threshold during evaluation.\n",
    "    # time_shape = (cur_batch_size, seq_len)\n",
    "    # feat_shape = (cur_batch_size, num_feat)\n",
    "    over_thresh = mahalanobis_dist > anom_thresh\n",
    "\n",
    "  # An example is flagged anomalous if ANY element exceeded the threshold.\n",
    "  # time_shape = (num_time_anom_thresh, cur_batch_size)\n",
    "  # feat_shape = (num_feat_anom_thresh, cur_batch_size)\n",
    "  any_over_thresh = tf.reduce_any(\n",
    "      input_tensor=over_thresh, axis=-1)\n",
    "\n",
    "  if mode == tf.estimator.ModeKeys.EVAL:\n",
    "    # Add a leading singleton threshold axis so eval shapes line up with\n",
    "    # the training-mode grid search case.\n",
    "    # time_shape = (1, cur_batch_size)\n",
    "    # feat_shape = (1, cur_batch_size)\n",
    "    any_over_thresh = tf.expand_dims(\n",
    "        input=any_over_thresh, axis=0)\n",
    "\n",
    "  # shape = (num_anom_thresh, cur_batch_size) for both of these.\n",
    "  predicted_normals = tf.equal(\n",
    "      x=any_over_thresh, y=False)\n",
    "\n",
    "  predicted_anomalies = tf.equal(\n",
    "      x=any_over_thresh, y=True)\n",
    "\n",
    "  # Confusion matrix counts of the current batch, one per threshold.\n",
    "  # time_shape = (num_time_anom_thresh,)\n",
    "  # feat_shape = (num_feat_anom_thresh,)\n",
    "  tp = calculate_threshold_confusion_matrix(\n",
    "      labels_anom_mask, predicted_anomalies, num_thresh)\n",
    "\n",
    "  fn = calculate_threshold_confusion_matrix(\n",
    "      labels_anom_mask, predicted_normals, num_thresh)\n",
    "\n",
    "  fp = calculate_threshold_confusion_matrix(\n",
    "      labels_norm_mask, predicted_anomalies, num_thresh)\n",
    "\n",
    "  tn = calculate_threshold_confusion_matrix(\n",
    "      labels_norm_mask, predicted_normals, num_thresh)\n",
    "\n",
    "  if mode == tf.estimator.ModeKeys.EVAL:\n",
    "    # Drop the singleton threshold axis so the counts are scalars.\n",
    "    tp = tf.squeeze(input=tp)\n",
    "    fn = tf.squeeze(input=fn)\n",
    "    fp = tf.squeeze(input=fp)\n",
    "    tn = tf.squeeze(input=tn)\n",
    "\n",
    "  # Accumulate the batch counts into the running variables, then return\n",
    "  # the post-update variable values.\n",
    "  update_ops = [tf.assign_add(ref=tp_at_thresh_var, value=tp),\n",
    "                tf.assign_add(ref=fn_at_thresh_var, value=fn),\n",
    "                tf.assign_add(ref=fp_at_thresh_var, value=fp),\n",
    "                tf.assign_add(ref=tn_at_thresh_var, value=tn)]\n",
    "\n",
    "  with tf.control_dependencies(control_inputs=update_ops):\n",
    "    return (tf.identity(input=tp_at_thresh_var),\n",
    "            tf.identity(input=fn_at_thresh_var),\n",
    "            tf.identity(input=fp_at_thresh_var),\n",
    "            tf.identity(input=tn_at_thresh_var))\n",
    "\n",
    "\n",
    "def calculate_composite_classification_metrics(tp, fn, fp, tn, f_score_beta):\n",
    "  \"\"\"Calculates composite classification metrics from the confusion matrix.\n",
    "\n",
    "  Given variables for the confusion matrix and the value of beta for f-beta\n",
    "  score, returns accuracy, precision, recall, and f-beta score composite\n",
    "  metrics.\n",
    "\n",
    "  Args:\n",
    "    tp: tf.int64 variable tracking number of true positives at\n",
    "      each possible anomaly threshold.\n",
    "    fn: tf.int64 variable tracking number of false negatives at\n",
    "      each possible anomaly threshold.\n",
    "    fp: tf.int64 variable tracking number of false positives at\n",
    "      each possible anomaly threshold.\n",
    "    tn: tf.int64 variable tracking number of true negatives at\n",
    "      each possible anomaly threshold.\n",
    "    f_score_beta: Value of beta for f-beta score.\n",
    "\n",
    "  Returns:\n",
    "    Accuracy, precision, recall, and f-beta score composite metric tensors.\n",
    "  \"\"\"\n",
    "  # Cast the integer counts to floats before forming the ratios.\n",
    "  tp_float64 = tf.cast(x=tp, dtype=tf.float64)\n",
    "  correct = tf.cast(x=tp + tn, dtype=tf.float64)\n",
    "  total = tf.cast(x=tp + fn + fp + tn, dtype=tf.float64)\n",
    "\n",
    "  # time_shape = (num_time_anom_thresh,)\n",
    "  # feat_shape = (num_feat_anom_thresh,)\n",
    "  acc = correct / total\n",
    "  pre = tp_float64 / tf.cast(x=tp + fp, dtype=tf.float64)\n",
    "  rec = tp_float64 / tf.cast(x=tp + fn, dtype=tf.float64)\n",
    "\n",
    "  # f-beta is the weighted harmonic mean of precision and recall.\n",
    "  beta_squared = f_score_beta ** 2\n",
    "  f_beta_score = (1.0 + beta_squared) * (pre * rec) \\\n",
    "    / (beta_squared * pre + rec)\n",
    "\n",
    "  return acc, pre, rec, f_beta_score\n",
    "\n",
    "\n",
    "def find_best_anom_thresh(\n",
    "    anom_threshs, f_beta_score, anom_thresh_var):\n",
    "  \"\"\"Finds best anomaly threshold to use for anomaly classification.\n",
    "\n",
    "  Given a vector of anomaly thresholds and their f-beta scores, returns the\n",
    "  updated variable that stores the best anomaly threshold value.\n",
    "\n",
    "  Args:\n",
    "    anom_threshs: tf.float64 vector tensor of grid of anomaly thresholds to try.\n",
    "    f_beta_score: tf.float64 vector tensor of f-beta scores for each anomaly\n",
    "      threshold.\n",
    "    anom_thresh_var: tf.float64 variable that stores anomaly threshold value.\n",
    "\n",
    "  Returns:\n",
    "    Updated variable that stores the anomaly threshold value.\n",
    "  \"\"\"\n",
    "  # Index of the threshold with the highest f-beta score.\n",
    "  best_index = tf.argmax(input=f_beta_score, axis=0)\n",
    "\n",
    "  # shape = ()\n",
    "  best_anom_thresh = tf.gather(params=anom_threshs, indices=best_index)\n",
    "\n",
    "  assign_op = tf.assign(ref=anom_thresh_var, value=best_anom_thresh)\n",
    "\n",
    "  # Read the variable back only after the assignment has executed.\n",
    "  with tf.control_dependencies(control_inputs=[assign_op]):\n",
    "    return tf.identity(input=anom_thresh_var)\n",
    "\n",
    "\n",
    "def optimize_anomaly_theshold(\n",
    "    var_name,\n",
    "    labels_norm_mask,\n",
    "    labels_anom_mask,\n",
    "    mahalanobis_dist,\n",
    "    tp_thresh_var,\n",
    "    fn_thresh_var,\n",
    "    fp_thresh_var,\n",
    "    tn_thresh_var,\n",
    "    params,\n",
    "    mode,\n",
    "    anom_thresh_var):\n",
    "  \"\"\"Optimizes anomaly threshold for anomaly classification.\n",
    "\n",
    "  NOTE(review): the function name misspells \"threshold\" (\"theshold\");\n",
    "  callers elsewhere use this spelling, so renaming it requires a\n",
    "  coordinated change across the module.\n",
    "\n",
    "  Given variable name, label masks, mahalanobis distance, variables for\n",
    "  confusion matrix, and dictionary of parameters, returns accuracy, precision,\n",
    "  recall, and f-beta score composite metrics.\n",
    "\n",
    "  Args:\n",
    "    var_name: String denoting which set of variables to use. Values are\n",
    "      \"time\" and \"feat\".\n",
    "    labels_norm_mask: tf.bool vector mask of labels for normals.\n",
    "    labels_anom_mask: tf.bool vector mask of labels for anomalies.\n",
    "    mahalanobis_dist: Mahalanobis distance of reconstruction error.\n",
    "    tp_thresh_var: tf.int64 variable to track number of true positives wrt\n",
    "      thresholds.\n",
    "    fn_thresh_var: tf.int64 variable to track number of false negatives wrt\n",
    "      thresholds.\n",
    "    fp_thresh_var: tf.int64 variable to track number of false positives wrt\n",
    "      thresholds.\n",
    "    tn_thresh_var: tf.int64 variable to track number of true negatives wrt\n",
    "      thresholds.\n",
    "    params: Dictionary of parameters.\n",
    "    mode: Estimator ModeKeys, can take values of TRAIN and EVAL.\n",
    "    anom_thresh_var: tf.float64 variable that stores anomaly threshold value.\n",
    "\n",
    "  Returns:\n",
    "    Updated variable that stores the anomaly threshold value.\n",
    "  \"\"\"\n",
    "  # Grid of candidate thresholds, evenly spaced over the configured range.\n",
    "  # shape = (num_anom_thresh,)\n",
    "  anom_threshs = tf.linspace(\n",
    "      start=tf.constant(\n",
    "          value=params[\"min_{}_anom_thresh\".format(var_name)],\n",
    "          dtype=tf.float64),\n",
    "      stop=tf.constant(\n",
    "          value=params[\"max_{}_anom_thresh\".format(var_name)],\n",
    "          dtype=tf.float64),\n",
    "      num=params[\"num_{}_anom_thresh\".format(var_name)])\n",
    "\n",
    "  # Accumulate batch confusion-matrix counts for every candidate threshold.\n",
    "  with tf.variable_scope(\n",
    "      name_or_scope=\"mahalanobis_dist_thresh_vars\",\n",
    "      reuse=tf.AUTO_REUSE):\n",
    "    (tp_update_op,\n",
    "     fn_update_op,\n",
    "     fp_update_op,\n",
    "     tn_update_op) = \\\n",
    "      update_anom_thresh_vars(\n",
    "          labels_norm_mask,\n",
    "          labels_anom_mask,\n",
    "          params[\"num_{}_anom_thresh\".format(var_name)],\n",
    "          anom_threshs,\n",
    "          mahalanobis_dist,\n",
    "          tp_thresh_var,\n",
    "          fn_thresh_var,\n",
    "          fp_thresh_var,\n",
    "          tn_thresh_var,\n",
    "          mode)\n",
    "\n",
    "  # Force the count updates to run before computing the composite metrics.\n",
    "  with tf.control_dependencies(\n",
    "      control_inputs=[\n",
    "          tp_update_op,\n",
    "          fn_update_op,\n",
    "          fp_update_op,\n",
    "          tn_update_op]):\n",
    "    _, pre, rec, f_beta = \\\n",
    "      calculate_composite_classification_metrics(\n",
    "          tp_thresh_var,\n",
    "          fn_thresh_var,\n",
    "          fp_thresh_var,\n",
    "          tn_thresh_var,\n",
    "          params[\"f_score_beta\"])\n",
    "\n",
    "    # Chained dependencies: metrics must be computed before picking the\n",
    "    # best threshold, and the threshold variable is read back only after\n",
    "    # it has been assigned.\n",
    "    with tf.control_dependencies(control_inputs=[pre, rec]):\n",
    "      with tf.control_dependencies(control_inputs=[f_beta]):\n",
    "        best_anom_thresh = find_best_anom_thresh(\n",
    "            anom_threshs,\n",
    "            f_beta,\n",
    "            anom_thresh_var)\n",
    "        with tf.control_dependencies(control_inputs=[best_anom_thresh]):\n",
    "          return tf.identity(input=anom_thresh_var)\n",
    "\n",
    "\n",
    "def set_anom_thresh(user_passed_anom_thresh, anom_thresh_var):\n",
    "  \"\"\"Sets anomaly threshold for anomaly classification from user input.\n",
    "\n",
    "  Given a user passed anomaly threshold, returns the updated variable that\n",
    "  stores the anomaly threshold value.\n",
    "\n",
    "  Args:\n",
    "    user_passed_anom_thresh: User passed anomaly threshold that overrides\n",
    "      the threshold optimization.\n",
    "    anom_thresh_var: tf.float64 variable that stores anomaly threshold value.\n",
    "\n",
    "  Returns:\n",
    "    Updated variable that stores the anomaly threshold value.\n",
    "  \"\"\"\n",
    "  assign_op = tf.assign(\n",
    "      ref=anom_thresh_var, value=user_passed_anom_thresh)\n",
    "\n",
    "  # Read the variable back only after the assignment has executed.\n",
    "  with tf.control_dependencies(control_inputs=[assign_op]):\n",
    "    return tf.identity(input=anom_thresh_var)\n",
    "\n",
    "\n",
    "def tune_anomaly_thresholds_supervised_training(\n",
    "    labels_norm_mask,\n",
    "    labels_anom_mask,\n",
    "    mahalanobis_dist_time,\n",
    "    tp_thresh_time_var,\n",
    "    fn_thresh_time_var,\n",
    "    fp_thresh_time_var,\n",
    "    tn_thresh_time_var,\n",
    "    time_anom_thresh_var,\n",
    "    mahalanobis_dist_feat,\n",
    "    tp_thresh_feat_var,\n",
    "    fn_thresh_feat_var,\n",
    "    fp_thresh_feat_var,\n",
    "    tn_thresh_feat_var,\n",
    "    feat_anom_thresh_var,\n",
    "    params,\n",
    "    mode,\n",
    "    dummy_var):\n",
    "  \"\"\"Tunes anomaly thresholds during supervised training mode.\n",
    "\n",
    "  Given label masks, mahalanobis distances, confusion matrices, and anomaly\n",
    "  thresholds, returns loss and train_op.\n",
    "\n",
    "  Args:\n",
    "    labels_norm_mask: tf.bool vector mask of labels for normals.\n",
    "    labels_anom_mask: tf.bool vector mask of labels for anomalies.\n",
    "    mahalanobis_dist_time: Mahalanobis distance, time major.\n",
    "    tp_thresh_time_var: tf.int64 variable to track number of true positives wrt\n",
    "      thresholds for time major case.\n",
    "    fn_thresh_time_var: tf.int64 variable to track number of false negatives wrt\n",
    "      thresholds for time major case.\n",
    "    fp_thresh_time_var: tf.int64 variable to track number of false positives wrt\n",
    "      thresholds for time major case.\n",
    "    tn_thresh_time_var: tf.int64 variable to track number of true negatives wrt\n",
    "      thresholds for time major case.\n",
    "    time_anom_thresh_var: tf.float64 variable to hold the set time anomaly\n",
    "      threshold.\n",
    "    mahalanobis_dist_feat: Mahalanobis distance, features major.\n",
    "    tp_thresh_feat_var: tf.int64 variable to track number of true positives wrt\n",
    "      thresholds for feat major case.\n",
    "    fn_thresh_feat_var: tf.int64 variable to track number of false negatives wrt\n",
    "      thresholds for feat major case.\n",
    "    fp_thresh_feat_var: tf.int64 variable to track number of false positives wrt\n",
    "      thresholds for feat major case.\n",
    "    tn_thresh_feat_var: tf.int64 variable to track number of true negatives wrt\n",
    "      thresholds for feat major case.\n",
    "    feat_anom_thresh_var: tf.float64 variable to hold the set feat anomaly\n",
    "      threshold.\n",
    "    params: Dictionary of parameters.\n",
    "    mode: Estimator ModeKeys. Can take value of only TRAIN.\n",
    "    dummy_var: Dummy variable used to allow training mode to happen since it\n",
    "      requires a gradient to tie back to the graph dependency.\n",
    "\n",
    "  Returns:\n",
    "    loss: The scalar loss to tie our updates back to Estimator graph.\n",
    "    train_op: The train operation to tie our updates back to Estimator graph.\n",
    "  \"\"\"\n",
    "  def _resolve_anom_thresh(\n",
    "      var_name, mahalanobis_dist, tp_var, fn_var, fp_var, tn_var,\n",
    "      thresh_var):\n",
    "    \"\"\"Uses the user-passed threshold if given, else grid-searches one.\"\"\"\n",
    "    user_thresh = params[\"{}_anom_thresh\".format(var_name)]\n",
    "    if user_thresh is not None:\n",
    "      return set_anom_thresh(user_thresh, thresh_var)\n",
    "    return optimize_anomaly_theshold(\n",
    "        var_name,\n",
    "        labels_norm_mask,\n",
    "        labels_anom_mask,\n",
    "        mahalanobis_dist,\n",
    "        tp_var,\n",
    "        fn_var,\n",
    "        fp_var,\n",
    "        tn_var,\n",
    "        params,\n",
    "        mode,\n",
    "        thresh_var)\n",
    "\n",
    "  # Time based\n",
    "  best_anom_thresh_time = _resolve_anom_thresh(\n",
    "      \"time\",\n",
    "      mahalanobis_dist_time,\n",
    "      tp_thresh_time_var,\n",
    "      fn_thresh_time_var,\n",
    "      fp_thresh_time_var,\n",
    "      tn_thresh_time_var,\n",
    "      time_anom_thresh_var)\n",
    "\n",
    "  # Features based\n",
    "  best_anom_thresh_feat = _resolve_anom_thresh(\n",
    "      \"feat\",\n",
    "      mahalanobis_dist_feat,\n",
    "      tp_thresh_feat_var,\n",
    "      fn_thresh_feat_var,\n",
    "      fp_thresh_feat_var,\n",
    "      tn_thresh_feat_var,\n",
    "      feat_anom_thresh_var)\n",
    "\n",
    "  # Both thresholds must be resolved before the (dummy) training step runs.\n",
    "  with tf.control_dependencies(\n",
    "      control_inputs=[best_anom_thresh_time,\n",
    "                      best_anom_thresh_feat]):\n",
    "    # Zero-valued loss tied to dummy_var so the Estimator has a gradient\n",
    "    # path back into the graph.\n",
    "    loss = tf.reduce_sum(\n",
    "        input_tensor=tf.zeros(\n",
    "            shape=(), dtype=tf.float64) * dummy_var)\n",
    "\n",
    "    train_op = tf.contrib.layers.optimize_loss(\n",
    "        loss=loss,\n",
    "        global_step=tf.train.get_global_step(),\n",
    "        learning_rate=params[\"learning_rate\"],\n",
    "        optimizer=\"SGD\")\n",
    "\n",
    "    return loss, train_op\n",
    "\n",
    "\n",
    "def tune_anomaly_thresholds_supervised_eval(\n",
    "    labels_norm_mask,\n",
    "    labels_anom_mask,\n",
    "    time_anom_thresh_var,\n",
    "    mahalanobis_dist_time,\n",
    "    tp_thresh_eval_time_var,\n",
    "    fn_thresh_eval_time_var,\n",
    "    fp_thresh_eval_time_var,\n",
    "    tn_thresh_eval_time_var,\n",
    "    feat_anom_thresh_var,\n",
    "    mahalanobis_dist_feat,\n",
    "    tp_thresh_eval_feat_var,\n",
    "    fn_thresh_eval_feat_var,\n",
    "    fp_thresh_eval_feat_var,\n",
    "    tn_thresh_eval_feat_var,\n",
    "    params,\n",
    "    mode):\n",
    "  \"\"\"Checks tuned anomaly thresholds during supervised evaluation mode.\n",
    "\n",
    "  Given label masks, mahalanobis distances, confusion matrices, and anomaly\n",
    "  thresholds, returns loss and eval_metric_ops.\n",
    "\n",
    "  Args:\n",
    "    labels_norm_mask: tf.bool vector mask of labels for normals.\n",
    "    labels_anom_mask: tf.bool vector mask of labels for anomalies.\n",
    "    time_anom_thresh_var: tf.float64 scalar time anomaly threshold value.\n",
    "    mahalanobis_dist_time: Mahalanobis distance, time major.\n",
    "    tp_thresh_eval_time_var: tf.int64 variable to track number of true\n",
    "      positives wrt thresholds for time major case for evaluation.\n",
    "    fn_thresh_eval_time_var: tf.int64 variable to track number of false\n",
    "      negatives wrt thresholds for time major case for evaluation.\n",
    "    fp_thresh_eval_time_var: tf.int64 variable to track number of false\n",
    "      positives wrt thresholds for time major case for evaluation.\n",
    "    tn_thresh_eval_time_var: tf.int64 variable to track number of true\n",
    "      negatives wrt thresholds for time major case for evaluation.\n",
    "    feat_anom_thresh_var: tf.float64 scalar feature anomaly threshold value.\n",
    "    mahalanobis_dist_feat: Mahalanobis distance, features major.\n",
    "    tp_thresh_eval_feat_var: tf.int64 variable to track number of true\n",
    "      positives wrt thresholds for feat major case for evaluation.\n",
    "    fn_thresh_eval_feat_var: tf.int64 variable to track number of false\n",
    "      negatives wrt thresholds for feat major case for evaluation.\n",
    "    fp_thresh_eval_feat_var: tf.int64 variable to track number of false\n",
    "      positives wrt thresholds for feat major case for evaluation.\n",
    "    tn_thresh_eval_feat_var: tf.int64 variable to track number of true\n",
    "      negatives wrt thresholds for feat major case for evaluation.\n",
    "    params: Dictionary of parameters.\n",
    "    mode: Estimator ModeKeys. Can take value of only EVAL.\n",
    "\n",
    "  Returns:\n",
    "    loss: Scalar reconstruction loss.\n",
    "    eval_metric_ops: Evaluation metrics of threshold tuning.\n",
    "  \"\"\"\n",
    "  # Accumulate this batch's confusion-matrix counts into the eval variables.\n",
    "  with tf.variable_scope(\n",
    "      name_or_scope=\"anom_thresh_eval_vars\", reuse=tf.AUTO_REUSE):\n",
    "    # Time based\n",
    "    (tp_time_update_op,\n",
    "     fn_time_update_op,\n",
    "     fp_time_update_op,\n",
    "     tn_time_update_op) = update_anom_thresh_vars(\n",
    "         labels_norm_mask,\n",
    "         labels_anom_mask,\n",
    "         1,\n",
    "         time_anom_thresh_var,\n",
    "         mahalanobis_dist_time,\n",
    "         tp_thresh_eval_time_var,\n",
    "         fn_thresh_eval_time_var,\n",
    "         fp_thresh_eval_time_var,\n",
    "         tn_thresh_eval_time_var,\n",
    "         mode)\n",
    "\n",
    "    # Features based\n",
    "    (tp_feat_update_op,\n",
    "     fn_feat_update_op,\n",
    "     fp_feat_update_op,\n",
    "     tn_feat_update_op) = update_anom_thresh_vars(\n",
    "         labels_norm_mask,\n",
    "         labels_anom_mask,\n",
    "         1,\n",
    "         feat_anom_thresh_var,\n",
    "         mahalanobis_dist_feat,\n",
    "         tp_thresh_eval_feat_var,\n",
    "         fn_thresh_eval_feat_var,\n",
    "         fp_thresh_eval_feat_var,\n",
    "         tn_thresh_eval_feat_var,\n",
    "         mode)\n",
    "\n",
    "  # Composite metric update ops derived from the accumulated counts.\n",
    "  with tf.variable_scope(\n",
    "      name_or_scope=\"anom_thresh_eval_vars\", reuse=tf.AUTO_REUSE):\n",
    "    # Time based\n",
    "    (acc_time_update_op,\n",
    "     pre_time_update_op,\n",
    "     rec_time_update_op,\n",
    "     f_beta_time_update_op) = calculate_composite_classification_metrics(\n",
    "         tp_thresh_eval_time_var,\n",
    "         fn_thresh_eval_time_var,\n",
    "         fp_thresh_eval_time_var,\n",
    "         tn_thresh_eval_time_var,\n",
    "         params[\"f_score_beta\"])\n",
    "\n",
    "    # Features based\n",
    "    (acc_feat_update_op,\n",
    "     pre_feat_update_op,\n",
    "     rec_feat_update_op,\n",
    "     f_beta_feat_update_op) = calculate_composite_classification_metrics(\n",
    "         tp_thresh_eval_feat_var,\n",
    "         fn_thresh_eval_feat_var,\n",
    "         fp_thresh_eval_feat_var,\n",
    "         tn_thresh_eval_feat_var,\n",
    "         params[\"f_score_beta\"])\n",
    "\n",
    "  # Threshold evaluation contributes no reconstruction loss of its own.\n",
    "  loss = tf.zeros(shape=[], dtype=tf.float64)\n",
    "\n",
    "  # Metric VALUE tensors: reuse the shared composite-metrics helper instead\n",
    "  # of duplicating the accuracy/precision/recall/f-beta math inline.\n",
    "  # Time based\n",
    "  (acc_thresh_eval_time_var,\n",
    "   pre_thresh_eval_time_var,\n",
    "   rec_thresh_eval_time_var,\n",
    "   f_beta_thresh_eval_time_var) = calculate_composite_classification_metrics(\n",
    "       tp_thresh_eval_time_var,\n",
    "       fn_thresh_eval_time_var,\n",
    "       fp_thresh_eval_time_var,\n",
    "       tn_thresh_eval_time_var,\n",
    "       params[\"f_score_beta\"])\n",
    "\n",
    "  # Features based\n",
    "  (acc_thresh_eval_feat_var,\n",
    "   pre_thresh_eval_feat_var,\n",
    "   rec_thresh_eval_feat_var,\n",
    "   f_beta_thresh_eval_feat_var) = calculate_composite_classification_metrics(\n",
    "       tp_thresh_eval_feat_var,\n",
    "       fn_thresh_eval_feat_var,\n",
    "       fp_thresh_eval_feat_var,\n",
    "       tn_thresh_eval_feat_var,\n",
    "       params[\"f_score_beta\"])\n",
    "\n",
    "  # Anomaly detection eval metrics: (value tensor, update op) pairs.\n",
    "  eval_metric_ops = {\n",
    "      # Time based\n",
    "      \"time_anom_tp\": (tp_thresh_eval_time_var, tp_time_update_op),\n",
    "      \"time_anom_fn\": (fn_thresh_eval_time_var, fn_time_update_op),\n",
    "      \"time_anom_fp\": (fp_thresh_eval_time_var, fp_time_update_op),\n",
    "      \"time_anom_tn\": (tn_thresh_eval_time_var, tn_time_update_op),\n",
    "\n",
    "      \"time_anom_acc\": (acc_thresh_eval_time_var, acc_time_update_op),\n",
    "      \"time_anom_pre\": (pre_thresh_eval_time_var, pre_time_update_op),\n",
    "      \"time_anom_rec\": (rec_thresh_eval_time_var, rec_time_update_op),\n",
    "      \"time_anom_f_beta\": (f_beta_thresh_eval_time_var,\n",
    "                           f_beta_time_update_op),\n",
    "\n",
    "      # Features based\n",
    "      \"feat_anom_tp\": (tp_thresh_eval_feat_var, tp_feat_update_op),\n",
    "      \"feat_anom_fn\": (fn_thresh_eval_feat_var, fn_feat_update_op),\n",
    "      \"feat_anom_fp\": (fp_thresh_eval_feat_var, fp_feat_update_op),\n",
    "      \"feat_anom_tn\": (tn_thresh_eval_feat_var, tn_feat_update_op),\n",
    "\n",
    "      \"feat_anom_acc\": (acc_thresh_eval_feat_var, acc_feat_update_op),\n",
    "      \"feat_anom_pre\": (pre_thresh_eval_feat_var, pre_feat_update_op),\n",
    "      \"feat_anom_rec\": (rec_thresh_eval_feat_var, rec_feat_update_op),\n",
    "      \"feat_anom_f_beta\": (f_beta_thresh_eval_feat_var,\n",
    "                           f_beta_feat_update_op)\n",
    "  }\n",
    "\n",
    "  return loss, eval_metric_ops"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## tune_anomaly_thresholds_unsupervised.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile anomaly_detection_module/trainer/tune_anomaly_thresholds_unsupervised.py\n",
    "import tensorflow as tf\n",
    "\n",
    "from .calculate_error_distribution_statistics import non_singleton_batch_var_variable_updating\n",
    "from .calculate_error_distribution_statistics import singleton_batch_var_variable_updating\n",
    "from .predict import flag_anomalies_by_thresholding\n",
    "\n",
    "\n",
    "def tune_anomaly_thresholds_unsupervised_training(\n",
    "    cur_batch_size,\n",
    "    time_anom_thresh_var,\n",
    "    mahalanobis_dist_time,\n",
    "    count_thresh_time_var,\n",
    "    mean_thresh_time_var,\n",
    "    var_thresh_time_var,\n",
    "    feat_anom_thresh_var,\n",
    "    mahalanobis_dist_feat,\n",
    "    count_thresh_feat_var,\n",
    "    mean_thresh_feat_var,\n",
    "    var_thresh_feat_var,\n",
    "    params,\n",
    "    dummy_var):\n",
    "  \"\"\"Tunes anomaly thresholds during unsupervised training mode.\n",
    "\n",
    "  Given dimensions of inputs, mahalanobis distances, and variables tracking\n",
    "  counts, means, and variances of mahalanobis distance, returns loss and\n",
    "  train_op.\n",
    "\n",
    "  Args:\n",
    "    cur_batch_size: Current batch size, could be partially filled.\n",
    "    time_anom_thresh_var: Time anomaly threshold variable.\n",
    "    mahalanobis_dist_time: Time major mahalanobis distance.\n",
    "    count_thresh_time_var: Time major running count of number of records.\n",
    "    mean_thresh_time_var: Time major running mean of mahalanobis distance.\n",
    "    var_thresh_time_var: Time major running variance of mahalanobis distance.\n",
    "    feat_anom_thresh_var: Feature anomaly threshold variable.\n",
    "    mahalanobis_dist_feat: Feature major mahalanobis distance.\n",
    "    count_thresh_feat_var: Feature major running count of number of records.\n",
    "    mean_thresh_feat_var: Feature major running mean of mahalanobis distance.\n",
    "    var_thresh_feat_var: Feature major running variance of mahalanobis distance.\n",
    "    params: Dictionary of parameters.\n",
    "    dummy_var: Dummy variable used to allow training mode to happen since it\n",
    "      requires a gradient to tie back to the graph dependency.\n",
    "\n",
    "  Returns:\n",
    "    loss: The scalar loss to tie our updates back to Estimator graph.\n",
    "    train_op: The train operation to tie our updates back to Estimator graph.\n",
    "  \"\"\"\n",
    "  # Update the running count/mean/variance of the mahalanobis distances.\n",
    "  # A singleton batch (count == 1) takes a different update path than a\n",
    "  # batch with multiple elements.\n",
    "  with tf.variable_scope(\n",
    "      name_or_scope=\"mahalanobis_dist_thresh_vars\", reuse=tf.AUTO_REUSE):\n",
    "    # Time based\n",
    "    mahalanobis_dist_time_flat = tf.reshape(\n",
    "        tensor=mahalanobis_dist_time,\n",
    "        shape=[cur_batch_size * params[\"seq_len\"]])\n",
    "\n",
    "    singleton_time_condition = tf.equal(\n",
    "        x=cur_batch_size * params[\"seq_len\"], y=1)\n",
    "\n",
    "    var_time_var, mean_time_var, count_time_var = tf.cond(\n",
    "        pred=singleton_time_condition,\n",
    "        true_fn=lambda: singleton_batch_var_variable_updating(\n",
    "            params[\"seq_len\"],\n",
    "            mahalanobis_dist_time_flat,\n",
    "            count_thresh_time_var,\n",
    "            mean_thresh_time_var,\n",
    "            var_thresh_time_var),\n",
    "        false_fn=lambda: non_singleton_batch_var_variable_updating(\n",
    "            cur_batch_size,\n",
    "            params[\"seq_len\"],\n",
    "            mahalanobis_dist_time_flat,\n",
    "            count_thresh_time_var,\n",
    "            mean_thresh_time_var,\n",
    "            var_thresh_time_var))\n",
    "\n",
    "    # Features based\n",
    "    mahalanobis_dist_feat_flat = tf.reshape(\n",
    "        tensor=mahalanobis_dist_feat,\n",
    "        shape=[cur_batch_size * params[\"num_feat\"]])\n",
    "\n",
    "    singleton_feat_condition = tf.equal(\n",
    "        x=cur_batch_size * params[\"num_feat\"], y=1)\n",
    "\n",
    "    var_feat_var, mean_feat_var, count_feat_var = tf.cond(\n",
    "        pred=singleton_feat_condition,\n",
    "        true_fn=lambda: singleton_batch_var_variable_updating(\n",
    "            params[\"num_feat\"],\n",
    "            mahalanobis_dist_feat_flat,\n",
    "            count_thresh_feat_var,\n",
    "            mean_thresh_feat_var,\n",
    "            var_thresh_feat_var),\n",
    "        false_fn=lambda: non_singleton_batch_var_variable_updating(\n",
    "            cur_batch_size,\n",
    "            params[\"num_feat\"],\n",
    "            mahalanobis_dist_feat_flat,\n",
    "            count_thresh_feat_var,\n",
    "            mean_thresh_feat_var,\n",
    "            var_thresh_feat_var))\n",
    "\n",
    "  # Lastly use control dependencies around loss to enforce the mahalanobis\n",
    "  # variables to be assigned, the control order matters, hence the separate\n",
    "  # contexts.\n",
    "  with tf.control_dependencies(\n",
    "      control_inputs=[var_time_var, var_feat_var]):\n",
    "    with tf.control_dependencies(\n",
    "        control_inputs=[mean_time_var, mean_feat_var]):\n",
    "      with tf.control_dependencies(\n",
    "          control_inputs=[count_time_var, count_feat_var]):\n",
    "        # Thresholds are set to mean + scale * stddev of the mahalanobis\n",
    "        # distances seen so far.\n",
    "        time_out = mean_time_var\n",
    "        time_out += params[\"time_thresh_scl\"] * tf.sqrt(x=var_time_var)\n",
    "        feat_out = mean_feat_var\n",
    "        feat_out += params[\"feat_thresh_scl\"] * tf.sqrt(x=var_feat_var)\n",
    "        with tf.control_dependencies(\n",
    "            control_inputs=[tf.assign(ref=time_anom_thresh_var,\n",
    "                                      value=time_out),\n",
    "                            tf.assign(ref=feat_anom_thresh_var,\n",
    "                                      value=feat_out)]):\n",
    "\n",
    "          # Loss is identically zero (zeros * dummy_var); the optimizer step\n",
    "          # exists only to tie the variable updates into the training graph.\n",
    "          loss = tf.reduce_sum(\n",
    "              input_tensor=tf.zeros(shape=(), dtype=tf.float64) * dummy_var)\n",
    "\n",
    "          train_op = tf.contrib.layers.optimize_loss(\n",
    "              loss=loss,\n",
    "              global_step=tf.train.get_global_step(),\n",
    "              learning_rate=params[\"learning_rate\"],\n",
    "              optimizer=\"SGD\")\n",
    "\n",
    "  return loss, train_op\n",
    "\n",
    "\n",
    "def tune_anomaly_thresholds_unsupervised_eval(\n",
    "    cur_batch_size,\n",
    "    time_anom_thresh_var,\n",
    "    mahalanobis_dist_time,\n",
    "    feat_anom_thresh_var,\n",
    "    mahalanobis_dist_feat):\n",
    "  \"\"\"Checks tuned anomaly thresholds during unsupervised evaluation mode.\n",
    "\n",
    "  Given current batch size, mahalanobis distances, and the tuned anomaly\n",
    "  threshold variables, returns loss and evaluation metric ops.\n",
    "\n",
    "  Args:\n",
    "    cur_batch_size: Current batch size, could be partially filled.\n",
    "    time_anom_thresh_var: Time anomaly threshold variable.\n",
    "    mahalanobis_dist_time: Time major mahalanobis distance.\n",
    "    feat_anom_thresh_var: Feature anomaly threshold variable.\n",
    "    mahalanobis_dist_feat: Feature major mahalanobis distance.\n",
    "\n",
    "  Returns:\n",
    "    loss: The scalar loss to tie our updates back to Estimator graph.\n",
    "    eval_metric_ops: Evaluation metrics of threshold tuning.\n",
    "  \"\"\"\n",
    "  # Nothing is being fit during evaluation; loss is a constant zero.\n",
    "  loss = tf.zeros(shape=[], dtype=tf.float64)\n",
    "\n",
    "  # Flag predictions as either normal or anomalous\n",
    "  # shape = (cur_batch_size,)\n",
    "  time_anom_flags = flag_anomalies_by_thresholding(\n",
    "      cur_batch_size, mahalanobis_dist_time, time_anom_thresh_var)\n",
    "\n",
    "  # shape = (cur_batch_size,)\n",
    "  feat_anom_flags = flag_anomalies_by_thresholding(\n",
    "      cur_batch_size, mahalanobis_dist_feat, feat_anom_thresh_var)\n",
    "\n",
    "  # Anomaly detection eval metrics\n",
    "  # NOTE(review): with no labels these are mean anomaly-flag rates, not true\n",
    "  # positives, despite the \"_tp\" key suffix -- confirm downstream consumers\n",
    "  # before renaming the metric keys.\n",
    "  eval_metric_ops = {\n",
    "      # Time based\n",
    "      \"time_anom_tp\": tf.metrics.mean(values=time_anom_flags),\n",
    "\n",
    "      # Features based\n",
    "      \"feat_anom_tp\": tf.metrics.mean(values=feat_anom_flags)\n",
    "  }\n",
    "\n",
    "  return loss, eval_metric_ops"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## predict.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile anomaly_detection_module/trainer/predict.py\n",
    "import tensorflow as tf\n",
    "\n",
    "\n",
    "def flag_anomalies_by_thresholding(\n",
    "    cur_batch_size, mahalanobis_dist, anom_thresh_var):\n",
    "  \"\"\"Flags anomalies by thresholding.\n",
    "\n",
    "  Compares each example's mahalanobis distances against the anomaly\n",
    "  threshold and flags the example if any distance exceeds it.\n",
    "\n",
    "  Args:\n",
    "    cur_batch_size: Current batch size, could be partially filled.\n",
    "    mahalanobis_dist: Mahalanobis distance.\n",
    "    anom_thresh_var: Anomaly threshold variable.\n",
    "\n",
    "  Returns:\n",
    "    anomaly_flags: tf.int64 vector of current batch size elements of\n",
    "    0's and 1's indicating if each sequence is anomalous or not.\n",
    "  \"\"\"\n",
    "  # True wherever a distance magnitude exceeds the threshold.\n",
    "  exceeds_thresh = tf.greater(\n",
    "      x=tf.abs(x=mahalanobis_dist), y=anom_thresh_var)\n",
    "\n",
    "  # Collapse over the non-batch axis: one boolean per example.\n",
    "  any_exceeds = tf.reduce_any(input_tensor=exceeds_thresh, axis=1)\n",
    "\n",
    "  # Encode the booleans as int64 flags (1 = anomalous, 0 = normal).\n",
    "  anom_flags = tf.where(\n",
    "      condition=any_exceeds,\n",
    "      x=tf.ones(shape=[cur_batch_size], dtype=tf.int64),\n",
    "      y=tf.zeros(shape=[cur_batch_size], dtype=tf.int64))\n",
    "\n",
    "  return anom_flags\n",
    "\n",
    "\n",
    "def anomaly_detection_predictions(\n",
    "    cur_batch_size,\n",
    "    seq_len,\n",
    "    num_feat,\n",
    "    mahalanobis_dist_time,\n",
    "    mahalanobis_dist_feat,\n",
    "    time_anom_thresh_var,\n",
    "    feat_anom_thresh_var,\n",
    "    X_time_abs_recon_err,\n",
    "    X_feat_abs_recon_err):\n",
    "  \"\"\"Creates Estimator predictions and export outputs.\n",
    "\n",
    "  Assembles the prediction dictionary (anomaly flags, mahalanobis distances,\n",
    "  thresholds, and reconstruction errors) and the export outputs used when\n",
    "  the model is served.\n",
    "\n",
    "  Args:\n",
    "    cur_batch_size: Current batch size, could be partially filled.\n",
    "    seq_len: Number of timesteps in sequence.\n",
    "    num_feat: Number of features.\n",
    "    mahalanobis_dist_time: Mahalanobis distance, time major.\n",
    "    mahalanobis_dist_feat: Mahalanobis distance, features major.\n",
    "    time_anom_thresh_var: Time anomaly threshold variable.\n",
    "    feat_anom_thresh_var: Features anomaly threshold variable.\n",
    "    X_time_abs_recon_err: Time major reconstructed input data's absolute\n",
    "      reconstruction error.\n",
    "    X_feat_abs_recon_err: Features major reconstructed input data's absolute\n",
    "      reconstruction error.\n",
    "\n",
    "  Returns:\n",
    "    predictions_dict: Dictionary of predictions to output for local prediction.\n",
    "    export_outputs: Dictionary to output from exported model for serving.\n",
    "  \"\"\"\n",
    "  # Flag each sequence as anomalous (1) or normal (0), once per view.\n",
    "  # shape = (cur_batch_size,)\n",
    "  time_anom_flags = flag_anomalies_by_thresholding(\n",
    "      cur_batch_size, mahalanobis_dist_time, time_anom_thresh_var)\n",
    "\n",
    "  # shape = (cur_batch_size,)\n",
    "  feat_anom_flags = flag_anomalies_by_thresholding(\n",
    "      cur_batch_size, mahalanobis_dist_feat, feat_anom_thresh_var)\n",
    "\n",
    "  # Restore the time major errors to (cur_batch_size, seq_len, num_feat).\n",
    "  time_abs_recon_err = tf.reshape(\n",
    "      tensor=X_time_abs_recon_err,\n",
    "      shape=[cur_batch_size, seq_len, num_feat])\n",
    "\n",
    "  # Restore the features major errors to the same layout by reshaping to\n",
    "  # (cur_batch_size, num_feat, seq_len) and swapping the last two axes.\n",
    "  feat_abs_recon_err = tf.transpose(\n",
    "      a=tf.reshape(\n",
    "          tensor=X_feat_abs_recon_err,\n",
    "          shape=[cur_batch_size, num_feat, seq_len]),\n",
    "      perm=[0, 2, 1])\n",
    "\n",
    "  # Broadcast the scalar thresholds to one value per example.\n",
    "  time_thresh_batch = tf.fill(\n",
    "      dims=[cur_batch_size], value=time_anom_thresh_var)\n",
    "  feat_thresh_batch = tf.fill(\n",
    "      dims=[cur_batch_size], value=feat_anom_thresh_var)\n",
    "\n",
    "  # Create predictions dictionary\n",
    "  predictions_dict = {\n",
    "      \"X_time_abs_recon_err\": time_abs_recon_err,\n",
    "      \"X_feat_abs_recon_err\": feat_abs_recon_err,\n",
    "      \"mahalanobis_dist_time\": mahalanobis_dist_time,\n",
    "      \"mahalanobis_dist_feat\": mahalanobis_dist_feat,\n",
    "      \"time_anom_thresh_var\": time_thresh_batch,\n",
    "      \"feat_anom_thresh_var\": feat_thresh_batch,\n",
    "      \"time_anom_flags\": time_anom_flags,\n",
    "      \"feat_anom_flags\": feat_anom_flags}\n",
    "\n",
    "  # Create export outputs\n",
    "  export_outputs = {\n",
    "      \"predict_export_outputs\": tf.estimator.export.PredictOutput(\n",
    "          outputs=predictions_dict)\n",
    "  }\n",
    "\n",
    "  return predictions_dict, export_outputs"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## anomaly_detection.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile anomaly_detection_module/trainer/anomaly_detection.py\n",
    "import tensorflow as tf\n",
    "\n",
    "from .autoencoder_dense import dense_autoencoder_model\n",
    "from .autoencoder_lstm import lstm_enc_dec_autoencoder_model\n",
    "from .autoencoder_pca import pca_model\n",
    "from .calculate_error_distribution_statistics import calculate_error_distribution_statistics_training\n",
    "from .calculate_error_distribution_statistics import mahalanobis_dist\n",
    "from .error_distribution_vars import create_both_mahalanobis_dist_vars\n",
    "from .predict import anomaly_detection_predictions\n",
    "from .reconstruction import reconstruction_evaluation\n",
    "from .tune_anomaly_threshold_vars import create_both_confusion_matrix_thresh_vars\n",
    "from .tune_anomaly_threshold_vars import create_both_mahalanobis_unsupervised_thresh_vars\n",
    "from .tune_anomaly_thresholds_supervised import tune_anomaly_thresholds_supervised_training\n",
    "from .tune_anomaly_thresholds_supervised import tune_anomaly_thresholds_supervised_eval\n",
    "from .tune_anomaly_thresholds_unsupervised import tune_anomaly_thresholds_unsupervised_training\n",
    "from .tune_anomaly_thresholds_unsupervised import tune_anomaly_thresholds_unsupervised_eval\n",
    "\n",
    "\n",
    "# Create our model function to be used in our custom estimator\n",
    "def anomaly_detection(features, labels, mode, params):\n",
    "  \"\"\"Custom Estimator model function for anomaly detection.\n",
    "\n",
    "  Given dictionary of feature tensors, labels tensor, Estimator mode, and\n",
    "  dictionary for parameters, return EstimatorSpec object for custom Estimator.\n",
    "\n",
    "  Args:\n",
    "    features: Dictionary of feature tensors.\n",
    "    labels: Labels tensor or None.\n",
    "    mode: Estimator ModeKeys. Can take values of TRAIN, EVAL, and PREDICT.\n",
    "    params: Dictionary of parameters.\n",
    "\n",
    "  Returns:\n",
    "    EstimatorSpec object.\n",
    "  \"\"\"\n",
    "  print(\"\\nanomaly_detection: features = \\n{}\".format(features))\n",
    "  print(\"anomaly_detection: labels = \\n{}\".format(labels))\n",
    "  print(\"anomaly_detection: mode = \\n{}\".format(mode))\n",
    "  print(\"anomaly_detection: params = \\n{}\".format(params))\n",
    "\n",
    "  # Get input sequence tensor into correct shape\n",
    "  # Get dynamic batch size in case there was a partially filled batch\n",
    "  cur_batch_size = tf.shape(\n",
    "      input=features[params[\"feat_names\"][0]], out_type=tf.int64)[0]\n",
    "\n",
    "  # Stack all of the features into a 3-D tensor\n",
    "  # shape = (cur_batch_size, seq_len, num_feat)\n",
    "  X = tf.stack(\n",
    "      values=[features[key] for key in params[\"feat_names\"]], axis=2)\n",
    "\n",
    "  ##############################################################################\n",
    "  \n",
    "  # Important to note that flags determining which variables should be created \n",
    "  # need to remain the same through all stages or else they won't be in the\n",
    "  # checkpoint.\n",
    "\n",
    "  # Variables for calculating error distribution statistics\n",
    "  (abs_err_count_time_var,\n",
    "   abs_err_mean_time_var,\n",
    "   abs_err_cov_time_var,\n",
    "   abs_err_inv_cov_time_var,\n",
    "   abs_err_count_feat_var,\n",
    "   abs_err_mean_feat_var,\n",
    "   abs_err_cov_feat_var,\n",
    "   abs_err_inv_cov_feat_var) = create_both_mahalanobis_dist_vars(\n",
    "       seq_len=params[\"seq_len\"], num_feat=params[\"num_feat\"])\n",
    "\n",
    "  # Variables for automatically tuning anomaly thresh\n",
    "  if params[\"labeled_tune_thresh\"]:\n",
    "    (tp_thresh_time_var,\n",
    "     fn_thresh_time_var,\n",
    "     fp_thresh_time_var,\n",
    "     tn_thresh_time_var,\n",
    "     tp_thresh_feat_var,\n",
    "     fn_thresh_feat_var,\n",
    "     fp_thresh_feat_var,\n",
    "     tn_thresh_feat_var) = create_both_confusion_matrix_thresh_vars(\n",
    "         scope=\"mahalanobis_dist_thresh_vars\",\n",
    "         time_thresh_size=[params[\"num_time_anom_thresh\"]],\n",
    "         feat_thresh_size=[params[\"num_feat_anom_thresh\"]])\n",
    "  else:\n",
    "    (count_thresh_time_var,\n",
    "     mean_thresh_time_var,\n",
    "     var_thresh_time_var,\n",
    "     count_thresh_feat_var,\n",
    "     mean_thresh_feat_var,\n",
    "     var_thresh_feat_var) = create_both_mahalanobis_unsupervised_thresh_vars(\n",
    "         scope=\"mahalanobis_dist_thresh_vars\")\n",
    "\n",
    "  with tf.variable_scope(\n",
    "      name_or_scope=\"mahalanobis_dist_thresh_vars\", reuse=tf.AUTO_REUSE):\n",
    "    time_anom_thresh_var = tf.get_variable(\n",
    "        name=\"time_anom_thresh_var\",\n",
    "        dtype=tf.float64,\n",
    "        initializer=tf.zeros(shape=[], dtype=tf.float64),\n",
    "        trainable=False)\n",
    "\n",
    "    feat_anom_thresh_var = tf.get_variable(\n",
    "        name=\"feat_anom_thresh_var\",\n",
    "        dtype=tf.float64,\n",
    "        initializer=tf.zeros(shape=[], dtype=tf.float64),\n",
    "        trainable=False)\n",
    "\n",
    "  # Variables for tuning anomaly thresh evaluation\n",
    "  if params[\"labeled_tune_thresh\"]:\n",
    "    (tp_thresh_eval_time_var,\n",
    "     fn_thresh_eval_time_var,\n",
    "     fp_thresh_eval_time_var,\n",
    "     tn_thresh_eval_time_var,\n",
    "     tp_thresh_eval_feat_var,\n",
    "     fn_thresh_eval_feat_var,\n",
    "     fp_thresh_eval_feat_var,\n",
    "     tn_thresh_eval_feat_var) = create_both_confusion_matrix_thresh_vars(\n",
    "         scope=\"anom_thresh_eval_vars\",\n",
    "         time_thresh_size=[],\n",
    "         feat_thresh_size=[])\n",
    "\n",
    "  # Create dummy variable for graph dependency requiring a gradient for TRAIN\n",
    "  dummy_var = tf.get_variable(\n",
    "      name=\"dummy_var\",\n",
    "      dtype=tf.float64,\n",
    "      initializer=tf.zeros(shape=[], dtype=tf.float64),\n",
    "      trainable=True)\n",
    "\n",
    "################################################################################\n",
    "\n",
    "  # EstimatorSpec fields default to None; the branches below fill in only\n",
    "  # what the current mode and training stage need.\n",
    "  predictions_dict = None\n",
    "  loss = None\n",
    "  train_op = None\n",
    "  eval_metric_ops = None\n",
    "  export_outputs = None\n",
    "\n",
    "  # Now branch off based on which mode we are in\n",
    "\n",
    "  # Call specific model\n",
    "  model_functions = {\n",
    "      \"dense_autoencoder\": dense_autoencoder_model,\n",
    "      \"lstm_enc_dec_autoencoder\": lstm_enc_dec_autoencoder_model,\n",
    "      \"pca\": pca_model}\n",
    "\n",
    "  # Get function pointer for selected model type\n",
    "  model_function = model_functions[params[\"model_type\"]]\n",
    "\n",
    "  # Build selected model\n",
    "  loss, train_op, X_time_orig, X_time_recon, X_feat_orig, X_feat_recon = \\\n",
    "    model_function(X, mode, params, cur_batch_size, dummy_var)\n",
    "\n",
    "  # Reconstruction errors are needed by every stage past pure\n",
    "  # reconstruction training (error statistics, threshold tuning, predict).\n",
    "  if not (mode == tf.estimator.ModeKeys.TRAIN and\n",
    "          params[\"training_mode\"] == \"reconstruction\"):\n",
    "    # shape = (cur_batch_size * seq_len, num_feat)\n",
    "    X_time_abs_recon_err = tf.abs(\n",
    "        x=X_time_orig - X_time_recon)\n",
    "\n",
    "    # Features based\n",
    "    # shape = (cur_batch_size * num_feat, seq_len)\n",
    "    X_feat_abs_recon_err = tf.abs(\n",
    "        x=X_feat_orig - X_feat_recon)\n",
    "\n",
    "    if (mode == tf.estimator.ModeKeys.TRAIN and\n",
    "        params[\"training_mode\"] == \"calculate_error_distribution_statistics\"):\n",
    "      loss, train_op = calculate_error_distribution_statistics_training(\n",
    "          cur_batch_size,\n",
    "          X_time_abs_recon_err,\n",
    "          abs_err_count_time_var,\n",
    "          abs_err_mean_time_var,\n",
    "          abs_err_cov_time_var,\n",
    "          abs_err_inv_cov_time_var,\n",
    "          X_feat_abs_recon_err,\n",
    "          abs_err_count_feat_var,\n",
    "          abs_err_mean_feat_var,\n",
    "          abs_err_cov_feat_var,\n",
    "          abs_err_inv_cov_feat_var,\n",
    "          params,\n",
    "          dummy_var)\n",
    "    elif (mode == tf.estimator.ModeKeys.EVAL and\n",
    "          params[\"training_mode\"] != \"tune_anomaly_thresholds\"):\n",
    "      loss, eval_metric_ops = reconstruction_evaluation(\n",
    "          X_time_orig, X_time_recon, params[\"training_mode\"])\n",
    "    elif (mode == tf.estimator.ModeKeys.PREDICT or\n",
    "          ((mode == tf.estimator.ModeKeys.TRAIN or\n",
    "            mode == tf.estimator.ModeKeys.EVAL) and\n",
    "           params[\"training_mode\"] == \"tune_anomaly_thresholds\")):\n",
    "      with tf.variable_scope(\n",
    "          name_or_scope=\"mahalanobis_dist_vars\", reuse=tf.AUTO_REUSE):\n",
    "        # Time based\n",
    "        # shape = (cur_batch_size, seq_len)\n",
    "        mahalanobis_dist_time = mahalanobis_dist(\n",
    "            err_vec=X_time_abs_recon_err,\n",
    "            mean_vec=abs_err_mean_time_var,\n",
    "            inv_cov=abs_err_inv_cov_time_var,\n",
    "            final_shape=params[\"seq_len\"])\n",
    "\n",
    "        # Features based\n",
    "        # shape = (cur_batch_size, num_feat)\n",
    "        mahalanobis_dist_feat = mahalanobis_dist(\n",
    "            err_vec=X_feat_abs_recon_err,\n",
    "            mean_vec=abs_err_mean_feat_var,\n",
    "            inv_cov=abs_err_inv_cov_feat_var,\n",
    "            final_shape=params[\"num_feat\"])\n",
    "\n",
    "      # TRAIN/EVAL here implies training_mode == \"tune_anomaly_thresholds\".\n",
    "      if mode != tf.estimator.ModeKeys.PREDICT:\n",
    "        if params[\"labeled_tune_thresh\"]:\n",
    "          # Boolean masks splitting normal (label 0) from anomalous\n",
    "          # (label 1) examples.\n",
    "          labels_norm_mask = tf.equal(x=labels, y=0)\n",
    "          labels_anom_mask = tf.equal(x=labels, y=1)\n",
    "\n",
    "          if mode == tf.estimator.ModeKeys.TRAIN:\n",
    "            loss, train_op = tune_anomaly_thresholds_supervised_training(\n",
    "                labels_norm_mask,\n",
    "                labels_anom_mask,\n",
    "                mahalanobis_dist_time,\n",
    "                tp_thresh_time_var,\n",
    "                fn_thresh_time_var,\n",
    "                fp_thresh_time_var,\n",
    "                tn_thresh_time_var,\n",
    "                time_anom_thresh_var,\n",
    "                mahalanobis_dist_feat,\n",
    "                tp_thresh_feat_var,\n",
    "                fn_thresh_feat_var,\n",
    "                fp_thresh_feat_var,\n",
    "                tn_thresh_feat_var,\n",
    "                feat_anom_thresh_var,\n",
    "                params,\n",
    "                mode,\n",
    "                dummy_var)\n",
    "          elif mode == tf.estimator.ModeKeys.EVAL:\n",
    "            loss, eval_metric_ops = tune_anomaly_thresholds_supervised_eval(\n",
    "                labels_norm_mask,\n",
    "                labels_anom_mask,\n",
    "                time_anom_thresh_var,\n",
    "                mahalanobis_dist_time,\n",
    "                tp_thresh_eval_time_var,\n",
    "                fn_thresh_eval_time_var,\n",
    "                fp_thresh_eval_time_var,\n",
    "                tn_thresh_eval_time_var,\n",
    "                feat_anom_thresh_var,\n",
    "                mahalanobis_dist_feat,\n",
    "                tp_thresh_eval_feat_var,\n",
    "                fn_thresh_eval_feat_var,\n",
    "                fp_thresh_eval_feat_var,\n",
    "                tn_thresh_eval_feat_var,\n",
    "                params,\n",
    "                mode)\n",
    "        else:  # not params[\"labeled_tune_thresh\"]\n",
    "          if mode == tf.estimator.ModeKeys.TRAIN:\n",
    "            loss, train_op = tune_anomaly_thresholds_unsupervised_training(\n",
    "                cur_batch_size,\n",
    "                time_anom_thresh_var,\n",
    "                mahalanobis_dist_time,\n",
    "                count_thresh_time_var,\n",
    "                mean_thresh_time_var,\n",
    "                var_thresh_time_var,\n",
    "                feat_anom_thresh_var,\n",
    "                mahalanobis_dist_feat,\n",
    "                count_thresh_feat_var,\n",
    "                mean_thresh_feat_var,\n",
    "                var_thresh_feat_var,\n",
    "                params,\n",
    "                dummy_var)\n",
    "          elif mode == tf.estimator.ModeKeys.EVAL:\n",
    "            loss, eval_metric_ops = tune_anomaly_thresholds_unsupervised_eval(\n",
    "                cur_batch_size,\n",
    "                time_anom_thresh_var,\n",
    "                mahalanobis_dist_time,\n",
    "                feat_anom_thresh_var,\n",
    "                mahalanobis_dist_feat)\n",
    "      else:  # mode == tf.estimator.ModeKeys.PREDICT\n",
    "        predictions_dict, export_outputs = anomaly_detection_predictions(\n",
    "            cur_batch_size,\n",
    "            params[\"seq_len\"],\n",
    "            params[\"num_feat\"],\n",
    "            mahalanobis_dist_time,\n",
    "            mahalanobis_dist_feat,\n",
    "            time_anom_thresh_var,\n",
    "            feat_anom_thresh_var,\n",
    "            X_time_abs_recon_err,\n",
    "            X_feat_abs_recon_err)\n",
    "\n",
    "  # Return EstimatorSpec\n",
    "  return tf.estimator.EstimatorSpec(\n",
    "      mode=mode,\n",
    "      predictions=predictions_dict,\n",
    "      loss=loss,\n",
    "      train_op=train_op,\n",
    "      eval_metric_ops=eval_metric_ops,\n",
    "      export_outputs=export_outputs)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## serving.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile anomaly_detection_module/trainer/serving.py\n",
    "import tensorflow as tf\n",
    "\n",
    "\n",
    "# Serving input functions\n",
    "def fix_shape_and_type_for_serving(placeholder):\n",
    "  \"\"\"Fixes the shape and type of serving input strings.\n",
    "\n",
    "  Splits each raw serving string on its delimiter and converts the pieces\n",
    "  to floats, producing a parsed feature tensor.\n",
    "\n",
    "  Args:\n",
    "    placeholder: Placeholder tensor holding raw data from serving input\n",
    "      function.\n",
    "\n",
    "  Returns:\n",
    "    Parsed and processed feature tensor.\n",
    "  \"\"\"\n",
    "  cur_batch_size = tf.shape(input=placeholder, out_type=tf.int64)[0]\n",
    "\n",
    "  def split_one_example(idx):\n",
    "    # Split one delimited string into its component string values.\n",
    "    return tf.string_split(source=[placeholder[idx]], delimiter=\";\").values\n",
    "\n",
    "  # Split every string in the batch, then stack the per-example value\n",
    "  # vectors into a single tensor\n",
    "  # shape = (batch_size, seq_len)\n",
    "  batch_indices = tf.range(start=0, limit=cur_batch_size, dtype=tf.int64)\n",
    "  split_values = tf.map_fn(\n",
    "      fn=split_one_example, elems=batch_indices, dtype=tf.string)\n",
    "  split_string = tf.stack(values=split_values, axis=0)\n",
    "\n",
    "  # Convert each string in the split tensor to float\n",
    "  # shape = (batch_size, seq_len)\n",
    "  feature_tensor = tf.string_to_number(\n",
    "      string_tensor=split_string, out_type=tf.float64)\n",
    "\n",
    "  return feature_tensor\n",
    "\n",
    "\n",
    "def get_shape_and_set_modified_shape_2D(tensor, additional_dimension_sizes):\n",
    "  \"\"\"Sets a static sequence-length dimension on a 2-D feature tensor.\n",
    "\n",
    "  Given feature tensor and additional dimension size, sequence length,\n",
    "  fixes dynamic shape ambiguity of last dimension so that we will be able to\n",
    "  use it in our DNN (since tf.layers.dense require the last dimension to be\n",
    "  known).\n",
    "\n",
    "  Args:\n",
    "    tensor: tf.float64 vector feature tensor.\n",
    "    additional_dimension_sizes: Additional dimension size, namely sequence\n",
    "      length.\n",
    "\n",
    "  Returns:\n",
    "    Feature tensor with set static shape for sequence length.\n",
    "  \"\"\"\n",
    "  # Get static shape for tensor and convert it to list\n",
    "  shape = tensor.get_shape().as_list()\n",
    "  # Set the second dimension to additional_dimension_sizes[0] since we know\n",
    "  # this is the correct size\n",
    "  shape[1] = additional_dimension_sizes[0]\n",
    "  # Set the shape of tensor to our modified shape\n",
    "  # shape = (batch_size, additional_dimension_sizes[0])\n",
    "  tensor.set_shape(shape=shape)\n",
    "\n",
    "  return tensor\n",
    "\n",
    "\n",
    "def serving_input_fn(feat_names, seq_len):\n",
    "  \"\"\"Serving input function.\n",
    "\n",
    "  Builds the placeholders and parsed feature tensors the exported model\n",
    "  uses at serving time and returns them as a ServingInputReceiver.\n",
    "\n",
    "  Args:\n",
    "    feat_names: List of string names of features.\n",
    "    seq_len: Number of timesteps in sequence.\n",
    "\n",
    "  Returns:\n",
    "    ServingInputReceiver object containing features and receiver tensors.\n",
    "  \"\"\"\n",
    "  # Each feature arrives as a batch of raw strings, shape = (batch_size,);\n",
    "  # the arrays are sent this way for online ml-engine prediction.\n",
    "  feature_placeholders = {}\n",
    "  for feature in feat_names:\n",
    "    feature_placeholders[feature] = tf.placeholder(\n",
    "        dtype=tf.string, shape=[None])\n",
    "\n",
    "  features = {}\n",
    "  for key, tensor in feature_placeholders.items():\n",
    "    # Parse the raw string batch into a float feature tensor.\n",
    "    parsed = fix_shape_and_type_for_serving(placeholder=tensor)\n",
    "    # Pin the dynamic sequence-length dimension so downstream dense layers\n",
    "    # see a known last dimension.\n",
    "    features[key] = get_shape_and_set_modified_shape_2D(\n",
    "        tensor=parsed, additional_dimension_sizes=[seq_len])\n",
    "\n",
    "  return tf.estimator.export.ServingInputReceiver(\n",
    "      features=features, receiver_tensors=feature_placeholders)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## model.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile anomaly_detection_module/trainer/model.py\n",
    "import tensorflow as tf\n",
    "\n",
    "from .anomaly_detection import anomaly_detection\n",
    "from .input import read_dataset\n",
    "from .serving import serving_input_fn\n",
    "\n",
    "# Set logging to be level of INFO\n",
    "tf.logging.set_verbosity(tf.logging.INFO)\n",
    "\n",
    "\n",
    "def train_and_evaluate(args):\n",
    "  \"\"\"Trains and evaluates custom Estimator with three training modes.\n",
    "\n",
    "  Given the dictionary of parameters, creates a custom Estimator and runs one\n",
    "  of three training modes (reconstruction, error-distribution statistics, or\n",
    "  anomaly-threshold tuning), then returns the Estimator object.\n",
    "\n",
    "  Args:\n",
    "    args: Dictionary of parameters.\n",
    "\n",
    "  Returns:\n",
    "    Estimator object.\n",
    "  \"\"\"\n",
    "  # Create our custom estimator using our model function\n",
    "  estimator = tf.estimator.Estimator(\n",
    "      model_fn=anomaly_detection,\n",
    "      model_dir=args[\"output_dir\"],\n",
    "      params=dict(args))\n",
    "\n",
    "  if args[\"training_mode\"] == \"reconstruction\":\n",
    "    # Calculate max_steps from epochs, example count, and batch size\n",
    "    max_steps = int(args[\"reconstruction_epochs\"] * args[\"train_examples\"])\n",
    "    max_steps = max_steps // args[\"train_batch_size\"]\n",
    "    max_steps += args[\"previous_train_steps\"]\n",
    "\n",
    "    # Create eval spec to read in our validation data\n",
    "    eval_spec = tf.estimator.EvalSpec(\n",
    "        input_fn=read_dataset(\n",
    "            filename=args[\"eval_file_pattern\"],\n",
    "            mode=tf.estimator.ModeKeys.EVAL,\n",
    "            batch_size=args[\"eval_batch_size\"],\n",
    "            params=args),\n",
    "        steps=None,\n",
    "        start_delay_secs=args[\"start_delay_secs\"],  # start eval after N secs\n",
    "        throttle_secs=args[\"throttle_secs\"])  # evaluate every N secs\n",
    "\n",
    "    if args[\"model_type\"] == \"pca\":\n",
    "      # Create train spec to read in our training data\n",
    "      train_spec = tf.estimator.TrainSpec(\n",
    "          input_fn=read_dataset(\n",
    "              filename=args[\"train_file_pattern\"],\n",
    "              mode=tf.estimator.ModeKeys.EVAL,  # read through train data once\n",
    "              batch_size=args[\"train_batch_size\"],\n",
    "              params=args),\n",
    "          max_steps=max_steps)\n",
    "\n",
    "      # PCA trains unless autotuning is on AND both k's are already chosen\n",
    "      if not args[\"autotune_principal_components\"]:\n",
    "        # Create train and evaluate loop to train and evaluate our estimator\n",
    "        tf.estimator.train_and_evaluate(\n",
    "            estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)\n",
    "      elif (args[\"k_principal_components_time\"] is None or\n",
    "            args[\"k_principal_components_feat\"] is None):\n",
    "        # Autotuning requested but k's not chosen yet: run to determine them\n",
    "        tf.estimator.train_and_evaluate(\n",
    "            estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)\n",
    "    else:  # dense_autoencoder or lstm_enc_dec_autoencoder\n",
    "      # Create early stopping hook to help reduce overfitting\n",
    "      early_stopping_hook = tf.contrib.estimator.stop_if_no_decrease_hook(\n",
    "          estimator=estimator,\n",
    "          metric_name=\"rmse\",\n",
    "          max_steps_without_decrease=100,\n",
    "          min_steps=1000,\n",
    "          run_every_secs=60,\n",
    "          run_every_steps=None)\n",
    "\n",
    "      # Create train spec to read in our training data\n",
    "      train_spec = tf.estimator.TrainSpec(\n",
    "          input_fn=read_dataset(\n",
    "              filename=args[\"train_file_pattern\"],\n",
    "              mode=tf.estimator.ModeKeys.TRAIN,\n",
    "              batch_size=args[\"train_batch_size\"],\n",
    "              params=args),\n",
    "          max_steps=max_steps,\n",
    "          hooks=[early_stopping_hook])\n",
    "\n",
    "      # Create train and evaluate loop to train and evaluate our estimator\n",
    "      tf.estimator.train_and_evaluate(\n",
    "          estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)\n",
    "  else:\n",
    "    # Calculate max_steps: one pass through the data plus any previous steps\n",
    "    max_steps = args[\"train_examples\"] // args[\"train_batch_size\"]\n",
    "    max_steps += args[\"previous_train_steps\"]\n",
    "\n",
    "    # if args[\"training_mode\"] == \"calculate_error_distribution_statistics\"\n",
    "    # Get final mahalanobis statistics over the entire val_1 dataset\n",
    "\n",
    "    # if args[\"training_mode\"] == \"tune_anomaly_thresholds\"\n",
    "    # Tune anomaly thresholds using val_2 and val_anom datasets\n",
    "    train_spec = tf.estimator.TrainSpec(\n",
    "        input_fn=read_dataset(\n",
    "            filename=args[\"train_file_pattern\"],\n",
    "            mode=tf.estimator.ModeKeys.EVAL,  # read through val data once\n",
    "            batch_size=args[\"train_batch_size\"],\n",
    "            params=args),\n",
    "        max_steps=max_steps)\n",
    "\n",
    "    if args[\"training_mode\"] == \"calculate_error_distribution_statistics\":\n",
    "      # Don't create exporter for serving yet since anomaly thresholds\n",
    "      # aren't trained yet\n",
    "      exporter = None\n",
    "    elif args[\"training_mode\"] == \"tune_anomaly_thresholds\":\n",
    "      # Create exporter that uses serving_input_fn to create saved_model\n",
    "      # for serving\n",
    "      exporter = tf.estimator.LatestExporter(\n",
    "          name=\"exporter\",\n",
    "          serving_input_receiver_fn=lambda: serving_input_fn(\n",
    "              args[\"feat_names\"], args[\"seq_len\"]))\n",
    "    else:\n",
    "      print(\"{0} isn't a valid training mode!\".format(args[\"training_mode\"]))\n",
    "      # BUGFIX: without this assignment an unrecognized training mode raised\n",
    "      # a NameError when eval_spec below referenced the unbound exporter\n",
    "      exporter = None\n",
    "\n",
    "    # Create eval spec to read in our validation data and export our model\n",
    "    eval_spec = tf.estimator.EvalSpec(\n",
    "        input_fn=read_dataset(\n",
    "            filename=args[\"eval_file_pattern\"],\n",
    "            mode=tf.estimator.ModeKeys.EVAL,\n",
    "            batch_size=args[\"eval_batch_size\"],\n",
    "            params=args),\n",
    "        steps=None,\n",
    "        exporters=exporter,\n",
    "        start_delay_secs=args[\"start_delay_secs\"],  # start eval after N secs\n",
    "        throttle_secs=args[\"throttle_secs\"])  # evaluate every N secs\n",
    "\n",
    "  if (args[\"training_mode\"] == \"calculate_error_distribution_statistics\" or\n",
    "      args[\"training_mode\"] == \"tune_anomaly_thresholds\"):\n",
    "    # Create train and evaluate loop to train and evaluate our estimator\n",
    "    tf.estimator.train_and_evaluate(\n",
    "        estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)\n",
    "\n",
    "  # Return the estimator as promised by the docstring (previously returned\n",
    "  # None); callers that ignore the return value are unaffected\n",
    "  return estimator"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## task.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile anomaly_detection_module/trainer/task.py\n",
    "import argparse\n",
    "import json\n",
    "import os\n",
    "\n",
    "from .model import train_and_evaluate\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "  parser = argparse.ArgumentParser()\n",
    "  # File arguments\n",
    "  parser.add_argument(\n",
    "      \"--train_file_pattern\",\n",
    "      help=\"GCS location to read training data.\",\n",
    "      required=True\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--eval_file_pattern\",\n",
    "      help=\"GCS location to read evaluation data.\",\n",
    "      required=True\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--output_dir\",\n",
    "      help=\"GCS location to write checkpoints and export models.\",\n",
    "      required=True\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--job-dir\",\n",
    "      help=\"This model ignores this field, but it is required by gcloud.\",\n",
    "      default=\"junk\"\n",
    "  )\n",
    "\n",
    "  # Sequence shape hyperparameters\n",
    "  parser.add_argument(\n",
    "      \"--seq_len\",\n",
    "      help=\"Number of timesteps to include in each example.\",\n",
    "      type=int,\n",
    "      default=30\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--num_feat\",\n",
    "      help=\"Number of features for each example.\",\n",
    "      type=int,\n",
    "      default=5\n",
    "  )\n",
    "  \n",
    "  # Feature hyperparameters\n",
    "  parser.add_argument(\n",
    "      \"--feat_names\",\n",
    "      help=\"Names of features.\",\n",
    "      type=str,\n",
    "      required=True\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--feat_defaults\",\n",
    "      help=\"Default values of features.\",\n",
    "      type=str,\n",
    "      required=True\n",
    "  )\n",
    "\n",
    "  # Training parameters\n",
    "  parser.add_argument(\n",
    "      \"--train_batch_size\",\n",
    "      help=\"Number of examples in training batch.\",\n",
    "      type=int,\n",
    "      default=32\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--eval_batch_size\",\n",
    "      help=\"Number of examples in evaluation batch.\",\n",
    "      type=int,\n",
    "      default=32\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--previous_train_steps\",\n",
    "      help=\"Number of batches previously trained in other stages.\",\n",
    "      type=int,\n",
    "      default=0\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--reconstruction_epochs\",\n",
    "      help=\"Number of times to go through the reconstruction dataset\",\n",
    "      type=float,\n",
    "      default=1.0\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--train_examples\",\n",
    "      help=\"Number of examples in train file.\",\n",
    "      type=int,\n",
    "      default=1024\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--eval_examples\",\n",
    "      help=\"Number of examples in eval file.\",\n",
    "      type=int,\n",
    "      default=1024\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--learning_rate\",\n",
    "      help=\"How quickly or slowly we train our model by scaling the gradient.\",\n",
    "      type=float,\n",
    "      default=0.1\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--start_delay_secs\",\n",
    "      help=\"Number of seconds to wait before first evaluation.\",\n",
    "      type=int,\n",
    "      default=60\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--throttle_secs\",\n",
    "      help=\"Number of seconds to wait between evaluations.\",\n",
    "      type=int,\n",
    "      default=120\n",
    "  )\n",
    "\n",
    "  # Model hyperparameters\n",
    "  # dense_autoencoder, lstm_enc_dec_autoencoder, pca\n",
    "  parser.add_argument(\n",
    "      \"--model_type\",\n",
    "      help=\"Which model type we will use.\",\n",
    "      type=str,\n",
    "      default=\"dense_autoencoder\"\n",
    "  )\n",
    "  ## Dense Autoencoder\n",
    "  parser.add_argument(\n",
    "      \"--enc_dnn_hidden_units\",\n",
    "      help=\"Hidden layer sizes to use for encoder DNN.\",\n",
    "      type=str,\n",
    "      default=\"1024,256,64\"\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--latent_vector_size\",\n",
    "      help=\"Number of neurons for latent vector between encoder and decoder.\",\n",
    "      type=int,\n",
    "      default=8\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--dec_dnn_hidden_units\",\n",
    "      help=\"Hidden layer sizes to use for decoder DNN.\",\n",
    "      type=str,\n",
    "      default=\"64,256,1024\"\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--time_loss_weight\",\n",
    "      help=\"Amount to weight the time based loss.\",\n",
    "      type=float,\n",
    "      default=1.0\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--feat_loss_weight\",\n",
    "      help=\"Amount to weight the features based loss.\",\n",
    "      type=float,\n",
    "      default=1.0\n",
    "  )\n",
    "  ## LSTM Encoder-Decoder Autoencoder\n",
    "  parser.add_argument(\n",
    "      \"--reverse_labels_sequence\",\n",
    "      help=\"Whether we should reverse the labels sequence dimension or not.\",\n",
    "      type=str,\n",
    "      default=\"True\"\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--enc_lstm_hidden_units\",\n",
    "      help=\"Hidden layer sizes to use for LSTM encoder.\",\n",
    "      type=str,\n",
    "      default=\"64,32,16\"\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--dec_lstm_hidden_units\",\n",
    "      help=\"Hidden layer sizes to use for LSTM decoder.\",\n",
    "      type=str,\n",
    "      default=\"16,32,64\"\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--lstm_dropout_output_keep_probs\",\n",
    "      help=\"Keep probabilties for LSTM outputs.\",\n",
    "      type=str,\n",
    "      default=\"1.0,1.0,1.0\"\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--dnn_hidden_units\",\n",
    "      help=\"Hidden layer sizes to use for DNN.\",\n",
    "      type=str,\n",
    "      default=\"1024,256,64\"\n",
    "  )\n",
    "  ## PCA\n",
    "  parser.add_argument(\n",
    "      \"--autotune_principal_components\",\n",
    "      help=\"Whether we should autotune the number of principal components.\",\n",
    "      type=str,\n",
    "      default=\"False\"\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--k_principal_components_time\",\n",
    "      help=\"Top time k principal components to keep after eigendecomposition.\",\n",
    "      type=int,\n",
    "      default=None\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--k_principal_components_feat\",\n",
    "      help=\"Top feat k principal components to keep after eigendecomposition.\",\n",
    "      type=int,\n",
    "      default=None\n",
    "  )\n",
    "\n",
    "  # Anomaly detection\n",
    "  # reconstruction, calculate_error_distribution_statistics,\n",
    "  # and tune_anomaly_thresholds\n",
    "  parser.add_argument(\n",
    "      \"--training_mode\",\n",
    "      help=\"Which training mode we are in.\",\n",
    "      type=str,\n",
    "      default=\"reconstruction\"\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--labeled_tune_thresh\",\n",
    "      help=\"If we have a labeled dataset for supervised anomaly tuning.\",\n",
    "      type=str,\n",
    "      default=\"True\"\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--num_time_anom_thresh\",\n",
    "      help=\"Number of anomaly thresholds to evaluate in time dimension.\",\n",
    "      type=int,\n",
    "      default=120\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--num_feat_anom_thresh\",\n",
    "      help=\"Number of anomaly thresholds to evaluate in features dimension.\",\n",
    "      type=int,\n",
    "      default=120\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--min_time_anom_thresh\",\n",
    "      help=\"Minimum anomaly threshold to evaluate in time dimension.\",\n",
    "      type=float,\n",
    "      default=100.0\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--max_time_anom_thresh\",\n",
    "      help=\"Maximum anomaly threshold to evaluate in time dimension.\",\n",
    "      type=float,\n",
    "      default=2000.0\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--min_feat_anom_thresh\",\n",
    "      help=\"Minimum anomaly threshold to evaluate in features dimension.\",\n",
    "      type=float,\n",
    "      default=100.0\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--max_feat_anom_thresh\",\n",
    "      help=\"Maximum anomaly threshold to evaluate in features dimension.\",\n",
    "      type=float,\n",
    "      default=2000.0\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--time_thresh_scl\",\n",
    "      help=\"Max num of std devs for time mahalanobis distance to be normal.\",\n",
    "      type=float,\n",
    "      default=2.0\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--feat_thresh_scl\",\n",
    "      help=\"Max num of std devs for feature mahalanobis distance to be normal.\",\n",
    "      type=float,\n",
    "      default=2.0\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--time_anom_thresh\",\n",
    "      help=\"Anomaly threshold in time dimension.\",\n",
    "      type=float,\n",
    "      default=None\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--feat_anom_thresh\",\n",
    "      help=\"Anomaly threshold in features dimension.\",\n",
    "      type=float,\n",
    "      default=None\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--eps\",\n",
    "      help=\"Added to the cov matrix before inversion to avoid being singular.\",\n",
    "      type=str,\n",
    "      default=\"1e-12\"\n",
    "  )\n",
    "  parser.add_argument(\n",
    "      \"--f_score_beta\",\n",
    "      help=\"Value of beta of the f-beta score.\",\n",
    "      type=float,\n",
    "      default=0.05\n",
    "  )\n",
    "\n",
    "  # Parse all arguments\n",
    "  args = parser.parse_args()\n",
    "  arguments = args.__dict__\n",
    "\n",
    "  # Unused args provided by service\n",
    "  arguments.pop(\"job_dir\", None)\n",
    "  # NOTE(review): argparse stores \"--job-dir\" under the key \"job_dir\"\n",
    "  # (dashes become underscores), so the line below never finds a key;\n",
    "  # it is harmless dead code kept for safety\n",
    "  arguments.pop(\"job-dir\", None)\n",
    "  \n",
    "  # Convert string-typed boolean flags to real booleans. Replaces three\n",
    "  # copy-pasted if/else blocks with one loop; same truthy-string set.\n",
    "  truthy_strings = (\"yes\", \"true\", \"t\", \"y\", \"1\")\n",
    "  for bool_flag in (\"reverse_labels_sequence\",\n",
    "                    \"autotune_principal_components\",\n",
    "                    \"labeled_tune_thresh\"):\n",
    "    arguments[bool_flag] = arguments[bool_flag].lower() in truthy_strings\n",
    "\n",
    "  # Fix list arguments\n",
    "  arguments[\"feat_names\"] = arguments[\"feat_names\"].split(\",\")\n",
    "  arguments[\"feat_defaults\"] = [[item] for item in arguments[\"feat_defaults\"].split(\",\")]\n",
    "\n",
    "  ## Dense Autoencoder\n",
    "  arguments[\"enc_dnn_hidden_units\"] = [\n",
    "      int(x) for x in arguments[\"enc_dnn_hidden_units\"].split(\",\")]\n",
    "  arguments[\"dec_dnn_hidden_units\"] = [\n",
    "      int(x) for x in arguments[\"dec_dnn_hidden_units\"].split(\",\")]\n",
    "\n",
    "  ## LSTM Encoder-Decoder Autoencoder\n",
    "  arguments[\"enc_lstm_hidden_units\"] = [\n",
    "      int(x) for x in arguments[\"enc_lstm_hidden_units\"].split(\",\")]\n",
    "  arguments[\"dec_lstm_hidden_units\"] = [\n",
    "      int(x) for x in arguments[\"dec_lstm_hidden_units\"].split(\",\")]\n",
    "  arguments[\"lstm_dropout_output_keep_probs\"] = [\n",
    "      float(x) for x in arguments[\"lstm_dropout_output_keep_probs\"].split(\",\")]\n",
    "  arguments[\"dnn_hidden_units\"] = [\n",
    "      int(x) for x in arguments[\"dnn_hidden_units\"].split(\",\")]\n",
    "\n",
    "  # Fix eps argument\n",
    "  arguments[\"eps\"] = float(arguments[\"eps\"])\n",
    "  \n",
    "  # NOTE(review): this unconditionally disables autotuning exactly when\n",
    "  # model_type == \"pca\", yet model.py only consults the flag in its PCA\n",
    "  # branch -- which makes --autotune_principal_components a no-op. Confirm\n",
    "  # whether the intended condition was != \"pca\" before changing behavior.\n",
    "  if arguments[\"model_type\"] == \"pca\":\n",
    "    arguments[\"autotune_principal_components\"] = False\n",
    "\n",
    "  # Append trial_id to path if we are doing hptuning\n",
    "  # This code can be removed if you are not using hyperparameter tuning\n",
    "  arguments[\"output_dir\"] = os.path.join(\n",
    "      arguments[\"output_dir\"],\n",
    "      json.loads(\n",
    "          os.environ.get(\"TF_CONFIG\", \"{}\")\n",
    "          ).get(\"task\", {}).get(\"trial\", \"\")\n",
    "      )\n",
    "\n",
    "  # Run the training job\n",
    "  train_and_evaluate(arguments)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
