{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.8.0\n",
      "1.14.0\n",
      "0.22.0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/envs/py2env/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "# Import libraries and modules\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import shutil\n",
    "print tf.__version__\n",
    "print np.__version__\n",
    "print pd.__version__\n",
    "np.set_printoptions(threshold=np.inf)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
     "# Change these to try this notebook out\n",
     "BUCKET = 'youtube8m-4-train'  # GCS bucket name (no gs:// prefix) -- presumably consumed by later cloud commands; confirm\n",
     "PROJECT = 'qwiklabs-gcp-8d3d0cd07cef9252'  # GCP project id\n",
     "REGION = 'us-central1'  # GCP compute region"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
     "# Import os environment variables\n",
     "# Export the notebook constants as environment variables so shell cells\n",
     "# can read $BUCKET, $PROJECT and $REGION -- none are referenced in this chunk; confirm downstream use\n",
     "import os\n",
     "os.environ['BUCKET'] = BUCKET\n",
     "os.environ['PROJECT'] = PROJECT\n",
     "os.environ['REGION'] = REGION"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
   "source": [
    "# Local Development"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
     "# Set logging to level INFO so Estimator training progress (loss, steps, checkpoints) is printed\n",
     "tf.logging.set_verbosity(tf.logging.INFO)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
     "# The number of video classes in the label vocabulary\n",
     "NUM_CLASSES = 4716\n",
     "# The fixed number of frames to keep per video; shorter videos are zero-padded by resize_axis below\n",
     "MAX_FRAMES = 20"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "arguments = {}\n",
    "arguments[\"train_file_pattern\"] = \"gs://youtube-8m-team/1/frame_level/train/train*.tfrecord\"\n",
    "arguments[\"eval_file_pattern\"] = \"gs://youtube-8m-team/1/frame_level/validate/validate-0.tfrecord\"\n",
    "arguments[\"output_dir\"] = \"trained_model\"\n",
    "arguments[\"batch_size\"] = 10\n",
    "arguments[\"train_steps\"] = 100\n",
    "arguments[\"hidden_units\"] = [1024, 256, 64]\n",
    "arguments[\"top_k\"] = 5\n",
    "arguments[\"start_delay_secs\"] = 60\n",
    "arguments[\"throttle_secs\"] = 30"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
     "# Create an input function to read our training and validation data\n",
     "# Then provide the results to the Estimator API\n",
     "def read_dataset_frame(file_pattern, mode, batch_size):\n",
     "  \"\"\"Returns an Estimator input_fn that reads YouTube-8M frame-level TFRecords.\n",
     "\n",
     "  Args:\n",
     "    file_pattern: glob pattern of TFRecord files to read.\n",
     "    mode: tf.estimator.ModeKeys; TRAIN shuffles and repeats forever, other modes read each file once.\n",
     "    batch_size: number of examples per returned batch.\n",
     "  \"\"\"\n",
     "  def _input_fn():\n",
     "    \"\"\"Builds the queue-based input pipeline; returns (batch_features, batch_labels).\"\"\"\n",
     "    print(\"\\nread_dataset_frame: _input_fn: file_pattern = {}\".format(file_pattern))\n",
     "    print(\"read_dataset_frame: _input_fn: mode = {}\".format(mode))\n",
     "    print(\"read_dataset_frame: _input_fn: batch_size = {}\".format(batch_size))\n",
     "\n",
     "    # This function dequantizes our tensors to bring them back to full floating point precision\n",
     "    def dequantize(feat_vector, max_quantized_value = 2, min_quantized_value = -2):\n",
     "      assert max_quantized_value > min_quantized_value # ensure the max value is larger than the min value\n",
     "      quantized_range = max_quantized_value - min_quantized_value # find the range between max and min\n",
     "      scalar = quantized_range / 255.0 # create a scale factor where 0 is the min and 1 is the max\n",
     "      bias = (quantized_range / 512.0) + min_quantized_value # create bias term to shift our scaled feature vector\n",
     "      return feat_vector * scalar + bias # return the scaled and shifted feature vector\n",
     "\n",
     "    # This function resizes our frames axis so that we only get a subset of frames\n",
     "    def resize_axis(tensor, axis, new_size, fill_value = 0):\n",
     "      tensor = tf.convert_to_tensor(value = tensor) # ensure tensor is a tensor\n",
     "      shape = tf.unstack(value = tf.shape(input = tensor)) # create a list where each element is a 1-D tensor the size of each dimension in tensor\n",
     "\n",
     "      pad_shape = shape[:] # create a copy of the shape list of 1-D tensors\n",
     "      pad_shape[axis] = tf.maximum(x = 0, y = new_size - shape[axis]) # change the size of the axis dimension to the maximum of 0 and the new size of our padded shape\n",
     "\n",
     "      shape[axis] = tf.minimum(x = shape[axis], y = new_size) # change the size of the axis dimension to the minimum of our original shape and the new size of our padded shape\n",
     "      shape = tf.stack(values = shape) # stack the list of tensor sizes back into a larger tensor\n",
     "\n",
     "      resized = tf.concat(values = [\n",
     "        tf.slice(input_ = tensor, begin = tf.zeros_like(tensor = shape), size = shape), # slice the tensor starting at the 0th index in each dimension and going as far as our adjusted shape in each dimension\n",
     "        tf.fill(dims = tf.stack(values = pad_shape), value = tf.cast(x = fill_value, dtype = tensor.dtype)) # fill the rest of the tensor with the fill value\n",
     "      ], axis = axis) # concatenate our sliced tensor with our fill value tensor together\n",
     "\n",
     "      new_shape = tensor.get_shape().as_list() # get the static shape of the tensor and output it to a list\n",
     "      new_shape[axis] = new_size # change the static shape's axis to our new size\n",
     "      resized.set_shape(shape = new_shape) # set the static shape of our resized tensor to our new shape\n",
     "      return resized # return the resized tensor\n",
     "\n",
     "    # Read files matching file_pattern which is provided by args\n",
     "    input_file_names = tf.matching_files(pattern = file_pattern)\n",
     "\n",
     "    # Determine amount of times to repeat file and if we should shuffle the file queue based on if we are training or evaluating\n",
     "    if mode == tf.estimator.ModeKeys.TRAIN:\n",
     "      num_epochs = None # forever\n",
     "      shuffle = True\n",
     "    else:\n",
     "      num_epochs = 1 # until EOF\n",
     "      shuffle = False\n",
     "\n",
     "    # Create filename queue from our input file names\n",
     "    filename_queue = tf.train.string_input_producer(string_tensor = input_file_names, num_epochs = num_epochs, shuffle = shuffle)\n",
     "\n",
     "    # Create a TF Record reader to read in our TF Record files\n",
     "    reader = tf.TFRecordReader()\n",
     "\n",
     "    # Use our TF Record reader to read from the filename queue\n",
     "    # NOTE(review): the first value returned by reader.read is the record key, not a queue -- the local name is misleading\n",
     "    queue, serialized_examples = reader.read(queue = filename_queue)\n",
     "    \n",
     "    # Create context and sequence feature map\n",
     "    context_features = {\n",
     "      \"video_id\": tf.FixedLenFeature(shape = [], dtype = tf.string),\n",
     "      \"labels\": tf.VarLenFeature(dtype = tf.int64)\n",
     "    }\n",
     "    sequence_features = {\n",
     "      \"rgb\": tf.FixedLenSequenceFeature(shape = [], dtype = tf.string),\n",
     "      \"audio\": tf.FixedLenSequenceFeature(shape = [], dtype = tf.string)\n",
     "    }\n",
     "\n",
     "    # Parse TF Records into our features\n",
     "    contexts, features = tf.parse_single_sequence_example(\n",
     "      serialized = serialized_examples, \n",
     "      context_features = context_features,\n",
     "      sequence_features = sequence_features)\n",
     "    print(\"read_dataset_frame: _input_fn: contexts = {}\".format(contexts)) # shape = video_id = (), labels = SparseTensor object\n",
     "    print(\"read_dataset_frame: _input_fn: features = {}\".format(features)) # shape = rgb = (frames_per_video,), audio = (frames_per_video,)\n",
     "\n",
     "    # Create features\n",
     "    # Pass video_id to features\n",
     "    features['video_id'] = contexts['video_id'] # shape = video_id = (), rgb = (frames_per_video,), audio = (frames_per_video,)\n",
     "    print(\"read_dataset_frame: _input_fn: features = {}\".format(features))\n",
     "\n",
     "    # Fix rgb data: decode the raw bytes to uint8, cast to float, reshape to one 1024-dim vector per frame\n",
     "    decoded_rgb = tf.reshape(tensor = tf.cast(x = tf.decode_raw(bytes = features[\"rgb\"], out_type = tf.uint8), dtype = tf.float32), shape = [-1, 1024]) # shape = (frames_per_video, 1024)\n",
     "    print(\"read_dataset_frame: _input_fn: decoded_rgb = {}\".format(decoded_rgb))\n",
     "    rgb_matrix = resize_axis(tensor = dequantize(decoded_rgb), axis = 0, new_size = MAX_FRAMES) # shape = (MAX_FRAMES, 1024)\n",
     "    print(\"read_dataset_frame: _input_fn: rgb_matrix = {}\".format(rgb_matrix))\n",
     "    features['rgb'] = rgb_matrix\n",
     "    print(\"read_dataset_frame: _input_fn: features = {}\".format(features)) # shape = video_id = (), rgb = (MAX_FRAMES, 1024), audio = (frames_per_video,)\n",
     "\n",
     "    # Fix audio data: same decode/dequantize/pad treatment, with 128-dim vectors per frame\n",
     "    decoded_audio = tf.reshape(tensor = tf.cast(x = tf.decode_raw(bytes = features[\"audio\"], out_type = tf.uint8), dtype = tf.float32), shape = [-1, 128]) # shape = (frames_per_video, 128)\n",
     "    print(\"read_dataset_frame: _input_fn: decoded_audio = {}\".format(decoded_audio))\n",
     "    audio_matrix = resize_axis(tensor = dequantize(decoded_audio), axis = 0, new_size = MAX_FRAMES) # shape = (MAX_FRAMES, 128)\n",
     "    print(\"read_dataset_frame: _input_fn: audio_matrix = {}\".format(audio_matrix))\n",
     "    features['audio'] = audio_matrix\n",
     "    print(\"read_dataset_frame: _input_fn: features = {}\".format(features)) # shape = video_id = (), rgb = (MAX_FRAMES, 1024), audio = (MAX_FRAMES, 128)\n",
     "\n",
     "    # Add labels to features dictionary and change to correct format from sparse to dense and to floats\n",
     "    features['labels'] = tf.cast(x = tf.sparse_to_dense(sparse_indices = contexts['labels'].values, output_shape = (NUM_CLASSES,), sparse_values = 1, validate_indices = False), dtype = tf.float32)\n",
     "    print(\"read_dataset_frame: _input_fn: features = {}\".format(features)) # shape = video_id = (), rgb = (MAX_FRAMES, 1024), audio = (MAX_FRAMES, 128), labels = (NUM_CLASSES,)\n",
     "\n",
     "    # Shuffle and batch features\n",
     "    batch_features = tf.train.shuffle_batch(\n",
     "      tensors = features, \n",
     "      batch_size = batch_size, \n",
     "      capacity = batch_size * 10, \n",
     "      min_after_dequeue = batch_size,\n",
     "      num_threads = 1,\n",
     "      enqueue_many = False,\n",
     "      allow_smaller_final_batch = True)\n",
     "    print(\"read_dataset_frame: _input_fn: batch_features = {}\".format(batch_features)) # shape = video_id = (batch_size,), rgb = (batch_size, MAX_FRAMES, 1024), audio = (batch_size, MAX_FRAMES, 128), labels = (batch_size, NUM_CLASSES)\n",
     "\n",
     "    # Pop off labels from feature dictionary\n",
     "    batch_labels = batch_features.pop('labels')\n",
     "    print(\"read_dataset_frame: _input_fn: batch_labels = {}\\n\".format(batch_labels)) # shape = (batch_size, NUM_CLASSES)\n",
     "    \n",
     "    return batch_features, batch_labels\n",
     "  return _input_fn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "# Create our model function to be used in our custom estimator\n",
    "def frame_level_model(features, labels, mode, params):\n",
    "  print(\"\\nframe_level_model: features = {}\".format(features)) # features['rgb'].shape = (batch_size, MAX_FRAMES, 1024), features['audio'].shape = (batch_size, MAX_FRAMES, 128)\n",
    "  print(\"frame_level_model: labels = {}\".format(labels))\n",
    "  print(\"frame_level_model: mode = {}\".format(mode))\n",
    "\n",
    "  # 0. Configure network\n",
    "  # Get dynamic batch size\n",
    "  current_batch_size = tf.shape(features['rgb'])[0]\n",
    "  print(\"frame_level_model: current_batch_size = {}\".format(current_batch_size))\n",
    "\n",
    "  # Stack all of the features into a 3-D tensor\n",
    "  combined_features = tf.concat(values = [features['rgb'], features['audio']], axis = 2) # shape = (current_batch_size, MAX_FRAMES, 1024 + 128)\n",
    "  print(\"frame_level_model: combined_features = {}\".format(combined_features))\n",
    "\n",
    "  # Reshape the combined features into a 2-D tensor that we will pass through our DNN\n",
    "  reshaped_combined_features = tf.reshape(tensor = combined_features, shape = [current_batch_size * MAX_FRAMES, 1024 + 128]) # shape = (current_batch_size * MAX_FRAMES, 1024 + 128)\n",
    "  print(\"frame_level_model: reshaped_combined_features = {}\".format(reshaped_combined_features))\n",
    "\n",
    "  # 1. Create the DNN structure now\n",
    "  # Create the input layer to our frame DNN\n",
    "  network = reshaped_combined_features # shape = (current_batch_size * MAX_FRAMES, 1024 + 128)\n",
    "  print(\"frame_level_model: network = reshaped_combined_features = {}\".format(network))\n",
    "\n",
    "  # Add hidden layers with the given number of units/neurons per layer\n",
    "  for units in params['hidden_units']:\n",
    "    network = tf.layers.dense(inputs = network, units = units, activation = tf.nn.relu) # shape = (current_batch_size * MAX_FRAMES, units)\n",
    "    print(\"frame_level_model: network = {}, units = {}\".format(network, units))\n",
    "\n",
    "  # Connect the final hidden layer to a dense layer with no activation to get the logits\n",
    "  logits = tf.layers.dense(inputs = network, units = NUM_CLASSES, activation = None) # shape = (current_batch_size * MAX_FRAMES, NUM_CLASSES)\n",
    "  print(\"frame_level_model: logits = {}\".format(logits))\n",
    "\n",
    "  # Since this is a multi-class, multi-label problem we will apply a sigmoid, not a softmax, to each logit to get its own probability\n",
    "  probabilities = tf.sigmoid(logits) # shape = (current_batch_size * MAX_FRAMES, NUM_CLASSES)\n",
    "  print(\"frame_level_model: probabilities = {}\".format(probabilities))\n",
    "\n",
    "  # Reshape the probabilities back into a 3-D tensor so that we have a frames axis\n",
    "  reshaped_probabilities = tf.reshape(tensor = probabilities, shape = [current_batch_size, MAX_FRAMES, NUM_CLASSES]) # shape = (current_batch_size, MAX_FRAMES, NUM_CLASSES)\n",
    "  print(\"frame_level_model: reshaped_probabilities = {}\".format(reshaped_probabilities))\n",
    "\n",
    "  # Find the average probability over all frames for each label for each example in the batch\n",
    "  average_probabilities_over_frames = tf.reduce_mean(input_tensor = reshaped_probabilities, axis = 1) # shape = (current_batch_size, NUM_CLASSES)\n",
    "  print(\"frame_level_model: average_probabilities_over_frames = {}\".format(average_probabilities_over_frames))\n",
    "\n",
    "  # Select the top k probabilities in descending order\n",
    "  top_k_probabilities = tf.nn.top_k(input = average_probabilities_over_frames, k = params['top_k'], sorted = True) # shape = (current_batch_size, top_k)\n",
    "  print(\"frame_level_model: top_k_probabilities = {}\".format(top_k_probabilities))\n",
    "\n",
    "  # Find the logits from the average probabilities by using the inverse logit \n",
    "  inverse_probabilities_logits = tf.log(average_probabilities_over_frames + 0.00000001) - tf.log(1.0 - average_probabilities_over_frames + 0.00000001) # shape = (current_batch_size, NUM_CLASSES)\n",
    "  print(\"frame_level_model: inverse_probabilities_logits = {}\".format(inverse_probabilities_logits))\n",
    "\n",
    "  # Select the top k logits using the indices of the top k probabilities in descending order\n",
    "  top_k_logits = tf.map_fn(fn = lambda x: tf.gather(params = inverse_probabilities_logits[x], indices = top_k_probabilities.indices[x]), \n",
    "                                elems = tf.range(start = 0, limit = current_batch_size), \n",
    "                                dtype = tf.float32) # shape = (current_batch_size, 1, top_k)\n",
    "  print(\"frame_level_model: top_k_logits = {}\".format(top_k_logits))\n",
    "\n",
    "  # Select the top k classes in descending order of likelihood\n",
    "  top_k_classes = top_k_probabilities.indices # shape = (current_batch_size, top_k)\n",
    "  print(\"frame_level_model: top_k_classes = {}\".format(top_k_classes))\n",
    "\n",
    "  # The 0/1 predictions based on a threshold, in this case the threshold is if the probability it greater than random chance\n",
    "  predictions = tf.where(\n",
    "    condition = average_probabilities_over_frames > 1.0 / NUM_CLASSES, # shape = (current_batch_size, NUM_CLASSES)\n",
    "    x = tf.ones_like(tensor = average_probabilities_over_frames), \n",
    "    y = tf.zeros_like(tensor = average_probabilities_over_frames))\n",
    "  print(\"frame_level_model: predictions = {}\".format(predictions))\n",
    "\n",
    "  # The 0/1 top k predictions based on a threshold, in this case the threshold is if the probability it greater than random chance\n",
    "  top_k_predictions = tf.where(\n",
    "    condition = top_k_probabilities.values > 1.0 / NUM_CLASSES, # shape = (current_batch_size, top_k)\n",
    "    x = tf.ones_like(tensor = top_k_probabilities.values), \n",
    "    y = tf.zeros_like(tensor = top_k_probabilities.values))\n",
    "  print(\"frame_level_model: top_k_predictions = {}\\n\".format(top_k_predictions))\n",
    "\n",
    "  # 2. Loss function, training/eval ops \n",
    "  if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:\n",
    "    # Since this is a multi-class, multi-label problem, we will use sigmoid activation and cross entropy loss\n",
    "    # We already have the probabilities we can use the cross entropy formula directly to calculate the loss\n",
    "    loss = tf.reduce_mean(input_tensor = -tf.reduce_sum(input_tensor = labels * tf.log(x = average_probabilities_over_frames + 0.00000001), axis = 1))\n",
    "\n",
    "    train_op = tf.contrib.layers.optimize_loss(\n",
    "      loss = loss,\n",
    "      global_step = tf.train.get_global_step(),\n",
    "      learning_rate = 0.01,\n",
    "      optimizer = \"Adam\")\n",
    "    eval_metric_ops = {\n",
    "        \"accuracy\": tf.metrics.mean_per_class_accuracy(labels = labels, predictions = predictions, num_classes = NUM_CLASSES)\n",
    "    }\n",
    "  else:\n",
    "    loss = None\n",
    "    train_op = None\n",
    "    eval_metric_ops = None\n",
    "\n",
    "  # 3. Create predictions\n",
    "  predictions_dict = {\"logits\": top_k_logits, \n",
    "                      \"probabilities\": top_k_probabilities.values, \n",
    "                      \"predictions\": top_k_predictions,\n",
    "                      \"classes\": top_k_classes}\n",
    "\n",
    "  # 4. Create export outputs\n",
    "  export_outputs = {\"predict_export_outputs\": tf.estimator.export.PredictOutput(outputs = predictions_dict)}\n",
    "\n",
    "  # 5. Return EstimatorSpec\n",
    "  return tf.estimator.EstimatorSpec(\n",
    "    mode = mode,\n",
    "    predictions = predictions_dict,\n",
    "    loss = loss,\n",
    "    train_op = train_op,\n",
    "    eval_metric_ops = eval_metric_ops,\n",
    "    export_outputs = export_outputs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "# Create our serving input function to accept the data at serving and send it in the right format to our custom estimator\n",
    "def serving_input_fn():\n",
    "  # This function fixes the shape and type of our input strings\n",
    "  def fix_shape_and_type_for_serving(placeholder):\n",
    "    # String split each string in the batch and output the values from the resulting SparseTensors\n",
    "    split_string = tf.map_fn(\n",
    "      fn = lambda x: tf.string_split(source = [placeholder[x]], delimiter=',').values, \n",
    "      elems = tf.range(start = 0, limit = tf.shape(input = placeholder)[0]), \n",
    "      dtype = tf.string) # shape = (batch_size, input_sequence_length)\n",
    "    print(\"serving_input_fn: fix_shape_and_type_for_serving: split_string = {}\".format(split_string))\n",
    "\n",
    "    # Convert each string in the split tensor to float\n",
    "    feature_tensor = tf.string_to_number(string_tensor = split_string, out_type = tf.float32) # shape = (batch_size, input_sequence_length)\n",
    "    print(\"serving_input_fn: fix_shape_and_type_for_serving: feature_tensor = {}\".format(feature_tensor))\n",
    "    return feature_tensor\n",
    "  \n",
    "  # This function fixes dynamic shape ambiguity of last dimension so that we will be able to use it in our DNN (since tf.layers.dense require the last dimension to be known)\n",
    "  def get_shape_and_set_modified_shape_3D(tensor, additional_dimension_sizes):\n",
    "    # Get static shape for tensor and convert it to list\n",
    "    shape = tensor.get_shape().as_list()\n",
    "    # Set outer shape to additional_dimension_sizes[0] * additional_dimension_sizes[1] since we know that this is the correct size\n",
    "    shape[1] = additional_dimension_sizes[0] * additional_dimension_sizes[0]\n",
    "    # Set the shape of tensor to our modified shape\n",
    "    tensor.set_shape(shape = shape) # shape = (batch_size, additional_dimension_sizes[0] * additional_dimension_sizes[1])\n",
    "    print(\"serving_input_fn: get_shape_and_set_modified_shape_3D: tensor = {}, additional_dimension_sizes = {}\".format(tensor, additional_dimension_sizes))\n",
    "    # Finally reshape tensor into the shape it is supposed to be for the model function\n",
    "    tensor = tf.reshape(tensor = tensor, shape = [-1, additional_dimension_sizes[0], additional_dimension_sizes[1]]) # shape = (batch_size, additional_dimension_sizes[0], additional_dimension_sizes[1])\n",
    "    print(\"serving_input_fn: get_shape_and_set_modified_shape_3D: tensor = {}, additional_dimension_sizes = {}\".format(tensor, additional_dimension_sizes))\n",
    "    return tensor\n",
    "  \n",
    "  # Create placeholders to accept the data sent to the model at serving time\n",
    "  feature_placeholders = { # all features come in as a batch of strings, shape = (batch_size,), this was so because of passing the arrays to online ml-engine prediction\n",
    "    'video_id': tf.placeholder(dtype = tf.string, shape = [None]),\n",
    "    'rgb': tf.placeholder(dtype = tf.string, shape = [None]),\n",
    "    'audio': tf.placeholder(dtype = tf.string, shape = [None])\n",
    "  }\n",
    "  print(\"\\nserving_input_fn: feature_placeholders = {}\".format(feature_placeholders))\n",
    "\n",
    "  # Create feature tensors\n",
    "  features = {\n",
    "    \"video_id\": feature_placeholders[\"video_id\"],\n",
    "    \"rgb\": fix_shape_and_type_for_serving(placeholder = feature_placeholders[\"rgb\"]),\n",
    "    \"audio\": fix_shape_and_type_for_serving(placeholder = feature_placeholders[\"audio\"])\n",
    "  }\n",
    "  print(\"serving_input_fn: features = {}\".format(features))\n",
    "\n",
    "  # Fix dynamic shape ambiguity of feature tensors for our DNN\n",
    "  features[\"rgb\"] = get_shape_and_set_modified_shape_3D(tensor = features[\"rgb\"], additional_dimension_sizes = [MAX_FRAMES, 1024])\n",
    "  features[\"audio\"] = get_shape_and_set_modified_shape_3D(tensor = features[\"audio\"], additional_dimension_sizes = [MAX_FRAMES, 128])\n",
    "  print(\"serving_input_fn: features = {}\\n\".format(features))\n",
    "  \n",
    "  return tf.estimator.export.ServingInputReceiver(features = features, receiver_tensors = feature_placeholders)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "# Create custom estimator's train and evaluate function\n",
    "def train_and_evaluate(args):\n",
    "  # Create our custome estimator using our model function\n",
    "  estimator = tf.estimator.Estimator(\n",
    "    model_fn = frame_level_model, \n",
    "    model_dir = args['output_dir'],\n",
    "    params = {'hidden_units': args['hidden_units'], 'top_k': args['top_k']})\n",
    "  # Create train spec to read in our training data\n",
    "  train_spec = tf.estimator.TrainSpec(\n",
    "    input_fn = read_dataset_frame(\n",
    "      file_pattern = args['train_file_pattern'], \n",
    "      mode = tf.estimator.ModeKeys.TRAIN, \n",
    "      batch_size = args['batch_size']),\n",
    "    max_steps = args['train_steps'])\n",
    "  # Create exporter to save out the complete model to disk\n",
    "  exporter = tf.estimator.LatestExporter(name = 'exporter', serving_input_receiver_fn = serving_input_fn)\n",
    "  # Create eval spec to read in our validation data and export our model\n",
    "  eval_spec = tf.estimator.EvalSpec(\n",
    "    input_fn = read_dataset_frame(\n",
    "      file_pattern = args['eval_file_pattern'], \n",
    "      mode = tf.estimator.ModeKeys.EVAL, \n",
    "      batch_size = args['batch_size']),\n",
    "    steps = None,\n",
    "    exporters = exporter,\n",
    "    start_delay_secs = args['start_delay_secs'],\n",
    "    throttle_secs = args['throttle_secs'])\n",
    "  # Create train and evaluate loop to train and evaluate our estimator\n",
    "  tf.estimator.train_and_evaluate(estimator = estimator, train_spec = train_spec, eval_spec = eval_spec)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Using default config.\n",
      "INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_task_type': 'worker', '_train_distribute': None, '_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f5fe6681c10>, '_evaluation_master': '', '_save_checkpoints_steps': None, '_keep_checkpoint_every_n_hours': 10000, '_service': None, '_num_ps_replicas': 0, '_tf_random_seed': None, '_master': '', '_num_worker_replicas': 1, '_task_id': 0, '_log_step_count_steps': 100, '_model_dir': 'trained_model', '_global_id_in_cluster': 0, '_save_summary_steps': 100}\n",
      "INFO:tensorflow:Running training and evaluation locally (non-distributed).\n",
      "INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after 30 secs (eval_spec.throttle_secs) or training is finished.\n",
      "\n",
      "read_dataset_frame: _input_fn: file_pattern = gs://youtube-8m-team/1/frame_level/train/train*.tfrecord\n",
      "read_dataset_frame: _input_fn: mode = train\n",
      "read_dataset_frame: _input_fn: batch_size = 10\n",
      "read_dataset_frame: _input_fn: contexts = {'labels': <tensorflow.python.framework.sparse_tensor.SparseTensor object at 0x7f5fe5ef1f10>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:5' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:5' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: decoded_rgb = Tensor(\"Reshape:0\", shape=(?, 1024), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: rgb_matrix = Tensor(\"concat:0\", shape=(20, 1024), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: decoded_audio = Tensor(\"Reshape_1:0\", shape=(?, 128), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: audio_matrix = Tensor(\"concat_1:0\", shape=(20, 128), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'concat_1:0' shape=(20, 128) dtype=float32>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'labels': <tf.Tensor 'Cast_4:0' shape=(4716,) dtype=float32>, 'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'concat_1:0' shape=(20, 128) dtype=float32>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: batch_features = {'labels': <tf.Tensor 'shuffle_batch:1' shape=(?, 4716) dtype=float32>, 'rgb': <tf.Tensor 'shuffle_batch:2' shape=(?, 20, 1024) dtype=float32>, 'audio': <tf.Tensor 'shuffle_batch:0' shape=(?, 20, 128) dtype=float32>, 'video_id': <tf.Tensor 'shuffle_batch:3' shape=(?,) dtype=string>}\n",
      "read_dataset_frame: _input_fn: batch_labels = Tensor(\"shuffle_batch:1\", shape=(?, 4716), dtype=float32, device=/device:CPU:0)\n",
      "\n",
      "INFO:tensorflow:Calling model_fn.\n",
      "\n",
      "frame_level_model: features = {'rgb': <tf.Tensor 'shuffle_batch:2' shape=(?, 20, 1024) dtype=float32>, 'audio': <tf.Tensor 'shuffle_batch:0' shape=(?, 20, 128) dtype=float32>, 'video_id': <tf.Tensor 'shuffle_batch:3' shape=(?,) dtype=string>}\n",
      "frame_level_model: labels = Tensor(\"shuffle_batch:1\", shape=(?, 4716), dtype=float32, device=/device:CPU:0)\n",
      "frame_level_model: mode = train\n",
      "frame_level_model: current_batch_size = Tensor(\"strided_slice:0\", shape=(), dtype=int32)\n",
      "frame_level_model: combined_features = Tensor(\"concat_2:0\", shape=(?, 20, 1152), dtype=float32)\n",
      "frame_level_model: reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = Tensor(\"dense/Relu:0\", shape=(?, 1024), dtype=float32), units = 1024\n",
      "frame_level_model: network = Tensor(\"dense_1/Relu:0\", shape=(?, 256), dtype=float32), units = 256\n",
      "frame_level_model: network = Tensor(\"dense_2/Relu:0\", shape=(?, 64), dtype=float32), units = 64\n",
      "frame_level_model: logits = Tensor(\"dense_3/BiasAdd:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: probabilities = Tensor(\"Sigmoid:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: reshaped_probabilities = Tensor(\"Reshape_3:0\", shape=(?, 20, 4716), dtype=float32)\n",
      "frame_level_model: average_probabilities_over_frames = Tensor(\"Mean:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_probabilities = TopKV2(values=<tf.Tensor 'TopKV2:0' shape=(?, 5) dtype=float32>, indices=<tf.Tensor 'TopKV2:1' shape=(?, 5) dtype=int32>)\n",
      "frame_level_model: inverse_probabilities_logits = Tensor(\"sub_3:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_logits = Tensor(\"map/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, 5), dtype=float32)\n",
      "frame_level_model: top_k_classes = Tensor(\"TopKV2:1\", shape=(?, 5), dtype=int32)\n",
      "frame_level_model: predictions = Tensor(\"Select:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_predictions = Tensor(\"Select_1:0\", shape=(?, 5), dtype=float32)\n",
      "\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Create CheckpointSaverHook.\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Saving checkpoints for 1 into trained_model/model.ckpt.\n",
      "INFO:tensorflow:loss = 1.7490635, step = 1\n",
      "INFO:tensorflow:Saving checkpoints for 73 into trained_model/model.ckpt.\n",
      "INFO:tensorflow:Loss for final step: 0.6233765.\n",
      "\n",
      "read_dataset_frame: _input_fn: file_pattern = gs://youtube-8m-team/1/frame_level/validate/validate-0.tfrecord\n",
      "read_dataset_frame: _input_fn: mode = eval\n",
      "read_dataset_frame: _input_fn: batch_size = 10\n",
      "read_dataset_frame: _input_fn: contexts = {'labels': <tensorflow.python.framework.sparse_tensor.SparseTensor object at 0x7f60342e1950>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:5' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:5' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: decoded_rgb = Tensor(\"Reshape:0\", shape=(?, 1024), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: rgb_matrix = Tensor(\"concat:0\", shape=(20, 1024), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: decoded_audio = Tensor(\"Reshape_1:0\", shape=(?, 128), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: audio_matrix = Tensor(\"concat_1:0\", shape=(20, 128), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'concat_1:0' shape=(20, 128) dtype=float32>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'labels': <tf.Tensor 'Cast_4:0' shape=(4716,) dtype=float32>, 'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'concat_1:0' shape=(20, 128) dtype=float32>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: batch_features = {'labels': <tf.Tensor 'shuffle_batch:1' shape=(?, 4716) dtype=float32>, 'rgb': <tf.Tensor 'shuffle_batch:2' shape=(?, 20, 1024) dtype=float32>, 'audio': <tf.Tensor 'shuffle_batch:0' shape=(?, 20, 128) dtype=float32>, 'video_id': <tf.Tensor 'shuffle_batch:3' shape=(?,) dtype=string>}\n",
      "read_dataset_frame: _input_fn: batch_labels = Tensor(\"shuffle_batch:1\", shape=(?, 4716), dtype=float32, device=/device:CPU:0)\n",
      "\n",
      "INFO:tensorflow:Calling model_fn.\n",
      "\n",
      "frame_level_model: features = {'rgb': <tf.Tensor 'shuffle_batch:2' shape=(?, 20, 1024) dtype=float32>, 'audio': <tf.Tensor 'shuffle_batch:0' shape=(?, 20, 128) dtype=float32>, 'video_id': <tf.Tensor 'shuffle_batch:3' shape=(?,) dtype=string>}\n",
      "frame_level_model: labels = Tensor(\"shuffle_batch:1\", shape=(?, 4716), dtype=float32, device=/device:CPU:0)\n",
      "frame_level_model: mode = eval\n",
      "frame_level_model: current_batch_size = Tensor(\"strided_slice:0\", shape=(), dtype=int32)\n",
      "frame_level_model: combined_features = Tensor(\"concat_2:0\", shape=(?, 20, 1152), dtype=float32)\n",
      "frame_level_model: reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = Tensor(\"dense/Relu:0\", shape=(?, 1024), dtype=float32), units = 1024\n",
      "frame_level_model: network = Tensor(\"dense_1/Relu:0\", shape=(?, 256), dtype=float32), units = 256\n",
      "frame_level_model: network = Tensor(\"dense_2/Relu:0\", shape=(?, 64), dtype=float32), units = 64\n",
      "frame_level_model: logits = Tensor(\"dense_3/BiasAdd:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: probabilities = Tensor(\"Sigmoid:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: reshaped_probabilities = Tensor(\"Reshape_3:0\", shape=(?, 20, 4716), dtype=float32)\n",
      "frame_level_model: average_probabilities_over_frames = Tensor(\"Mean:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_probabilities = TopKV2(values=<tf.Tensor 'TopKV2:0' shape=(?, 5) dtype=float32>, indices=<tf.Tensor 'TopKV2:1' shape=(?, 5) dtype=int32>)\n",
      "frame_level_model: inverse_probabilities_logits = Tensor(\"sub_3:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_logits = Tensor(\"map/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, 5), dtype=float32)\n",
      "frame_level_model: top_k_classes = Tensor(\"TopKV2:1\", shape=(?, 5), dtype=int32)\n",
      "frame_level_model: predictions = Tensor(\"Select:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_predictions = Tensor(\"Select_1:0\", shape=(?, 5), dtype=float32)\n",
      "\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Starting evaluation at 2018-05-25-05:15:06\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Restoring parameters from trained_model/model.ckpt-73\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Finished evaluation at 2018-05-25-05:15:13\n",
      "INFO:tensorflow:Saving dict for global step 73: accuracy = 0.0002120441, global_step = 73, loss = 0.5290011\n",
      "\n",
      "serving_input_fn: feature_placeholders = {'rgb': <tf.Tensor 'Placeholder_1:0' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'Placeholder_2:0' shape=(?,) dtype=string>}\n",
      "serving_input_fn: fix_shape_and_type_for_serving: split_string = Tensor(\"map/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, ?), dtype=string)\n",
      "serving_input_fn: fix_shape_and_type_for_serving: feature_tensor = Tensor(\"StringToNumber:0\", shape=(?, ?), dtype=float32)\n",
      "serving_input_fn: fix_shape_and_type_for_serving: split_string = Tensor(\"map_1/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, ?), dtype=string)\n",
      "serving_input_fn: fix_shape_and_type_for_serving: feature_tensor = Tensor(\"StringToNumber_1:0\", shape=(?, ?), dtype=float32)\n",
      "serving_input_fn: features = {'rgb': <tf.Tensor 'StringToNumber:0' shape=(?, ?) dtype=float32>, 'video_id': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'StringToNumber_1:0' shape=(?, ?) dtype=float32>}\n",
      "serving_input_fn: get_shape_and_set_modified_shape_3D: tensor = Tensor(\"StringToNumber:0\", shape=(?, 400), dtype=float32), additional_dimension_sizes = [20, 1024]\n",
      "serving_input_fn: get_shape_and_set_modified_shape_3D: tensor = Tensor(\"Reshape:0\", shape=(?, 20, 1024), dtype=float32), additional_dimension_sizes = [20, 1024]\n",
      "serving_input_fn: get_shape_and_set_modified_shape_3D: tensor = Tensor(\"StringToNumber_1:0\", shape=(?, 400), dtype=float32), additional_dimension_sizes = [20, 128]\n",
      "serving_input_fn: get_shape_and_set_modified_shape_3D: tensor = Tensor(\"Reshape_1:0\", shape=(?, 20, 128), dtype=float32), additional_dimension_sizes = [20, 128]\n",
      "serving_input_fn: features = {'rgb': <tf.Tensor 'Reshape:0' shape=(?, 20, 1024) dtype=float32>, 'video_id': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'Reshape_1:0' shape=(?, 20, 128) dtype=float32>}\n",
      "\n",
      "INFO:tensorflow:Calling model_fn.\n",
      "\n",
      "frame_level_model: features = {'rgb': <tf.Tensor 'Reshape:0' shape=(?, 20, 1024) dtype=float32>, 'video_id': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'Reshape_1:0' shape=(?, 20, 128) dtype=float32>}\n",
      "frame_level_model: labels = None\n",
      "frame_level_model: mode = infer\n",
      "frame_level_model: current_batch_size = Tensor(\"strided_slice_2:0\", shape=(), dtype=int32)\n",
      "frame_level_model: combined_features = Tensor(\"concat:0\", shape=(?, 20, 1152), dtype=float32)\n",
      "frame_level_model: reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = Tensor(\"dense/Relu:0\", shape=(?, 1024), dtype=float32), units = 1024\n",
      "frame_level_model: network = Tensor(\"dense_1/Relu:0\", shape=(?, 256), dtype=float32), units = 256\n",
      "frame_level_model: network = Tensor(\"dense_2/Relu:0\", shape=(?, 64), dtype=float32), units = 64\n",
      "frame_level_model: logits = Tensor(\"dense_3/BiasAdd:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: probabilities = Tensor(\"Sigmoid:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: reshaped_probabilities = Tensor(\"Reshape_3:0\", shape=(?, 20, 4716), dtype=float32)\n",
      "frame_level_model: average_probabilities_over_frames = Tensor(\"Mean:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_probabilities = TopKV2(values=<tf.Tensor 'TopKV2:0' shape=(?, 5) dtype=float32>, indices=<tf.Tensor 'TopKV2:1' shape=(?, 5) dtype=int32>)\n",
      "frame_level_model: inverse_probabilities_logits = Tensor(\"sub_1:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_logits = Tensor(\"map_2/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, 5), dtype=float32)\n",
      "frame_level_model: top_k_classes = Tensor(\"TopKV2:1\", shape=(?, 5), dtype=int32)\n",
      "frame_level_model: predictions = Tensor(\"Select:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_predictions = Tensor(\"Select_1:0\", shape=(?, 5), dtype=float32)\n",
      "\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Signatures INCLUDED in export for Classify: None\n",
      "INFO:tensorflow:Signatures INCLUDED in export for Regress: None\n",
      "INFO:tensorflow:Signatures INCLUDED in export for Predict: ['serving_default', 'predict_export_outputs']\n",
      "INFO:tensorflow:Restoring parameters from trained_model/model.ckpt-73\n",
      "INFO:tensorflow:Assets added to graph.\n",
      "INFO:tensorflow:No assets to write.\n",
      "INFO:tensorflow:SavedModel written to: trained_model/export/exporter/temp-1527225314/saved_model.pb\n",
      "\n",
      "read_dataset_frame: _input_fn: file_pattern = gs://youtube-8m-team/1/frame_level/train/train*.tfrecord\n",
      "read_dataset_frame: _input_fn: mode = train\n",
      "read_dataset_frame: _input_fn: batch_size = 10\n",
      "read_dataset_frame: _input_fn: contexts = {'labels': <tensorflow.python.framework.sparse_tensor.SparseTensor object at 0x7f5fe663fe50>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:5' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:5' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: decoded_rgb = Tensor(\"Reshape:0\", shape=(?, 1024), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: rgb_matrix = Tensor(\"concat:0\", shape=(20, 1024), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: decoded_audio = Tensor(\"Reshape_1:0\", shape=(?, 128), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: audio_matrix = Tensor(\"concat_1:0\", shape=(20, 128), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'concat_1:0' shape=(20, 128) dtype=float32>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'labels': <tf.Tensor 'Cast_4:0' shape=(4716,) dtype=float32>, 'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'concat_1:0' shape=(20, 128) dtype=float32>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: batch_features = {'labels': <tf.Tensor 'shuffle_batch:1' shape=(?, 4716) dtype=float32>, 'rgb': <tf.Tensor 'shuffle_batch:2' shape=(?, 20, 1024) dtype=float32>, 'audio': <tf.Tensor 'shuffle_batch:0' shape=(?, 20, 128) dtype=float32>, 'video_id': <tf.Tensor 'shuffle_batch:3' shape=(?,) dtype=string>}\n",
      "read_dataset_frame: _input_fn: batch_labels = Tensor(\"shuffle_batch:1\", shape=(?, 4716), dtype=float32, device=/device:CPU:0)\n",
      "\n",
      "INFO:tensorflow:Calling model_fn.\n",
      "\n",
      "frame_level_model: features = {'rgb': <tf.Tensor 'shuffle_batch:2' shape=(?, 20, 1024) dtype=float32>, 'audio': <tf.Tensor 'shuffle_batch:0' shape=(?, 20, 128) dtype=float32>, 'video_id': <tf.Tensor 'shuffle_batch:3' shape=(?,) dtype=string>}\n",
      "frame_level_model: labels = Tensor(\"shuffle_batch:1\", shape=(?, 4716), dtype=float32, device=/device:CPU:0)\n",
      "frame_level_model: mode = train\n",
      "frame_level_model: current_batch_size = Tensor(\"strided_slice:0\", shape=(), dtype=int32)\n",
      "frame_level_model: combined_features = Tensor(\"concat_2:0\", shape=(?, 20, 1152), dtype=float32)\n",
      "frame_level_model: reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = Tensor(\"dense/Relu:0\", shape=(?, 1024), dtype=float32), units = 1024\n",
      "frame_level_model: network = Tensor(\"dense_1/Relu:0\", shape=(?, 256), dtype=float32), units = 256\n",
      "frame_level_model: network = Tensor(\"dense_2/Relu:0\", shape=(?, 64), dtype=float32), units = 64\n",
      "frame_level_model: logits = Tensor(\"dense_3/BiasAdd:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: probabilities = Tensor(\"Sigmoid:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: reshaped_probabilities = Tensor(\"Reshape_3:0\", shape=(?, 20, 4716), dtype=float32)\n",
      "frame_level_model: average_probabilities_over_frames = Tensor(\"Mean:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_probabilities = TopKV2(values=<tf.Tensor 'TopKV2:0' shape=(?, 5) dtype=float32>, indices=<tf.Tensor 'TopKV2:1' shape=(?, 5) dtype=int32>)\n",
      "frame_level_model: inverse_probabilities_logits = Tensor(\"sub_3:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_logits = Tensor(\"map/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, 5), dtype=float32)\n",
      "frame_level_model: top_k_classes = Tensor(\"TopKV2:1\", shape=(?, 5), dtype=int32)\n",
      "frame_level_model: predictions = Tensor(\"Select:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_predictions = Tensor(\"Select_1:0\", shape=(?, 5), dtype=float32)\n",
      "\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Create CheckpointSaverHook.\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Restoring parameters from trained_model/model.ckpt-73\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Saving checkpoints for 74 into trained_model/model.ckpt.\n",
      "INFO:tensorflow:loss = 0.39211097, step = 74\n",
      "INFO:tensorflow:Saving checkpoints for 100 into trained_model/model.ckpt.\n",
      "INFO:tensorflow:Loss for final step: 0.5840549.\n",
      "\n",
      "read_dataset_frame: _input_fn: file_pattern = gs://youtube-8m-team/1/frame_level/validate/validate-0.tfrecord\n",
      "read_dataset_frame: _input_fn: mode = eval\n",
      "read_dataset_frame: _input_fn: batch_size = 10\n",
      "read_dataset_frame: _input_fn: contexts = {'labels': <tensorflow.python.framework.sparse_tensor.SparseTensor object at 0x7f5fe6e8c050>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:5' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:5' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: decoded_rgb = Tensor(\"Reshape:0\", shape=(?, 1024), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: rgb_matrix = Tensor(\"concat:0\", shape=(20, 1024), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: decoded_audio = Tensor(\"Reshape_1:0\", shape=(?, 128), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: audio_matrix = Tensor(\"concat_1:0\", shape=(20, 128), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'concat_1:0' shape=(20, 128) dtype=float32>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'labels': <tf.Tensor 'Cast_4:0' shape=(4716,) dtype=float32>, 'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'concat_1:0' shape=(20, 128) dtype=float32>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: batch_features = {'labels': <tf.Tensor 'shuffle_batch:1' shape=(?, 4716) dtype=float32>, 'rgb': <tf.Tensor 'shuffle_batch:2' shape=(?, 20, 1024) dtype=float32>, 'audio': <tf.Tensor 'shuffle_batch:0' shape=(?, 20, 128) dtype=float32>, 'video_id': <tf.Tensor 'shuffle_batch:3' shape=(?,) dtype=string>}\n",
      "read_dataset_frame: _input_fn: batch_labels = Tensor(\"shuffle_batch:1\", shape=(?, 4716), dtype=float32, device=/device:CPU:0)\n",
      "\n",
      "INFO:tensorflow:Calling model_fn.\n",
      "\n",
      "frame_level_model: features = {'rgb': <tf.Tensor 'shuffle_batch:2' shape=(?, 20, 1024) dtype=float32>, 'audio': <tf.Tensor 'shuffle_batch:0' shape=(?, 20, 128) dtype=float32>, 'video_id': <tf.Tensor 'shuffle_batch:3' shape=(?,) dtype=string>}\n",
      "frame_level_model: labels = Tensor(\"shuffle_batch:1\", shape=(?, 4716), dtype=float32, device=/device:CPU:0)\n",
      "frame_level_model: mode = eval\n",
      "frame_level_model: current_batch_size = Tensor(\"strided_slice:0\", shape=(), dtype=int32)\n",
      "frame_level_model: combined_features = Tensor(\"concat_2:0\", shape=(?, 20, 1152), dtype=float32)\n",
      "frame_level_model: reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = Tensor(\"dense/Relu:0\", shape=(?, 1024), dtype=float32), units = 1024\n",
      "frame_level_model: network = Tensor(\"dense_1/Relu:0\", shape=(?, 256), dtype=float32), units = 256\n",
      "frame_level_model: network = Tensor(\"dense_2/Relu:0\", shape=(?, 64), dtype=float32), units = 64\n",
      "frame_level_model: logits = Tensor(\"dense_3/BiasAdd:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: probabilities = Tensor(\"Sigmoid:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: reshaped_probabilities = Tensor(\"Reshape_3:0\", shape=(?, 20, 4716), dtype=float32)\n",
      "frame_level_model: average_probabilities_over_frames = Tensor(\"Mean:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_probabilities = TopKV2(values=<tf.Tensor 'TopKV2:0' shape=(?, 5) dtype=float32>, indices=<tf.Tensor 'TopKV2:1' shape=(?, 5) dtype=int32>)\n",
      "frame_level_model: inverse_probabilities_logits = Tensor(\"sub_3:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_logits = Tensor(\"map/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, 5), dtype=float32)\n",
      "frame_level_model: top_k_classes = Tensor(\"TopKV2:1\", shape=(?, 5), dtype=int32)\n",
      "frame_level_model: predictions = Tensor(\"Select:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_predictions = Tensor(\"Select_1:0\", shape=(?, 5), dtype=float32)\n",
      "\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Starting evaluation at 2018-05-25-05:15:27\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Restoring parameters from trained_model/model.ckpt-100\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Finished evaluation at 2018-05-25-05:15:30\n",
      "INFO:tensorflow:Saving dict for global step 100: accuracy = 0.0002120441, global_step = 100, loss = 0.43645155\n",
      "\n",
      "serving_input_fn: feature_placeholders = {'rgb': <tf.Tensor 'Placeholder_1:0' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'Placeholder_2:0' shape=(?,) dtype=string>}\n",
      "serving_input_fn: fix_shape_and_type_for_serving: split_string = Tensor(\"map/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, ?), dtype=string)\n",
      "serving_input_fn: fix_shape_and_type_for_serving: feature_tensor = Tensor(\"StringToNumber:0\", shape=(?, ?), dtype=float32)\n",
      "serving_input_fn: fix_shape_and_type_for_serving: split_string = Tensor(\"map_1/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, ?), dtype=string)\n",
      "serving_input_fn: fix_shape_and_type_for_serving: feature_tensor = Tensor(\"StringToNumber_1:0\", shape=(?, ?), dtype=float32)\n",
      "serving_input_fn: features = {'rgb': <tf.Tensor 'StringToNumber:0' shape=(?, ?) dtype=float32>, 'video_id': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'StringToNumber_1:0' shape=(?, ?) dtype=float32>}\n",
      "serving_input_fn: get_shape_and_set_modified_shape_3D: tensor = Tensor(\"StringToNumber:0\", shape=(?, 400), dtype=float32), additional_dimension_sizes = [20, 1024]\n",
      "serving_input_fn: get_shape_and_set_modified_shape_3D: tensor = Tensor(\"Reshape:0\", shape=(?, 20, 1024), dtype=float32), additional_dimension_sizes = [20, 1024]\n",
      "serving_input_fn: get_shape_and_set_modified_shape_3D: tensor = Tensor(\"StringToNumber_1:0\", shape=(?, 400), dtype=float32), additional_dimension_sizes = [20, 128]\n",
      "serving_input_fn: get_shape_and_set_modified_shape_3D: tensor = Tensor(\"Reshape_1:0\", shape=(?, 20, 128), dtype=float32), additional_dimension_sizes = [20, 128]\n",
      "serving_input_fn: features = {'rgb': <tf.Tensor 'Reshape:0' shape=(?, 20, 1024) dtype=float32>, 'video_id': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'Reshape_1:0' shape=(?, 20, 128) dtype=float32>}\n",
      "\n",
      "INFO:tensorflow:Calling model_fn.\n",
      "\n",
      "frame_level_model: features = {'rgb': <tf.Tensor 'Reshape:0' shape=(?, 20, 1024) dtype=float32>, 'video_id': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'Reshape_1:0' shape=(?, 20, 128) dtype=float32>}\n",
      "frame_level_model: labels = None\n",
      "frame_level_model: mode = infer\n",
      "frame_level_model: current_batch_size = Tensor(\"strided_slice_2:0\", shape=(), dtype=int32)\n",
      "frame_level_model: combined_features = Tensor(\"concat:0\", shape=(?, 20, 1152), dtype=float32)\n",
      "frame_level_model: reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = Tensor(\"dense/Relu:0\", shape=(?, 1024), dtype=float32), units = 1024\n",
      "frame_level_model: network = Tensor(\"dense_1/Relu:0\", shape=(?, 256), dtype=float32), units = 256\n",
      "frame_level_model: network = Tensor(\"dense_2/Relu:0\", shape=(?, 64), dtype=float32), units = 64\n",
      "frame_level_model: logits = Tensor(\"dense_3/BiasAdd:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: probabilities = Tensor(\"Sigmoid:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: reshaped_probabilities = Tensor(\"Reshape_3:0\", shape=(?, 20, 4716), dtype=float32)\n",
      "frame_level_model: average_probabilities_over_frames = Tensor(\"Mean:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_probabilities = TopKV2(values=<tf.Tensor 'TopKV2:0' shape=(?, 5) dtype=float32>, indices=<tf.Tensor 'TopKV2:1' shape=(?, 5) dtype=int32>)\n",
      "frame_level_model: inverse_probabilities_logits = Tensor(\"sub_1:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_logits = Tensor(\"map_2/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, 5), dtype=float32)\n",
      "frame_level_model: top_k_classes = Tensor(\"TopKV2:1\", shape=(?, 5), dtype=int32)\n",
      "frame_level_model: predictions = Tensor(\"Select:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_predictions = Tensor(\"Select_1:0\", shape=(?, 5), dtype=float32)\n",
      "\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Signatures INCLUDED in export for Classify: None\n",
      "INFO:tensorflow:Signatures INCLUDED in export for Regress: None\n",
      "INFO:tensorflow:Signatures INCLUDED in export for Predict: ['serving_default', 'predict_export_outputs']\n",
      "INFO:tensorflow:Restoring parameters from trained_model/model.ckpt-100\n",
      "INFO:tensorflow:Assets added to graph.\n",
      "INFO:tensorflow:No assets to write.\n",
      "INFO:tensorflow:SavedModel written to: trained_model/export/exporter/temp-1527225331/saved_model.pb\n"
     ]
    }
   ],
   "source": [
    "# Run the training job\n",
    "shutil.rmtree(arguments['output_dir'], ignore_errors = True) # start fresh each time\n",
    "train_and_evaluate(args = arguments)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
   "source": [
    "# Training"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
   "source": [
    "### Local"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "read_dataset_frame: _input_fn: file_pattern = gs://youtube-8m-team/1/frame_level/train/train*.tfrecord\n",
      "read_dataset_frame: _input_fn: mode = train\n",
      "read_dataset_frame: _input_fn: batch_size = 10\n",
      "read_dataset_frame: _input_fn: contexts = {'labels': <tensorflow.python.framework.sparse_tensor.SparseTensor object at 0x7f27da7d25d0>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:5' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:5' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: decoded_rgb = Tensor(\"Reshape:0\", shape=(?, 1024), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: rgb_matrix = Tensor(\"concat:0\", shape=(20, 1024), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: decoded_audio = Tensor(\"Reshape_1:0\", shape=(?, 128), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: audio_matrix = Tensor(\"concat_1:0\", shape=(20, 128), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'concat_1:0' shape=(20, 128) dtype=float32>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'labels': <tf.Tensor 'Cast_4:0' shape=(4716,) dtype=float32>, 'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'concat_1:0' shape=(20, 128) dtype=float32>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: batch_features = {'labels': <tf.Tensor 'shuffle_batch:1' shape=(?, 4716) dtype=float32>, 'rgb': <tf.Tensor 'shuffle_batch:2' shape=(?, 20, 1024) dtype=float32>, 'audio': <tf.Tensor 'shuffle_batch:0' shape=(?, 20, 128) dtype=float32>, 'video_id': <tf.Tensor 'shuffle_batch:3' shape=(?,) dtype=string>}\n",
      "read_dataset_frame: _input_fn: batch_labels = Tensor(\"shuffle_batch:1\", shape=(?, 4716), dtype=float32, device=/device:CPU:0)\n",
      "\n",
      "\n",
      "frame_level_model: features = {'rgb': <tf.Tensor 'shuffle_batch:2' shape=(?, 20, 1024) dtype=float32>, 'audio': <tf.Tensor 'shuffle_batch:0' shape=(?, 20, 128) dtype=float32>, 'video_id': <tf.Tensor 'shuffle_batch:3' shape=(?,) dtype=string>}\n",
      "frame_level_model: labels = Tensor(\"shuffle_batch:1\", shape=(?, 4716), dtype=float32, device=/device:CPU:0)\n",
      "frame_level_model: mode = train\n",
      "frame_level_model: current_batch_size = Tensor(\"strided_slice:0\", shape=(), dtype=int32)\n",
      "frame_level_model: combined_features = Tensor(\"concat_2:0\", shape=(?, 20, 1152), dtype=float32)\n",
      "frame_level_model: reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = Tensor(\"dense/Relu:0\", shape=(?, 1024), dtype=float32), units = 1024\n",
      "frame_level_model: network = Tensor(\"dense_1/Relu:0\", shape=(?, 512), dtype=float32), units = 512\n",
      "frame_level_model: network = Tensor(\"dense_2/Relu:0\", shape=(?, 256), dtype=float32), units = 256\n",
      "frame_level_model: logits = Tensor(\"dense_3/BiasAdd:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: probabilities = Tensor(\"Sigmoid:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: reshaped_probabilities = Tensor(\"Reshape_3:0\", shape=(?, 20, 4716), dtype=float32)\n",
      "frame_level_model: average_probabilities_over_frames = Tensor(\"Mean:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_probabilities = TopKV2(values=<tf.Tensor 'TopKV2:0' shape=(?, 5) dtype=float32>, indices=<tf.Tensor 'TopKV2:1' shape=(?, 5) dtype=int32>)\n",
      "frame_level_model: inverse_probabilities_logits = Tensor(\"sub_3:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_logits = Tensor(\"map/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, 5), dtype=float32)\n",
      "frame_level_model: top_k_classes = Tensor(\"TopKV2:1\", shape=(?, 5), dtype=int32)\n",
      "frame_level_model: predictions = Tensor(\"Select:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_predictions = Tensor(\"Select_1:0\", shape=(?, 5), dtype=float32)\n",
      "\n",
      "\n",
      "read_dataset_frame: _input_fn: file_pattern = gs://youtube-8m-team/1/frame_level/validate/validate-0.tfrecord\n",
      "read_dataset_frame: _input_fn: mode = eval\n",
      "read_dataset_frame: _input_fn: batch_size = 10\n",
      "read_dataset_frame: _input_fn: contexts = {'labels': <tensorflow.python.framework.sparse_tensor.SparseTensor object at 0x7f27d00d4e90>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:5' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:5' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: decoded_rgb = Tensor(\"Reshape:0\", shape=(?, 1024), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: rgb_matrix = Tensor(\"concat:0\", shape=(20, 1024), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: decoded_audio = Tensor(\"Reshape_1:0\", shape=(?, 128), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: audio_matrix = Tensor(\"concat_1:0\", shape=(20, 128), dtype=float32, device=/device:CPU:0)\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'concat_1:0' shape=(20, 128) dtype=float32>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'labels': <tf.Tensor 'Cast_4:0' shape=(4716,) dtype=float32>, 'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'concat_1:0' shape=(20, 128) dtype=float32>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: batch_features = {'labels': <tf.Tensor 'shuffle_batch:1' shape=(?, 4716) dtype=float32>, 'rgb': <tf.Tensor 'shuffle_batch:2' shape=(?, 20, 1024) dtype=float32>, 'audio': <tf.Tensor 'shuffle_batch:0' shape=(?, 20, 128) dtype=float32>, 'video_id': <tf.Tensor 'shuffle_batch:3' shape=(?,) dtype=string>}\n",
      "read_dataset_frame: _input_fn: batch_labels = Tensor(\"shuffle_batch:1\", shape=(?, 4716), dtype=float32, device=/device:CPU:0)\n",
      "\n",
      "\n",
      "frame_level_model: features = {'rgb': <tf.Tensor 'shuffle_batch:2' shape=(?, 20, 1024) dtype=float32>, 'audio': <tf.Tensor 'shuffle_batch:0' shape=(?, 20, 128) dtype=float32>, 'video_id': <tf.Tensor 'shuffle_batch:3' shape=(?,) dtype=string>}\n",
      "frame_level_model: labels = Tensor(\"shuffle_batch:1\", shape=(?, 4716), dtype=float32, device=/device:CPU:0)\n",
      "frame_level_model: mode = eval\n",
      "frame_level_model: current_batch_size = Tensor(\"strided_slice:0\", shape=(), dtype=int32)\n",
      "frame_level_model: combined_features = Tensor(\"concat_2:0\", shape=(?, 20, 1152), dtype=float32)\n",
      "frame_level_model: reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = Tensor(\"dense/Relu:0\", shape=(?, 1024), dtype=float32), units = 1024\n",
      "frame_level_model: network = Tensor(\"dense_1/Relu:0\", shape=(?, 512), dtype=float32), units = 512\n",
      "frame_level_model: network = Tensor(\"dense_2/Relu:0\", shape=(?, 256), dtype=float32), units = 256\n",
      "frame_level_model: logits = Tensor(\"dense_3/BiasAdd:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: probabilities = Tensor(\"Sigmoid:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: reshaped_probabilities = Tensor(\"Reshape_3:0\", shape=(?, 20, 4716), dtype=float32)\n",
      "frame_level_model: average_probabilities_over_frames = Tensor(\"Mean:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_probabilities = TopKV2(values=<tf.Tensor 'TopKV2:0' shape=(?, 5) dtype=float32>, indices=<tf.Tensor 'TopKV2:1' shape=(?, 5) dtype=int32>)\n",
      "frame_level_model: inverse_probabilities_logits = Tensor(\"sub_3:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_logits = Tensor(\"map/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, 5), dtype=float32)\n",
      "frame_level_model: top_k_classes = Tensor(\"TopKV2:1\", shape=(?, 5), dtype=int32)\n",
      "frame_level_model: predictions = Tensor(\"Select:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_predictions = Tensor(\"Select_1:0\", shape=(?, 5), dtype=float32)\n",
      "\n",
      "\n",
      "serving_input_fn: feature_placeholders = {'rgb': <tf.Tensor 'Placeholder_1:0' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'Placeholder_2:0' shape=(?,) dtype=string>}\n",
      "serving_input_fn: features[video_id] = Tensor(\"Placeholder:0\", shape=(?,), dtype=string)\n",
      "serving_input_fn: rgb_split_string = Tensor(\"map/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, ?), dtype=string)\n",
      "serving_input_fn: features[rgb] = Tensor(\"StringToNumber:0\", shape=(?, ?), dtype=float32)\n",
      "serving_input_fn: audio_split_string = Tensor(\"map_1/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, ?), dtype=string)\n",
      "serving_input_fn: features[audio] = Tensor(\"StringToNumber_1:0\", shape=(?, ?), dtype=float32)\n",
      "serving_input_fn: features = {'rgb': <tf.Tensor 'StringToNumber:0' shape=(?, ?) dtype=float32>, 'video_id': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'StringToNumber_1:0' shape=(?, ?) dtype=float32>}\n",
      "serving_input_fn: features[rgb] = Tensor(\"StringToNumber:0\", shape=(?, 20480), dtype=float32)\n",
      "serving_input_fn: features[rgb] = Tensor(\"Reshape:0\", shape=(?, 20, 1024), dtype=float32)\n",
      "serving_input_fn: features[audio] = Tensor(\"StringToNumber_1:0\", shape=(?, 2560), dtype=float32)\n",
      "serving_input_fn: features[audio] = Tensor(\"Reshape_1:0\", shape=(?, 20, 128), dtype=float32)\n",
      "serving_input_fn: features = {'rgb': <tf.Tensor 'Reshape:0' shape=(?, 20, 1024) dtype=float32>, 'video_id': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'Reshape_1:0' shape=(?, 20, 128) dtype=float32>}\n",
      "\n",
      "\n",
      "frame_level_model: features = {'rgb': <tf.Tensor 'Reshape:0' shape=(?, 20, 1024) dtype=float32>, 'video_id': <tf.Tensor 'Placeholder:0' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'Reshape_1:0' shape=(?, 20, 128) dtype=float32>}\n",
      "frame_level_model: labels = None\n",
      "frame_level_model: mode = infer\n",
      "frame_level_model: current_batch_size = Tensor(\"strided_slice_2:0\", shape=(), dtype=int32)\n",
      "frame_level_model: combined_features = Tensor(\"concat:0\", shape=(?, 20, 1152), dtype=float32)\n",
      "frame_level_model: reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = reshaped_combined_features = Tensor(\"Reshape_2:0\", shape=(?, 1152), dtype=float32)\n",
      "frame_level_model: network = Tensor(\"dense/Relu:0\", shape=(?, 1024), dtype=float32), units = 1024\n",
      "frame_level_model: network = Tensor(\"dense_1/Relu:0\", shape=(?, 512), dtype=float32), units = 512\n",
      "frame_level_model: network = Tensor(\"dense_2/Relu:0\", shape=(?, 256), dtype=float32), units = 256\n",
      "frame_level_model: logits = Tensor(\"dense_3/BiasAdd:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: probabilities = Tensor(\"Sigmoid:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: reshaped_probabilities = Tensor(\"Reshape_3:0\", shape=(?, 20, 4716), dtype=float32)\n",
      "frame_level_model: average_probabilities_over_frames = Tensor(\"Mean:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_probabilities = TopKV2(values=<tf.Tensor 'TopKV2:0' shape=(?, 5) dtype=float32>, indices=<tf.Tensor 'TopKV2:1' shape=(?, 5) dtype=int32>)\n",
      "frame_level_model: inverse_probabilities_logits = Tensor(\"sub_1:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_logits = Tensor(\"map_2/TensorArrayStack/TensorArrayGatherV3:0\", shape=(?, 5), dtype=float32)\n",
      "frame_level_model: top_k_classes = Tensor(\"TopKV2:1\", shape=(?, 5), dtype=int32)\n",
      "frame_level_model: predictions = Tensor(\"Select:0\", shape=(?, 4716), dtype=float32)\n",
      "frame_level_model: top_k_predictions = Tensor(\"Select_1:0\", shape=(?, 5), dtype=float32)\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/envs/py2env/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n",
      "INFO:tensorflow:Using default config.\n",
      "INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_task_type': 'worker', '_train_distribute': None, '_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f27db285f10>, '_evaluation_master': '', '_save_checkpoints_steps': None, '_keep_checkpoint_every_n_hours': 10000, '_service': None, '_num_ps_replicas': 0, '_tf_random_seed': None, '_master': '', '_num_worker_replicas': 1, '_task_id': 0, '_log_step_count_steps': 100, '_model_dir': 'trained_model/', '_global_id_in_cluster': 0, '_save_summary_steps': 100}\n",
      "INFO:tensorflow:Running training and evaluation locally (non-distributed).\n",
      "INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after 30 secs (eval_spec.throttle_secs) or training is finished.\n",
      "INFO:tensorflow:Calling model_fn.\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Create CheckpointSaverHook.\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "2018-05-25 03:36:37.103387: I tensorflow/core/platform/cpu_feature_guard.cc:140] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Saving checkpoints for 1 into trained_model/model.ckpt.\n",
      "INFO:tensorflow:loss = 1.8465084, step = 1\n",
      "INFO:tensorflow:Saving checkpoints for 100 into trained_model/model.ckpt.\n",
      "INFO:tensorflow:Loss for final step: 0.6245499.\n",
      "INFO:tensorflow:Calling model_fn.\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Starting evaluation at 2018-05-25-03:36:59\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Restoring parameters from trained_model/model.ckpt-100\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Finished evaluation at 2018-05-25-03:37:03\n",
      "INFO:tensorflow:Saving dict for global step 100: accuracy = 0.0002120441, global_step = 100, loss = 0.47935745\n",
      "INFO:tensorflow:Calling model_fn.\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Signatures INCLUDED in export for Classify: None\n",
      "INFO:tensorflow:Signatures INCLUDED in export for Regress: None\n",
      "INFO:tensorflow:Signatures INCLUDED in export for Predict: ['serving_default', 'predict_export_outputs']\n",
      "INFO:tensorflow:Restoring parameters from trained_model/model.ckpt-100\n",
      "INFO:tensorflow:Assets added to graph.\n",
      "INFO:tensorflow:No assets to write.\n",
      "INFO:tensorflow:SavedModel written to: trained_model/export/exporter/temp-1527219424/saved_model.pb\n"
     ]
    }
   ],
   "source": [
    "%bash\n",
    "OUTDIR=trained_model\n",
    "rm -rf $OUTDIR\n",
    "export PYTHONPATH=$PYTHONPATH:$PWD/trainer\n",
    "python -m trainer.task \\\n",
    "  --train_file_pattern=\"gs://youtube-8m-team/1/frame_level/train/train*.tfrecord\" \\\n",
    "  --eval_file_pattern=\"gs://youtube-8m-team/1/frame_level/validate/validate-0.tfrecord\"  \\\n",
    "  --output_dir=$OUTDIR \\\n",
    "  --batch_size=10 \\\n",
    "  --train_steps=100 \\\n",
    "  --hidden_units=\"1024 512 256\" \\\n",
    "  --top_k=5 \\\n",
    "  --job-dir=./tmp"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
   "source": [
    "### GCloud"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "gs://youtube8m-4-train/youtube_8m_frame_level/trained_model us-central1 job_youtube_8m_frame_level180525_033708\n",
      "jobId: job_youtube_8m_frame_level180525_033708\n",
      "state: QUEUED\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "CommandException: 1 files/objects could not be removed.\n",
      "Job [job_youtube_8m_frame_level180525_033708] submitted successfully.\n",
      "Your job is still active. You may view the status of your job with the command\n",
      "\n",
      "  $ gcloud ml-engine jobs describe job_youtube_8m_frame_level180525_033708\n",
      "\n",
      "or continue streaming the logs with the command\n",
      "\n",
      "  $ gcloud ml-engine jobs stream-logs job_youtube_8m_frame_level180525_033708\n"
     ]
    }
   ],
   "source": [
    "%bash\n",
    "OUTDIR=gs://$BUCKET/youtube_8m_frame_level/trained_model\n",
    "JOBNAME=job_youtube_8m_frame_level$(date -u +%y%m%d_%H%M%S)\n",
    "echo $OUTDIR $REGION $JOBNAME\n",
    "gcloud storage rm --recursive --continue-on-error $OUTDIR\n",
    "gcloud ml-engine jobs submit training $JOBNAME \\\n",
    "  --region=$REGION \\\n",
    "  --module-name=trainer.task \\\n",
    "  --package-path=$PWD/trainer \\\n",
    "  --job-dir=$OUTDIR \\\n",
    "  --staging-bucket=gs://$BUCKET \\\n",
    "  --scale-tier=STANDARD_1 \\\n",
    "  --runtime-version=1.5 \\\n",
    "  -- \\\n",
    "  --train_file_pattern=\"gs://youtube-8m-team/1/frame_level/train/train*.tfrecord\" \\\n",
    "  --eval_file_pattern=\"gs://youtube-8m-team/1/frame_level/validate/validate-0.tfrecord\"  \\\n",
    "  --output_dir=$OUTDIR \\\n",
    "  --batch_size=50 \\\n",
    "  --train_steps=10000 \\\n",
    "  --hidden_units=\"1024 512 256\" \\\n",
    "  --top_k=5 \\\n",
    "  --job-dir=$OUTDIR"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Hyperparameter tuning"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Writing hyperparam.yaml\n"
     ]
    }
   ],
   "source": [
    "%writefile hyperparam.yaml\n",
    "trainingInput:\n",
    "  scaleTier: STANDARD_1\n",
    "  hyperparameters:\n",
    "    hyperparameterMetricTag: accuracy\n",
    "    goal: MAXIMIZE\n",
    "    maxTrials: 30\n",
    "    maxParallelTrials: 1\n",
    "    params:\n",
    "    - parameterName: batch_size\n",
    "      type: INTEGER\n",
    "      minValue: 8\n",
    "      maxValue: 512\n",
    "      scaleType: UNIT_LOG_SCALE\n",
    "    - parameterName: hidden_units\n",
    "      type: CATEGORICAL\n",
    "      categoricalValues: [\"64 32\", \"256 128 16\", \"64 64 64 8\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "gs://youtube8m-4-train/youtube_8m_frame_level/hyperparam us-central1 job_youtube_8m_frame_level180525_061713\n",
      "jobId: job_youtube_8m_frame_level180525_061713\n",
      "state: QUEUED\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "CommandException: 1 files/objects could not be removed.\n",
      "Job [job_youtube_8m_frame_level180525_061713] submitted successfully.\n",
      "Your job is still active. You may view the status of your job with the command\n",
      "\n",
      "  $ gcloud ml-engine jobs describe job_youtube_8m_frame_level180525_061713\n",
      "\n",
      "or continue streaming the logs with the command\n",
      "\n",
      "  $ gcloud ml-engine jobs stream-logs job_youtube_8m_frame_level180525_061713\n"
     ]
    }
   ],
   "source": [
    "%bash\n",
    "OUTDIR=gs://$BUCKET/youtube_8m_frame_level/hyperparam\n",
    "JOBNAME=job_youtube_8m_frame_level$(date -u +%y%m%d_%H%M%S)\n",
    "echo $OUTDIR $REGION $JOBNAME\n",
    "gcloud storage rm --recursive --continue-on-error $OUTDIR\n",
    "gcloud ml-engine jobs submit training $JOBNAME \\\n",
    "  --region=$REGION \\\n",
    "  --module-name=trainer.task \\\n",
    "  --package-path=$PWD/trainer \\\n",
    "  --job-dir=$OUTDIR \\\n",
    "  --staging-bucket=gs://$BUCKET \\\n",
    "  --scale-tier=STANDARD_1 \\\n",
    "  --config=hyperparam.yaml \\\n",
    "  --runtime-version=1.5 \\\n",
    "  -- \\\n",
    "  --train_file_pattern=\"gs://youtube-8m-team/1/frame_level/train/train*.tfrecord\" \\\n",
    "  --eval_file_pattern=\"gs://youtube-8m-team/1/frame_level/validate/validate-0.tfrecord\"  \\\n",
    "  --output_dir=$OUTDIR \\\n",
    "  --train_steps=10000 \\\n",
    "  --top_k=5 \\\n",
    "  --job-dir=$OUTDIR"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
   "source": [
    "# Deploy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Deleting and deploying youtube_8m_frame_level v1 from gs://youtube8m-4-train/youtube_8m_frame_level/trained_model/export/exporter/1527220401/ ... this will take a few minutes\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Created ml engine model [projects/qwiklabs-gcp-8d3d0cd07cef9252/models/youtube_8m_frame_level].\n",
      "Creating version (this might take a few minutes)......\n",
      ".............................................................................................................done.\n"
     ]
    }
   ],
   "source": [
    "%bash\n",
    "MODEL_NAME=\"youtube_8m_frame_level\"\n",
    "MODEL_VERSION=\"v1\"\n",
    "MODEL_LOCATION=$(gcloud storage ls gs://$BUCKET/youtube_8m_frame_level/trained_model/export/exporter/ | tail -1)\n",
    "echo \"Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes\"\n",
    "#gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME}\n",
    "#gcloud ml-engine models delete ${MODEL_NAME}\n",
    "gcloud ml-engine models create $MODEL_NAME --regions $REGION\n",
    "gcloud ml-engine versions create $MODEL_VERSION --model $MODEL_NAME --origin $MODEL_LOCATION --runtime-version 1.5"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
   "source": [
    "# Prediction"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
   "source": [
    "### Prep"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "read_dataset_frame: _input_fn: file_pattern = gs://youtube-8m-team/1/frame_level/validate/validate-0.tfrecord\n",
      "read_dataset_frame: _input_fn: mode = eval\n",
      "read_dataset_frame: _input_fn: batch_size = 1\n",
      "read_dataset_frame: _input_fn: contexts = {'labels': <tensorflow.python.framework.sparse_tensor.SparseTensor object at 0x7f5fe6d7ee90>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:5' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:5' shape=(?,) dtype=string>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: decoded_rgb = Tensor(\"Reshape:0\", shape=(?, 1024), dtype=float32)\n",
      "read_dataset_frame: _input_fn: rgb_matrix = Tensor(\"concat:0\", shape=(20, 1024), dtype=float32)\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:4' shape=(?,) dtype=string>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: decoded_audio = Tensor(\"Reshape_1:0\", shape=(?, 128), dtype=float32)\n",
      "read_dataset_frame: _input_fn: audio_matrix = Tensor(\"concat_1:0\", shape=(20, 128), dtype=float32)\n",
      "read_dataset_frame: _input_fn: features = {'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'concat_1:0' shape=(20, 128) dtype=float32>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: features = {'labels': <tf.Tensor 'Cast_4:0' shape=(4716,) dtype=float32>, 'rgb': <tf.Tensor 'concat:0' shape=(20, 1024) dtype=float32>, 'audio': <tf.Tensor 'concat_1:0' shape=(20, 128) dtype=float32>, 'video_id': <tf.Tensor 'ParseSingleSequenceExample/ParseSingleSequenceExample:3' shape=() dtype=string>}\n",
      "read_dataset_frame: _input_fn: batch_features = {'labels': <tf.Tensor 'shuffle_batch:1' shape=(?, 4716) dtype=float32>, 'rgb': <tf.Tensor 'shuffle_batch:2' shape=(?, 20, 1024) dtype=float32>, 'audio': <tf.Tensor 'shuffle_batch:0' shape=(?, 20, 128) dtype=float32>, 'video_id': <tf.Tensor 'shuffle_batch:3' shape=(?,) dtype=string>}\n",
      "read_dataset_frame: _input_fn: batch_labels = Tensor(\"shuffle_batch:1\", shape=(?, 4716), dtype=float32)\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Let's call our input function to decode our data to put into BigQuery for testing predictions\n",
    "frame_file_pattern = 'gs://youtube-8m-team/1/frame_level/validate/validate-0.tfrecord'\n",
    "\n",
    "fn = read_dataset_frame(file_pattern = frame_file_pattern, mode = tf.estimator.ModeKeys.EVAL, batch_size = 1)\n",
    "batch_features, batch_labels = fn()\n",
    "for key,value in batch_features.items():\n",
    "  batch_features[key] = tf.squeeze(batch_features[key])\n",
    "fixed_batch_features = batch_features\n",
    "fixed_batch_labels = tf.squeeze(batch_labels)\n",
    "\n",
    "frames_list = []\n",
    "\n",
    "with tf.Session() as sess:\n",
    "  sess.run(tf.global_variables_initializer())\n",
    "  sess.run(tf.local_variables_initializer())\n",
    "  coord = tf.train.Coordinator()\n",
    "\n",
    "  threads = tf.train.start_queue_runners(sess = sess, coord = coord)\n",
    "  for _ in range(331):\n",
    "    features, labels = sess.run([fixed_batch_features, fixed_batch_labels])\n",
    "    \n",
    "    features[\"labels\"] = labels\n",
    "    frames_list.append(features)\n",
    "\n",
    "  coord.request_stop()\n",
    "  coord.join(threads)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "331"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# This is the number of videos from the frame level file we just processed\n",
    "len(frames_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "# Convert the nd-arrays to lists and cast to strings (video_id is already a single string)\n",
    "for items in frames_list:\n",
    "  items[\"labels\"] = str(items[\"labels\"].tolist())\n",
    "  items[\"rgb\"] = str(items[\"rgb\"].tolist())\n",
    "  items[\"audio\"] = str(items[\"audio\"].tolist())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>video_id</th>\n",
       "      <th>rgb</th>\n",
       "      <th>audio</th>\n",
       "      <th>labels</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>-0MpgkddrY4</td>\n",
       "      <td>[[0.0313420295715332, -0.28238344192504883, 1....</td>\n",
       "      <td>[[1.3019304275512695, 0.925459623336792, 1.458...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>-02RMEBtLDo</td>\n",
       "      <td>[[-0.7686580419540405, -0.47061872482299805, 2...</td>\n",
       "      <td>[[1.082322597503662, 1.7568323612213135, 1.929...</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>-00LLAtj0JE</td>\n",
       "      <td>[[-1.9921875, 0.6587929725646973, -0.752971768...</td>\n",
       "      <td>[[0.7215380668640137, -1.5686581134796143, -0....</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>-07wapPiIAg</td>\n",
       "      <td>[[-1.1921874284744263, 1.1450676918029785, 0.8...</td>\n",
       "      <td>[[0.7215380668640137, -1.2235599756240845, -0....</td>\n",
       "      <td>[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>-0AjrnQ0N3A</td>\n",
       "      <td>[[-1.9921875, -0.8627756834030151, 0.721538066...</td>\n",
       "      <td>[[0.7215380668640137, -1.5686581134796143, -0....</td>\n",
       "      <td>[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, ...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "      video_id                                                rgb  \\\n",
       "0  -0MpgkddrY4  [[0.0313420295715332, -0.28238344192504883, 1....   \n",
       "1  -02RMEBtLDo  [[-0.7686580419540405, -0.47061872482299805, 2...   \n",
       "2  -00LLAtj0JE  [[-1.9921875, 0.6587929725646973, -0.752971768...   \n",
       "3  -07wapPiIAg  [[-1.1921874284744263, 1.1450676918029785, 0.8...   \n",
       "4  -0AjrnQ0N3A  [[-1.9921875, -0.8627756834030151, 0.721538066...   \n",
       "\n",
       "                                               audio  \\\n",
       "0  [[1.3019304275512695, 0.925459623336792, 1.458...   \n",
       "1  [[1.082322597503662, 1.7568323612213135, 1.929...   \n",
       "2  [[0.7215380668640137, -1.5686581134796143, -0....   \n",
       "3  [[0.7215380668640137, -1.2235599756240845, -0....   \n",
       "4  [[0.7215380668640137, -1.5686581134796143, -0....   \n",
       "\n",
       "                                              labels  \n",
       "0  [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, ...  \n",
       "1  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...  \n",
       "2  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...  \n",
       "3  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ...  \n",
       "4  [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, ...  "
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Create a dataframe from the list\n",
    "frames_df = pd.DataFrame(frames_list)\n",
    "frames_df = frames_df[[\"video_id\", \"rgb\", \"audio\", \"labels\"]]\n",
    "frames_df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 155,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\n",
      "Load is 100% Complete\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Export dataframe to BigQuery\n",
    "import datalab.bigquery as bq\n",
    "bigquery_dataset_name = 'ryan_youtube'\n",
    "bigquery_table_name = 'tbl_frame_level'\n",
    "\n",
    "# Define BigQuery dataset and table\n",
    "dataset = bq.Dataset(bigquery_dataset_name)\n",
    "table = bq.Table(bigquery_dataset_name + '.' + bigquery_table_name)\n",
    "\n",
    "# Create BigQuery dataset if it does not already exist\n",
    "if not dataset.exists():\n",
    "    dataset.create()\n",
    "\n",
    "# Create or overwrite the existing table if it exists\n",
    "table_schema = bq.Schema.from_dataframe(frames_df)\n",
    "table.create(schema = table_schema, overwrite = True)\n",
    "\n",
    "# Use the PROJECT config variable instead of a hardcoded project id\n",
    "frames_df.to_gbq(destination_table = bigquery_dataset_name + '.' + bigquery_table_name, project_id = PROJECT, if_exists = \"replace\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "# Create SQL query; the project id comes from the PROJECT config variable\n",
    "query=\"\"\"\n",
    "SELECT\n",
    "  video_id,\n",
    "  rgb,\n",
    "  audio\n",
    "FROM\n",
    "  `{project}.ryan_youtube.tbl_frame_level`\n",
    "LIMIT\n",
    "  3\n",
    "\"\"\".format(project=PROJECT)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>video_id</th>\n",
       "      <th>rgb</th>\n",
       "      <th>audio</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>-0FDy3F9Gqo</td>\n",
       "      <td>[[-0.21963834762573242, 1.1293814182281494, 0....</td>\n",
       "      <td>[[-0.7372854948043823, 0.42349886894226074, -0...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>-0CK7kXtP-U</td>\n",
       "      <td>[[-1.9921875, -0.8627756834030151, 0.721538066...</td>\n",
       "      <td>[[-0.8314031362533569, -0.21963834762573242, -...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>-0QTVHnvT90</td>\n",
       "      <td>[[-1.9921875, -0.5804227590560913, 0.439185142...</td>\n",
       "      <td>[[0.5803616046905518, -0.7215992212295532, 0.5...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "      video_id                                                rgb  \\\n",
       "0  -0FDy3F9Gqo  [[-0.21963834762573242, 1.1293814182281494, 0....   \n",
       "1  -0CK7kXtP-U  [[-1.9921875, -0.8627756834030151, 0.721538066...   \n",
       "2  -0QTVHnvT90  [[-1.9921875, -0.5804227590560913, 0.439185142...   \n",
       "\n",
       "                                               audio  \n",
       "0  [[-0.7372854948043823, 0.42349886894226074, -0...  \n",
       "1  [[-0.8314031362533569, -0.21963834762573242, -...  \n",
       "2  [[0.5803616046905518, -0.7215992212295532, 0.5...  "
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Export BigQuery results to dataframe\n",
    "import google.datalab.bigquery as bq2\n",
    "df_predict = bq2.Query(query).execute().result().to_dataframe()\n",
    "df_predict.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
   "source": [
    "### Local prediction from local model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "# Format dataframe to new line delimited json strings and write out to json file\n",
    "import json\n",
    "\n",
    "with open('frame_level.json', 'w') as outfile:\n",
    "  for idx, row in df_predict.iterrows():\n",
    "    # Strip whitespace and brackets so rgb/audio become flat comma-separated strings,\n",
    "    # then let json.dumps handle quoting/escaping instead of hand-building the JSON\n",
    "    instance = {\"video_id\": row[\"video_id\"],\n",
    "                \"rgb\": row[\"rgb\"].replace(\" \", \"\").replace(\"[\", \"\").replace(\"]\", \"\"),\n",
    "                \"audio\": row[\"audio\"].replace(\" \", \"\").replace(\"[\", \"\").replace(\"]\", \"\")}\n",
    "    outfile.write(json.dumps(instance) + \"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CLASSES              LOGITS                                                                                                PREDICTIONS                PROBABILITIES\n",
      "[12, 34, 37, 47, 2]  [18.42068099975586, 15.861832618713379, 15.482503890991211, 15.482503890991211, 14.993090629577637]   [1.0, 1.0, 1.0, 1.0, 1.0]  [1.0, 0.9999998807907104, 0.9999998211860657, 0.9999998211860657, 0.9999997019767761]\n",
      "[12, 2, 37, 7, 13]   [11.051389694213867, 9.905458450317383, 9.862207412719727, 9.711677551269531, 9.616019248962402]      [1.0, 1.0, 1.0, 1.0, 1.0]  [0.9999841451644897, 0.999950110912323, 0.9999479055404663, 0.9999394416809082, 0.9999333620071411]\n",
      "[12, 34, 2, 37, 47]  [15.482503890991211, 14.816194534301758, 14.057759284973145, 14.057759284973145, 14.057759284973145]  [1.0, 1.0, 1.0, 1.0, 1.0]  [0.9999998211860657, 0.9999996423721313, 0.9999992251396179, 0.9999992251396179, 0.9999992251396179]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: /usr/local/envs/py2env/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n",
      "2018-05-25 05:16:19.800146: I tensorflow/core/platform/cpu_feature_guard.cc:140] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA\n",
      "\n"
     ]
    }
   ],
   "source": [
    "%bash\n",
    "model_dir=$(ls $PWD/trained_model/export/exporter | tail -1)\n",
    "gcloud ml-engine local predict \\\n",
    "    --model-dir=$PWD/trained_model/export/exporter/$model_dir \\\n",
    "    --json-instances=./frame_level.json"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
   "source": [
    "### GCloud ML-Engine prediction from deployed model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "# Format dataframe to instances list to get sent to ML-Engine\n",
    "instances = [{\"video_id\": row[\"video_id\"], \"rgb\": row[\"rgb\"].replace(\" \",\"\").replace(\"[\",\"\").replace(\"]\",\"\"), \"audio\": row[\"audio\"].replace(\" \",\"\").replace(\"[\",\"\").replace(\"]\",\"\")} for idx, row in df_predict.iterrows()]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "response = {u'predictions': [{u'probabilities': [1.0, 1.0, 1.0, 1.0, 1.0], u'logits': [18.42068099975586, 18.42068099975586, 18.42068099975586, 18.42068099975586, 18.42068099975586], u'classes': [0, 1, 2, 3, 4], u'predictions': [1.0, 1.0, 1.0, 1.0, 1.0]}, {u'probabilities': [1.0, 1.0, 1.0, 1.0, 1.0], u'logits': [18.42068099975586, 18.42068099975586, 18.42068099975586, 18.42068099975586, 18.42068099975586], u'classes': [0, 1, 2, 3, 4], u'predictions': [1.0, 1.0, 1.0, 1.0, 1.0]}, {u'probabilities': [1.0, 1.0, 1.0, 1.0, 1.0], u'logits': [18.42068099975586, 18.42068099975586, 18.42068099975586, 18.42068099975586, 18.42068099975586], u'classes': [0, 1, 2, 3, 4], u'predictions': [1.0, 1.0, 1.0, 1.0, 1.0]}]}\n"
     ]
    }
   ],
   "source": [
    "# Send instance dictionary to receive response from ML-Engine for online prediction\n",
    "from googleapiclient import discovery\n",
    "from oauth2client.client import GoogleCredentials\n",
    "import json\n",
    "\n",
    "credentials = GoogleCredentials.get_application_default()\n",
    "api = discovery.build('ml', 'v1', credentials=credentials)\n",
    "\n",
    "request_data = {\"instances\": instances}\n",
    "\n",
    "parent = 'projects/%s/models/%s/versions/%s' % (PROJECT, 'youtube_8m_frame_level', 'v1')\n",
    "response = api.projects().predict(body = request_data, name = parent).execute()\n",
    "print(\"response = {}\".format(response))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
