{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Wide and Deep on TensorFlow (notebook style)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 对wide and deep模型的理解\n",
    "wide and deep模型的核心思想是结合线性模型的记忆能力(memorization)和DNN模型的泛化能力(generalization)，在训练过程中同时优化两个模型的参数，从而使模型的性能达到最优。WD模型分为wide组件和deep组件，wide端对应线性模型，deep端对应DNN模型。WD模型采用联合训练(joint training)，模型的训练误差会同时反馈到wide端和deep端进行参数更新。wide端和deep端分别专注于擅长的方面，wide端通过离散特征组合进行memorization，deep端通过特征的embedding进行generalization，这样单个模型的复杂度得以控制，模型的整体性能也得以提高。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 结果分析见最后"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Imports and constants"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using TensorFlow version 1.13.1\n",
      "\n",
      "Feature columns are:  ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'] \n",
      "\n"
     ]
    }
   ],
   "source": [
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import time\n",
    "\n",
    "import tensorflow as tf\n",
    "\n",
    "# INFO tracks training progress; the default is WARN, and ERROR is quietest.\n",
    "tf.logging.set_verbosity(tf.logging.INFO)\n",
    "\n",
    "print(\"Using TensorFlow version %s\\n\" % (tf.__version__))\n",
    "\n",
    "\n",
    "# Criteo-style schema: 13 continuous columns (I1..I13) and 26 categorical columns (C1..C26).\n",
    "CONTINUOUS_COLUMNS = [\"I\" + str(i) for i in range(1, 14)]   # I1-I13 inclusive\n",
    "CATEGORICAL_COLUMNS = [\"C\" + str(i) for i in range(1, 27)]  # C1-C26 inclusive\n",
    "LABEL_COLUMN = [\"clicked\"]\n",
    "\n",
    "# The label is stored first in each training row.\n",
    "TRAIN_DATA_COLUMNS = LABEL_COLUMN + CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS\n",
    "\n",
    "FEATURE_COLUMNS = CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS\n",
    "\n",
    "print('Feature columns are: ', FEATURE_COLUMNS, '\\n')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Input file parsing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input function configured\n"
     ]
    }
   ],
   "source": [
    "BATCH_SIZE = 400\n",
    "\n",
    "def generate_input_fn(filename, batch_size=BATCH_SIZE):\n",
    "    \"\"\"Return a TF 1.x queue-based input_fn that reads `batch_size` CSV rows per call.\"\"\"\n",
    "    def _input_fn():\n",
    "        # Queue of input filenames; cycles through the (single) file across epochs.\n",
    "        filename_queue = tf.train.string_input_producer([filename])\n",
    "        reader = tf.TextLineReader()\n",
    "        # Reads out batch_size number of lines\n",
    "        # (may return fewer near the end of the queue).\n",
    "        key, value = reader.read_up_to(filename_queue, num_records=batch_size)\n",
    "        \n",
    "        # 1 int label, 13 ints, 26 strings\n",
    "        # The per-column defaults double as the dtype spec for decode_csv below.\n",
    "        cont_defaults = [ [0] for i in range(1,14) ]\n",
    "        cate_defaults = [ [\" \"] for i in range(1,27) ]\n",
    "        label_defaults = [ [0] ]\n",
    "        column_headers = TRAIN_DATA_COLUMNS\n",
    "        # The label is the first column of the data.\n",
    "        record_defaults = label_defaults + cont_defaults + cate_defaults\n",
    "\n",
    "        # Decode CSV data that was just read out. \n",
    "        # Note that this does NOT return a dict, \n",
    "        # so we will need to zip it up with our headers\n",
    "        columns = tf.decode_csv(\n",
    "            value, record_defaults=record_defaults)\n",
    "        \n",
    "        # all_columns is a dictionary that maps from column names to tensors of the data.\n",
    "        all_columns = dict(zip(column_headers, columns))\n",
    "        \n",
    "        # Pop and save our labels \n",
    "        # dict.pop() returns the popped array of values; exactly what we need!\n",
    "        labels = all_columns.pop(LABEL_COLUMN[0])\n",
    "        \n",
    "        # the remaining columns are our features\n",
    "        features = all_columns \n",
    "\n",
    "        # Sparse categorical features must be represented with an additional dimension. \n",
    "        # There is no additional work needed for the Continuous columns; they are the unaltered columns.\n",
    "        # See docs for tf.SparseTensor for more info\n",
    "        for feature_name in CATEGORICAL_COLUMNS:\n",
    "            features[feature_name] = tf.expand_dims(features[feature_name], -1)\n",
    "\n",
    "        return features, labels\n",
    "\n",
    "    return _input_fn\n",
    "\n",
    "print('input function configured')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Create Feature Columns"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Sparse Columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Wide/Sparse columns configured\n"
     ]
    }
   ],
   "source": [
    "# One hashed sparse column per categorical feature -- the wide (linear) side.\n",
    "wide_columns = [\n",
    "    tf.feature_column.categorical_column_with_hash_bucket(name, hash_bucket_size=512)\n",
    "    for name in CATEGORICAL_COLUMNS\n",
    "]\n",
    "\n",
    "print('Wide/Sparse columns configured')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Continuous columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "deep/continuous columns configured\n"
     ]
    }
   ],
   "source": [
    "# One numeric column per continuous feature -- the deep (DNN) side.\n",
    "deep_columns = [tf.feature_column.numeric_column(name)\n",
    "                for name in CONTINUOUS_COLUMNS]\n",
    "\n",
    "print('deep/continuous columns configured')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Transformations"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Group feature columns into 2 objects"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "wide and deep columns configured\n"
     ]
    }
   ],
   "source": [
    "# Each sparse (wide) column also feeds the deep side through a 9-dim embedding.\n",
    "deep_columns.extend(\n",
    "    tf.feature_column.embedding_column(col, dimension=9) for col in wide_columns\n",
    ")\n",
    "\n",
    "print('wide and deep columns configured')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Create the model\n",
    "Wide: Linear Classifier  \n",
    "Deep: Deep Neural Net Classifier  \n",
    "Wide & Deep: Combined Linear and Deep Classifier  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_model_dir(model_type):\n",
    "    \"\"\"Return a fresh, timestamped model directory name.\n",
    "\n",
    "    Returns something like models/model_WIDE_AND_DEEP_1493043407, so\n",
    "    repeated runs never resume from a stale checkpoint.\n",
    "    \"\"\"\n",
    "    return 'models/model_' + model_type + '_' + str(int(time.time()))\n",
    "\n",
    "# Specify the desired model_dir \n",
    "def get_model(model_type, model_dir):\n",
    "    \"\"\"Build the requested estimator.\n",
    "\n",
    "    Args:\n",
    "        model_type: one of 'WIDE', 'DEEP' or 'WIDE_AND_DEEP'.\n",
    "        model_dir: directory where checkpoints and summaries are written.\n",
    "\n",
    "    Returns:\n",
    "        A tf.estimator classifier of the requested kind.\n",
    "\n",
    "    Raises:\n",
    "        ValueError: if model_type is not a supported name. (Previously an\n",
    "            unknown type silently returned None, which only surfaced later\n",
    "            as a confusing AttributeError on .train().)\n",
    "    \"\"\"\n",
    "    print(\"Model directory = %s\" % model_dir)\n",
    "    \n",
    "    # There are more options here than shown here. \n",
    "    # We are using this to show additional checkpointing for illustrative purposes.\n",
    "    # In a real system with far more samples, you would \n",
    "    #     likely choose to save checkpoints less frequently.\n",
    "    # NOTE: only the WIDE_AND_DEEP branch passes this config, matching the\n",
    "    # original behavior; WIDE and DEEP use the estimator defaults.\n",
    "    runconfig = tf.estimator.RunConfig(\n",
    "        save_checkpoints_secs=None,\n",
    "        save_checkpoints_steps=100,\n",
    "    )\n",
    "    \n",
    "    # Linear Classifier (wide only)\n",
    "    if model_type == 'WIDE':\n",
    "        m = tf.estimator.LinearClassifier(\n",
    "            model_dir=model_dir, \n",
    "            feature_columns=wide_columns,\n",
    "            optimizer=tf.train.FtrlOptimizer(0.1, l2_regularization_strength=1.0))\n",
    "\n",
    "    # Deep Neural Net Classifier (deep only)\n",
    "    elif model_type == 'DEEP':\n",
    "        m = tf.estimator.DNNClassifier(\n",
    "            model_dir=model_dir,\n",
    "            feature_columns=deep_columns,\n",
    "            hidden_units=[128, 64, 16],\n",
    "            optimizer=tf.train.ProximalAdagradOptimizer(\n",
    "                learning_rate=0.1,\n",
    "                l1_regularization_strength=0.001,\n",
    "                l2_regularization_strength=0.001))\n",
    "\n",
    "    # Combined Linear and Deep Classifier\n",
    "    elif model_type == 'WIDE_AND_DEEP':\n",
    "        m = tf.estimator.DNNLinearCombinedClassifier(\n",
    "            model_dir=model_dir,\n",
    "            linear_feature_columns=wide_columns,\n",
    "            linear_optimizer=tf.train.FtrlOptimizer(0.1, l2_regularization_strength=1.0),\n",
    "            dnn_feature_columns=deep_columns,\n",
    "            dnn_optimizer=tf.train.ProximalAdagradOptimizer(\n",
    "                learning_rate=0.1,\n",
    "                l1_regularization_strength=0.001,\n",
    "                l2_regularization_strength=0.001),\n",
    "            dnn_hidden_units=[128, 64, 16],\n",
    "            config=runconfig)\n",
    "\n",
    "    else:\n",
    "        # Fail fast on typos instead of returning None.\n",
    "        raise ValueError('Unknown model_type: %r' % (model_type,))\n",
    "\n",
    "    print('estimator built')\n",
    "    \n",
    "    return m"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Fit the model (train it)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# LOCAL. Update these paths as appropriate\n",
    "train_file = \"data/train.csv\"\n",
    "eval_file  = \"data/eval.csv\"\n",
    "train_sample_size = 800000\n",
    "# Use floor division: Estimator.train expects an integral step count;\n",
    "# under Python 3, `/` would yield the float 2000.0 here.\n",
    "train_steps = train_sample_size // BATCH_SIZE # 800000/400 = 2000"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "fit wide and deep model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model directory = models/model_WIDE_AND_DEEP_1564205992\n",
      "INFO:tensorflow:Using config: {'_model_dir': 'models/model_WIDE_AND_DEEP_1564205992', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': 100, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\n",
      "graph_options {\n",
      "  rewrite_options {\n",
      "    meta_optimizer_iterations: ONE\n",
      "  }\n",
      "}\n",
      ", '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x0000028F2A653F60>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n",
      "estimator built\n",
      "WARNING:tensorflow:From D:\\Program Files\\Anaconda\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Colocations handled automatically by placer.\n",
      "WARNING:tensorflow:From <ipython-input-2-7b6e08de6b32>:5: string_input_producer (from tensorflow.python.training.input) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensor_slices(string_tensor).shuffle(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If `shuffle=False`, omit the `.shuffle(...)`.\n",
      "WARNING:tensorflow:From D:\\Program Files\\Anaconda\\lib\\site-packages\\tensorflow\\python\\training\\input.py:278: input_producer (from tensorflow.python.training.input) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensor_slices(input_tensor).shuffle(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If `shuffle=False`, omit the `.shuffle(...)`.\n",
      "WARNING:tensorflow:From D:\\Program Files\\Anaconda\\lib\\site-packages\\tensorflow\\python\\training\\input.py:190: limit_epochs (from tensorflow.python.training.input) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensors(tensor).repeat(num_epochs)`.\n",
      "WARNING:tensorflow:From D:\\Program Files\\Anaconda\\lib\\site-packages\\tensorflow\\python\\training\\input.py:199: QueueRunner.__init__ (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "To construct input pipelines, use the `tf.data` module.\n",
      "WARNING:tensorflow:From D:\\Program Files\\Anaconda\\lib\\site-packages\\tensorflow\\python\\training\\input.py:199: add_queue_runner (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "To construct input pipelines, use the `tf.data` module.\n",
      "WARNING:tensorflow:From D:\\Program Files\\Anaconda\\lib\\site-packages\\tensorflow\\python\\training\\input.py:202: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.cast instead.\n",
      "WARNING:tensorflow:From <ipython-input-2-7b6e08de6b32>:6: TextLineReader.__init__ (from tensorflow.python.ops.io_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.TextLineDataset`.\n",
      "INFO:tensorflow:Calling model_fn.\n",
      "WARNING:tensorflow:From D:\\Program Files\\Anaconda\\lib\\site-packages\\tensorflow\\python\\feature_column\\feature_column_v2.py:2997: HashedCategoricalColumn._num_buckets (from tensorflow.python.feature_column.feature_column_v2) is deprecated and will be removed after 2018-11-30.\n",
      "Instructions for updating:\n",
      "The old _FeatureColumn APIs are being deprecated. Please use the new FeatureColumn APIs instead.\n",
      "WARNING:tensorflow:From D:\\Program Files\\Anaconda\\lib\\site-packages\\tensorflow\\python\\ops\\array_grad.py:425: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.cast instead.\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Create CheckpointSaverHook.\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "WARNING:tensorflow:From D:\\Program Files\\Anaconda\\lib\\site-packages\\tensorflow\\python\\training\\monitored_session.py:809: start_queue_runners (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "To construct input pipelines, use the `tf.data` module.\n",
      "INFO:tensorflow:Saving checkpoints for 0 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:loss = 398723.75, step = 1\n",
      "INFO:tensorflow:Saving checkpoints for 100 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 13.6201\n",
      "INFO:tensorflow:loss = 180.5625, step = 101 (7.342 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 200 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 16.9787\n",
      "INFO:tensorflow:loss = 203.93141, step = 201 (5.900 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 300 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 17.1295\n",
      "INFO:tensorflow:loss = 181.53152, step = 301 (5.828 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 400 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 17.0383\n",
      "INFO:tensorflow:loss = 209.62898, step = 401 (5.869 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 500 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "WARNING:tensorflow:From D:\\Program Files\\Anaconda\\lib\\site-packages\\tensorflow\\python\\training\\saver.py:966: remove_checkpoint (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use standard file APIs to delete files with this prefix.\n",
      "INFO:tensorflow:global_step/sec: 16.7594\n",
      "INFO:tensorflow:loss = 227.95667, step = 501 (5.967 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 600 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 16.9501\n",
      "INFO:tensorflow:loss = 188.264, step = 601 (5.900 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 700 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 16.4552\n",
      "INFO:tensorflow:loss = 195.31915, step = 701 (6.088 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 800 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 17.4847\n",
      "INFO:tensorflow:loss = 219.70407, step = 801 (5.709 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 900 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 16.7145\n",
      "INFO:tensorflow:loss = 199.2433, step = 901 (5.983 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 1000 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 16.8665\n",
      "INFO:tensorflow:loss = 193.58105, step = 1001 (5.939 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 1100 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 16.6013\n",
      "INFO:tensorflow:loss = 224.07233, step = 1101 (6.019 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 1200 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 16.8054\n",
      "INFO:tensorflow:loss = 202.38179, step = 1201 (5.952 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 1300 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 16.18\n",
      "INFO:tensorflow:loss = 190.314, step = 1301 (6.174 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 1400 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 17.0375\n",
      "INFO:tensorflow:loss = 174.3645, step = 1401 (5.869 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 1500 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:global_step/sec: 17.5121\n",
      "INFO:tensorflow:loss = 190.64795, step = 1501 (5.710 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 1600 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 16.9157\n",
      "INFO:tensorflow:loss = 177.06445, step = 1601 (5.919 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 1700 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 16.9181\n",
      "INFO:tensorflow:loss = 203.56302, step = 1701 (5.909 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 1800 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 16.4674\n",
      "INFO:tensorflow:loss = 208.903, step = 1801 (6.067 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 1900 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 17.1099\n",
      "INFO:tensorflow:loss = 194.56729, step = 1901 (5.852 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 2000 into models/model_WIDE_AND_DEEP_1564205992\\model.ckpt.\n",
      "INFO:tensorflow:Loss for final step: 201.61578.\n",
      "wide and deep model fit done\n",
      "Wall time: 2min 37s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "MODEL_TYPE = 'WIDE_AND_DEEP'\n",
    "# Fresh timestamped directory so this run never resumes an older checkpoint.\n",
    "model_dir = create_model_dir(MODEL_TYPE)\n",
    "m_wd = get_model(MODEL_TYPE, model_dir)\n",
    "\n",
    "m_wd.train(input_fn=generate_input_fn(train_file, BATCH_SIZE), steps=train_steps)\n",
    "\n",
    "print('wide and deep model fit done')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "fit wide model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model directory = models/model_WIDE_1564206160\n",
      "INFO:tensorflow:Using default config.\n",
      "INFO:tensorflow:Using config: {'_model_dir': 'models/model_WIDE_1564206160', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true\n",
      "graph_options {\n",
      "  rewrite_options {\n",
      "    meta_optimizer_iterations: ONE\n",
      "  }\n",
      "}\n",
      ", '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x0000028F5C320198>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n",
      "estimator built\n",
      "INFO:tensorflow:Calling model_fn.\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Create CheckpointSaverHook.\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Saving checkpoints for 0 into models/model_WIDE_1564206160\\model.ckpt.\n",
      "INFO:tensorflow:loss = 277.25897, step = 1\n",
      "INFO:tensorflow:global_step/sec: 71.5631\n",
      "INFO:tensorflow:loss = 181.4563, step = 101 (1.397 sec)\n",
      "INFO:tensorflow:global_step/sec: 166.292\n",
      "INFO:tensorflow:loss = 207.69904, step = 201 (0.601 sec)\n",
      "INFO:tensorflow:global_step/sec: 155.004\n",
      "INFO:tensorflow:loss = 184.21474, step = 301 (0.645 sec)\n",
      "INFO:tensorflow:global_step/sec: 155.994\n",
      "INFO:tensorflow:loss = 211.92517, step = 401 (0.641 sec)\n",
      "INFO:tensorflow:global_step/sec: 168.505\n",
      "INFO:tensorflow:loss = 228.10217, step = 501 (0.593 sec)\n",
      "INFO:tensorflow:global_step/sec: 169.769\n",
      "INFO:tensorflow:loss = 189.48032, step = 601 (0.589 sec)\n",
      "INFO:tensorflow:global_step/sec: 162.129\n",
      "INFO:tensorflow:loss = 197.08794, step = 701 (0.617 sec)\n",
      "INFO:tensorflow:global_step/sec: 166.065\n",
      "INFO:tensorflow:loss = 223.11761, step = 801 (0.602 sec)\n",
      "INFO:tensorflow:global_step/sec: 169.089\n",
      "INFO:tensorflow:loss = 200.125, step = 901 (0.591 sec)\n",
      "INFO:tensorflow:global_step/sec: 168.33\n",
      "INFO:tensorflow:loss = 197.54558, step = 1001 (0.614 sec)\n",
      "INFO:tensorflow:global_step/sec: 160.093\n",
      "INFO:tensorflow:loss = 227.97833, step = 1101 (0.605 sec)\n",
      "INFO:tensorflow:global_step/sec: 169.346\n",
      "INFO:tensorflow:loss = 208.80013, step = 1201 (0.591 sec)\n",
      "INFO:tensorflow:global_step/sec: 169.359\n",
      "INFO:tensorflow:loss = 187.96533, step = 1301 (0.590 sec)\n",
      "INFO:tensorflow:global_step/sec: 169.737\n",
      "INFO:tensorflow:loss = 178.3165, step = 1401 (0.589 sec)\n",
      "INFO:tensorflow:global_step/sec: 156.962\n",
      "INFO:tensorflow:loss = 193.28058, step = 1501 (0.637 sec)\n",
      "INFO:tensorflow:global_step/sec: 159.824\n",
      "INFO:tensorflow:loss = 187.61307, step = 1601 (0.626 sec)\n",
      "INFO:tensorflow:global_step/sec: 156.4\n",
      "INFO:tensorflow:loss = 210.60104, step = 1701 (0.639 sec)\n",
      "INFO:tensorflow:global_step/sec: 152.82\n",
      "INFO:tensorflow:loss = 215.8465, step = 1801 (0.654 sec)\n",
      "INFO:tensorflow:global_step/sec: 168.787\n",
      "INFO:tensorflow:loss = 201.75375, step = 1901 (0.592 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 2000 into models/model_WIDE_1564206160\\model.ckpt.\n",
      "INFO:tensorflow:Loss for final step: 207.43643.\n",
      "wide model fit done\n"
     ]
    }
   ],
   "source": [
    "MODEL_TYPE = 'WIDE'\n",
    "# Train the linear (wide-only) baseline for comparison.\n",
    "model_dir = create_model_dir(MODEL_TYPE)\n",
    "m_wide = get_model(MODEL_TYPE, model_dir)\n",
    "\n",
    "m_wide.train(input_fn=generate_input_fn(train_file, BATCH_SIZE), steps=train_steps)\n",
    "\n",
    "print('wide model fit done')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "fit deep model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model directory = models/model_DEEP_1564206195\n",
      "INFO:tensorflow:Using default config.\n",
      "INFO:tensorflow:Using config: {'_model_dir': 'models/model_DEEP_1564206195', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true\n",
      "graph_options {\n",
      "  rewrite_options {\n",
      "    meta_optimizer_iterations: ONE\n",
      "  }\n",
      "}\n",
      ", '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x0000028F5B8E77B8>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n",
      "estimator built\n",
      "INFO:tensorflow:Calling model_fn.\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Create CheckpointSaverHook.\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Saving checkpoints for 0 into models/model_DEEP_1564206195\\model.ckpt.\n",
      "INFO:tensorflow:loss = 20052.895, step = 1\n",
      "INFO:tensorflow:global_step/sec: 55.1847\n",
      "INFO:tensorflow:loss = 215.90666, step = 101 (1.812 sec)\n",
      "INFO:tensorflow:global_step/sec: 110.789\n",
      "INFO:tensorflow:loss = 235.5365, step = 201 (0.913 sec)\n",
      "INFO:tensorflow:global_step/sec: 109.887\n",
      "INFO:tensorflow:loss = 262.70398, step = 301 (0.900 sec)\n",
      "INFO:tensorflow:global_step/sec: 115.735\n",
      "INFO:tensorflow:loss = 220.09802, step = 401 (0.864 sec)\n",
      "INFO:tensorflow:global_step/sec: 113.277\n",
      "INFO:tensorflow:loss = 247.53798, step = 501 (0.883 sec)\n",
      "INFO:tensorflow:global_step/sec: 102.295\n",
      "INFO:tensorflow:loss = 200.37791, step = 601 (0.978 sec)\n",
      "INFO:tensorflow:global_step/sec: 116.527\n",
      "INFO:tensorflow:loss = 213.64032, step = 701 (0.868 sec)\n",
      "INFO:tensorflow:global_step/sec: 115.726\n",
      "INFO:tensorflow:loss = 234.68028, step = 801 (0.854 sec)\n",
      "INFO:tensorflow:global_step/sec: 115.232\n",
      "INFO:tensorflow:loss = 223.21991, step = 901 (0.868 sec)\n",
      "INFO:tensorflow:global_step/sec: 115.218\n",
      "INFO:tensorflow:loss = 218.15079, step = 1001 (0.868 sec)\n",
      "INFO:tensorflow:global_step/sec: 98.7486\n",
      "INFO:tensorflow:loss = 241.23785, step = 1101 (1.023 sec)\n",
      "INFO:tensorflow:global_step/sec: 92.5998\n",
      "INFO:tensorflow:loss = 226.13986, step = 1201 (1.082 sec)\n",
      "INFO:tensorflow:global_step/sec: 95.4201\n",
      "INFO:tensorflow:loss = 210.21223, step = 1301 (1.046 sec)\n",
      "INFO:tensorflow:global_step/sec: 99.0098\n",
      "INFO:tensorflow:loss = 194.98785, step = 1401 (1.000 sec)\n",
      "INFO:tensorflow:global_step/sec: 99.2862\n",
      "INFO:tensorflow:loss = 216.04953, step = 1501 (1.007 sec)\n",
      "INFO:tensorflow:global_step/sec: 100.155\n",
      "INFO:tensorflow:loss = 200.82318, step = 1601 (1.028 sec)\n",
      "INFO:tensorflow:global_step/sec: 98.5037\n",
      "INFO:tensorflow:loss = 215.2063, step = 1701 (0.985 sec)\n",
      "INFO:tensorflow:global_step/sec: 100.576\n",
      "INFO:tensorflow:loss = 217.18033, step = 1801 (0.994 sec)\n",
      "INFO:tensorflow:global_step/sec: 99.4576\n",
      "INFO:tensorflow:loss = 204.51273, step = 1901 (1.005 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 2000 into models/model_DEEP_1564206195\\model.ckpt.\n",
      "INFO:tensorflow:Loss for final step: 223.48927.\n",
      "deep model fit done\n"
     ]
    }
   ],
   "source": [
    "MODEL_TYPE = 'DEEP'\n",
    "# Train the DNN (deep-only) baseline for comparison.\n",
    "model_dir = create_model_dir(MODEL_TYPE)\n",
    "m_deep = get_model(MODEL_TYPE, model_dir)\n",
    "\n",
    "m_deep.train(input_fn=generate_input_fn(train_file, BATCH_SIZE), steps=train_steps)\n",
    "\n",
    "print('deep model fit done')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Evaluate the accuracy of the model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "evaluate wide and deep model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Calling model_fn.\n",
      "WARNING:tensorflow:From D:\\Program Files\\Anaconda\\lib\\site-packages\\tensorflow\\python\\ops\\metrics_impl.py:2002: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Deprecated in favor of operator or tf.math.divide.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Starting evaluation at 2019-07-27T05:55:04Z\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "WARNING:tensorflow:From D:\\Program Files\\Anaconda\\lib\\site-packages\\tensorflow\\python\\training\\saver.py:1266: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use standard file APIs to check for files with this prefix.\n",
      "INFO:tensorflow:Restoring parameters from models/model_WIDE_AND_DEEP_1564205992\\model.ckpt-2000\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Evaluation [50/500]\n",
      "INFO:tensorflow:Evaluation [100/500]\n",
      "INFO:tensorflow:Evaluation [150/500]\n",
      "INFO:tensorflow:Evaluation [200/500]\n",
      "INFO:tensorflow:Evaluation [250/500]\n",
      "INFO:tensorflow:Evaluation [300/500]\n",
      "INFO:tensorflow:Evaluation [350/500]\n",
      "INFO:tensorflow:Evaluation [400/500]\n",
      "INFO:tensorflow:Evaluation [450/500]\n",
      "INFO:tensorflow:Evaluation [500/500]\n",
      "INFO:tensorflow:Finished evaluation at 2019-07-27-05:55:11\n",
      "INFO:tensorflow:Saving dict for global step 2000: accuracy = 0.772235, accuracy_baseline = 0.74883497, auc = 0.7419083, auc_precision_recall = 0.5073214, average_loss = 0.4897562, global_step = 2000, label/mean = 0.251165, loss = 195.90248, precision = 0.60390764, prediction/mean = 0.26304975, recall = 0.27073836\n",
      "INFO:tensorflow:Saving 'checkpoint_path' summary for global step 2000: models/model_WIDE_AND_DEEP_1564205992\\model.ckpt-2000\n",
      "wide and deep model evaluate done\n",
      "Accuracy: 0.772235\n",
      "{'accuracy': 0.772235, 'accuracy_baseline': 0.74883497, 'auc': 0.7419083, 'auc_precision_recall': 0.5073214, 'average_loss': 0.4897562, 'label/mean': 0.251165, 'loss': 195.90248, 'precision': 0.60390764, 'prediction/mean': 0.26304975, 'recall': 0.27073836, 'global_step': 2000}\n",
      "Wall time: 18.1 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "eval_sample_size = 200000 # this can be found with a 'wc -l eval.csv'\n",
    "# Use floor division: `steps` must be an integer number of batches\n",
    "# (true division returns a float here because of `from __future__ import division`).\n",
    "eval_steps = eval_sample_size // BATCH_SIZE # 200000 // 400 = 500\n",
    "\n",
    "# Evaluate the joint wide-and-deep model on the held-out eval set.\n",
    "results_wd = m_wd.evaluate(input_fn=generate_input_fn(eval_file, BATCH_SIZE),\n",
    "                           steps=eval_steps)\n",
    "print('wide and deep model evaluate done')\n",
    "\n",
    "print('Accuracy: %s' % results_wd['accuracy'])\n",
    "print(results_wd)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "evaluate wide model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Calling model_fn.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Starting evaluation at 2019-07-27T05:59:38Z\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Restoring parameters from models/model_WIDE_1564206160\\model.ckpt-2000\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Evaluation [50/500]\n",
      "INFO:tensorflow:Evaluation [100/500]\n",
      "INFO:tensorflow:Evaluation [150/500]\n",
      "INFO:tensorflow:Evaluation [200/500]\n",
      "INFO:tensorflow:Evaluation [250/500]\n",
      "INFO:tensorflow:Evaluation [300/500]\n",
      "INFO:tensorflow:Evaluation [350/500]\n",
      "INFO:tensorflow:Evaluation [400/500]\n",
      "INFO:tensorflow:Evaluation [450/500]\n",
      "INFO:tensorflow:Evaluation [500/500]\n",
      "INFO:tensorflow:Finished evaluation at 2019-07-27-05:59:42\n",
      "INFO:tensorflow:Saving dict for global step 2000: accuracy = 0.766725, accuracy_baseline = 0.74883497, auc = 0.7276709, auc_precision_recall = 0.4817805, average_loss = 0.49881992, global_step = 2000, label/mean = 0.251165, loss = 199.52797, precision = 0.58729386, prediction/mean = 0.26319915, recall = 0.23960344\n",
      "INFO:tensorflow:Saving 'checkpoint_path' summary for global step 2000: models/model_WIDE_1564206160\\model.ckpt-2000\n",
      "wide model evaluate done\n",
      "Accuracy: 0.766725\n",
      "{'accuracy': 0.766725, 'accuracy_baseline': 0.74883497, 'auc': 0.7276709, 'auc_precision_recall': 0.4817805, 'average_loss': 0.49881992, 'label/mean': 0.251165, 'loss': 199.52797, 'precision': 0.58729386, 'prediction/mean': 0.26319915, 'recall': 0.23960344, 'global_step': 2000}\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the wide (linear-only) model on the same eval set for comparison.\n",
    "results_wide = m_wide.evaluate(\n",
    "    input_fn=generate_input_fn(eval_file, BATCH_SIZE), steps=eval_steps)\n",
    "print('wide model evaluate done')\n",
    "\n",
    "print('Accuracy: %s' % results_wide['accuracy'])\n",
    "print(results_wide)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "evaluate deep model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Calling model_fn.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Starting evaluation at 2019-07-27T06:00:19Z\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Restoring parameters from models/model_DEEP_1564206195\\model.ckpt-2000\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Evaluation [50/500]\n",
      "INFO:tensorflow:Evaluation [100/500]\n",
      "INFO:tensorflow:Evaluation [150/500]\n",
      "INFO:tensorflow:Evaluation [200/500]\n",
      "INFO:tensorflow:Evaluation [250/500]\n",
      "INFO:tensorflow:Evaluation [300/500]\n",
      "INFO:tensorflow:Evaluation [350/500]\n",
      "INFO:tensorflow:Evaluation [400/500]\n",
      "INFO:tensorflow:Evaluation [450/500]\n",
      "INFO:tensorflow:Evaluation [500/500]\n",
      "INFO:tensorflow:Finished evaluation at 2019-07-27-06:00:24\n",
      "INFO:tensorflow:Saving dict for global step 2000: accuracy = 0.76307, accuracy_baseline = 0.74883497, auc = 0.66074526, auc_precision_recall = 0.4300678, average_loss = 0.5328898, global_step = 2000, label/mean = 0.251165, loss = 213.15591, precision = 0.6330747, prediction/mean = 0.24876134, recall = 0.13481177\n",
      "INFO:tensorflow:Saving 'checkpoint_path' summary for global step 2000: models/model_DEEP_1564206195\\model.ckpt-2000\n",
      "deep model evaluate done\n",
      "Accuracy: 0.76307\n",
      "{'accuracy': 0.76307, 'accuracy_baseline': 0.74883497, 'auc': 0.66074526, 'auc_precision_recall': 0.4300678, 'average_loss': 0.5328898, 'label/mean': 0.251165, 'loss': 213.15591, 'precision': 0.6330747, 'prediction/mean': 0.24876134, 'recall': 0.13481177, 'global_step': 2000}\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the deep (DNN-only) model on the same eval set for comparison.\n",
    "results_deep = m_deep.evaluate(\n",
    "    input_fn=generate_input_fn(eval_file, BATCH_SIZE), steps=eval_steps)\n",
    "print('deep model evaluate done')\n",
    "\n",
    "print('Accuracy: %s' % results_deep['accuracy'])\n",
    "print(results_deep)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### results analysis\n",
    "wide and deep模型accuracy最高，average_loss最小，但相对于wide model和deep model准确度提高不大，并且3个模型的精度都不高。可能的原因是：构建特征时，wide model只用了CATEGORICAL_COLUMNS，没有对CONTINUOUS_COLUMNS进行bucketize，也没有进行feature cross，并且只迭代了2000次。  \n",
    "总之，这只是一个base版本的model，需要进一步迭代优化，通过base model的搭建，基本理解了wide and deep模型。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>wd</th>\n",
       "      <th>wide</th>\n",
       "      <th>deep</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>accuracy</th>\n",
       "      <td>0.772235</td>\n",
       "      <td>0.766725</td>\n",
       "      <td>0.76307</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>average_loss</th>\n",
       "      <td>0.489756</td>\n",
       "      <td>0.498820</td>\n",
       "      <td>0.53289</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                    wd      wide     deep\n",
       "accuracy      0.772235  0.766725  0.76307\n",
       "average_loss  0.489756  0.498820  0.53289"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import pandas as pd\n",
    "\n",
    "# Summarize the three evaluations side by side (columns: model, rows: metric).\n",
    "results = pd.DataFrame(\n",
    "    {\"wd\":   [results_wd[\"accuracy\"],   results_wd[\"average_loss\"]],\n",
    "     \"wide\": [results_wide[\"accuracy\"], results_wide[\"average_loss\"]],\n",
    "     \"deep\": [results_deep[\"accuracy\"], results_deep[\"average_loss\"]]},\n",
    "    index=[\"accuracy\", \"average_loss\"])\n",
    "results"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
