{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using TensorFlow version 1.13.1\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Import the required modules\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "\n",
    "from sklearn.model_selection import train_test_split\n",
    "import time\n",
    "tf.logging.set_verbosity(tf.logging.INFO) # Set to INFO for tracking training, default is WARN. ERROR for least messages\n",
    "\n",
    "print(\"Using TensorFlow version %s\\n\" % (tf.__version__))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "columns are:  ['clicked', 'I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'] \n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Column layout of the training file:\n",
    "#   clicked -> label column; I1-I13 -> integer features; C1-C26 -> categorical features\n",
    "CONTINUOUS_COLUMNS =  [\"I\"+str(i) for i in range(1,14)] # 1-13 inclusive\n",
    "CATEGORICAL_COLUMNS = [\"C\"+str(i) for i in range(1,27)] # 1-26 inclusive\n",
    "LABEL_COLUMN = [\"clicked\"]\n",
    "\n",
    "TRAIN_DATA_COLUMNS = LABEL_COLUMN + CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS\n",
    "# TEST_DATA_COLUMNS = CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS\n",
    "\n",
    "FEATURE_COLUMNS = CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS\n",
    "\n",
    "print('columns are: ', TRAIN_DATA_COLUMNS, '\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>clicked</th>\n",
       "      <th>I1</th>\n",
       "      <th>I2</th>\n",
       "      <th>I3</th>\n",
       "      <th>I4</th>\n",
       "      <th>I5</th>\n",
       "      <th>I6</th>\n",
       "      <th>I7</th>\n",
       "      <th>I8</th>\n",
       "      <th>I9</th>\n",
       "      <th>...</th>\n",
       "      <th>C17</th>\n",
       "      <th>C18</th>\n",
       "      <th>C19</th>\n",
       "      <th>C20</th>\n",
       "      <th>C21</th>\n",
       "      <th>C22</th>\n",
       "      <th>C23</th>\n",
       "      <th>C24</th>\n",
       "      <th>C25</th>\n",
       "      <th>C26</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>5</td>\n",
       "      <td>0</td>\n",
       "      <td>1382</td>\n",
       "      <td>4</td>\n",
       "      <td>15</td>\n",
       "      <td>2</td>\n",
       "      <td>181</td>\n",
       "      <td>...</td>\n",
       "      <td>e5ba7672</td>\n",
       "      <td>f54016b9</td>\n",
       "      <td>21ddcdc9</td>\n",
       "      <td>b1252a9d</td>\n",
       "      <td>07b5194c</td>\n",
       "      <td>0</td>\n",
       "      <td>3a171ecb</td>\n",
       "      <td>c5c50484</td>\n",
       "      <td>e8b83407</td>\n",
       "      <td>9727dd16</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0</td>\n",
       "      <td>2</td>\n",
       "      <td>0</td>\n",
       "      <td>44</td>\n",
       "      <td>1</td>\n",
       "      <td>102</td>\n",
       "      <td>8</td>\n",
       "      <td>2</td>\n",
       "      <td>2</td>\n",
       "      <td>4</td>\n",
       "      <td>...</td>\n",
       "      <td>07c540c4</td>\n",
       "      <td>b04e4670</td>\n",
       "      <td>21ddcdc9</td>\n",
       "      <td>5840adea</td>\n",
       "      <td>60f6221e</td>\n",
       "      <td>0</td>\n",
       "      <td>3a171ecb</td>\n",
       "      <td>43f13e8b</td>\n",
       "      <td>e8b83407</td>\n",
       "      <td>731c3655</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0</td>\n",
       "      <td>2</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>14</td>\n",
       "      <td>767</td>\n",
       "      <td>89</td>\n",
       "      <td>4</td>\n",
       "      <td>2</td>\n",
       "      <td>245</td>\n",
       "      <td>...</td>\n",
       "      <td>8efede7f</td>\n",
       "      <td>3412118d</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>e587c466</td>\n",
       "      <td>ad3062eb</td>\n",
       "      <td>3a171ecb</td>\n",
       "      <td>3b183c5c</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>893</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>4392</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>...</td>\n",
       "      <td>1e88c74f</td>\n",
       "      <td>74ef3502</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>6b3a5ca6</td>\n",
       "      <td>0</td>\n",
       "      <td>3a171ecb</td>\n",
       "      <td>9117a34a</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0</td>\n",
       "      <td>3</td>\n",
       "      <td>-1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>2</td>\n",
       "      <td>0</td>\n",
       "      <td>3</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>...</td>\n",
       "      <td>1e88c74f</td>\n",
       "      <td>26b3c7a7</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>21c9516a</td>\n",
       "      <td>0</td>\n",
       "      <td>32c7478e</td>\n",
       "      <td>b34f3128</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>5 rows × 40 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "   clicked  I1   I2  I3  I4    I5  I6  I7  I8   I9  ...        C17        C18  \\\n",
       "0        0   1    1   5   0  1382   4  15   2  181  ...   e5ba7672   f54016b9   \n",
       "1        0   2    0  44   1   102   8   2   2    4  ...   07c540c4   b04e4670   \n",
       "2        0   2    0   1  14   767  89   4   2  245  ...   8efede7f   3412118d   \n",
       "3        0   0  893   0   0  4392   0   0   0    0  ...   1e88c74f   74ef3502   \n",
       "4        0   3   -1   0   0     2   0   3   0    0  ...   1e88c74f   26b3c7a7   \n",
       "\n",
       "         C19        C20        C21        C22        C23        C24  \\\n",
       "0   21ddcdc9   b1252a9d   07b5194c          0   3a171ecb   c5c50484   \n",
       "1   21ddcdc9   5840adea   60f6221e          0   3a171ecb   43f13e8b   \n",
       "2          0          0   e587c466   ad3062eb   3a171ecb   3b183c5c   \n",
       "3          0          0   6b3a5ca6          0   3a171ecb   9117a34a   \n",
       "4          0          0   21c9516a          0   32c7478e   b34f3128   \n",
       "\n",
       "         C25        C26  \n",
       "0   e8b83407   9727dd16  \n",
       "1   e8b83407   731c3655  \n",
       "2          0          0  \n",
       "3          0          0  \n",
       "4          0          0  \n",
       "\n",
       "[5 rows x 40 columns]"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the training data\n",
    "# path to where the data lies\n",
    "dpath = './data/'\n",
    "# Passing names= makes pandas treat the file as headerless and use these column names\n",
    "train = pd.read_csv(dpath +\"train.csv\", names = TRAIN_DATA_COLUMNS)\n",
    "train.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.frame.DataFrame'>\n",
      "RangeIndex: 800000 entries, 0 to 799999\n",
      "Data columns (total 40 columns):\n",
      "clicked    800000 non-null int64\n",
      "I1         800000 non-null int64\n",
      "I2         800000 non-null int64\n",
      "I3         800000 non-null int64\n",
      "I4         800000 non-null int64\n",
      "I5         800000 non-null int64\n",
      "I6         800000 non-null int64\n",
      "I7         800000 non-null int64\n",
      "I8         800000 non-null int64\n",
      "I9         800000 non-null int64\n",
      "I10        800000 non-null int64\n",
      "I11        800000 non-null int64\n",
      "I12        800000 non-null int64\n",
      "I13        800000 non-null int64\n",
      "C1         800000 non-null object\n",
      "C2         800000 non-null object\n",
      "C3         800000 non-null object\n",
      "C4         800000 non-null object\n",
      "C5         800000 non-null object\n",
      "C6         800000 non-null object\n",
      "C7         800000 non-null object\n",
      "C8         800000 non-null object\n",
      "C9         800000 non-null object\n",
      "C10        800000 non-null object\n",
      "C11        800000 non-null object\n",
      "C12        800000 non-null object\n",
      "C13        800000 non-null object\n",
      "C14        800000 non-null object\n",
      "C15        800000 non-null object\n",
      "C16        800000 non-null object\n",
      "C17        800000 non-null object\n",
      "C18        800000 non-null object\n",
      "C19        800000 non-null object\n",
      "C20        800000 non-null object\n",
      "C21        800000 non-null object\n",
      "C22        800000 non-null object\n",
      "C23        800000 non-null object\n",
      "C24        800000 non-null object\n",
      "C25        800000 non-null object\n",
      "C26        800000 non-null object\n",
      "dtypes: int64(14), object(26)\n",
      "memory usage: 244.1+ MB\n"
     ]
    }
   ],
   "source": [
    "# Inspect dtypes and non-null counts (per output: I* are int64, C* are object)\n",
    "train.info()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>C1</th>\n",
       "      <th>C2</th>\n",
       "      <th>C3</th>\n",
       "      <th>C4</th>\n",
       "      <th>C5</th>\n",
       "      <th>C6</th>\n",
       "      <th>C7</th>\n",
       "      <th>C8</th>\n",
       "      <th>C9</th>\n",
       "      <th>C10</th>\n",
       "      <th>...</th>\n",
       "      <th>C17</th>\n",
       "      <th>C18</th>\n",
       "      <th>C19</th>\n",
       "      <th>C20</th>\n",
       "      <th>C21</th>\n",
       "      <th>C22</th>\n",
       "      <th>C23</th>\n",
       "      <th>C24</th>\n",
       "      <th>C25</th>\n",
       "      <th>C26</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>97679</td>\n",
       "      <td>534271</td>\n",
       "      <td>757992</td>\n",
       "      <td>591693</td>\n",
       "      <td>538617</td>\n",
       "      <td>647062</td>\n",
       "      <td>130630</td>\n",
       "      <td>501625</td>\n",
       "      <td>154019</td>\n",
       "      <td>312753</td>\n",
       "      <td>...</td>\n",
       "      <td>229781</td>\n",
       "      <td>33012</td>\n",
       "      <td>449480</td>\n",
       "      <td>597070</td>\n",
       "      <td>22322</td>\n",
       "      <td>489075</td>\n",
       "      <td>526123</td>\n",
       "      <td>18188</td>\n",
       "      <td>624540</td>\n",
       "      <td>459748</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>97679</td>\n",
       "      <td>903788</td>\n",
       "      <td>290456</td>\n",
       "      <td>170201</td>\n",
       "      <td>538617</td>\n",
       "      <td>707633</td>\n",
       "      <td>110173</td>\n",
       "      <td>491066</td>\n",
       "      <td>154019</td>\n",
       "      <td>804070</td>\n",
       "      <td>...</td>\n",
       "      <td>112883</td>\n",
       "      <td>67712</td>\n",
       "      <td>449480</td>\n",
       "      <td>624954</td>\n",
       "      <td>90000</td>\n",
       "      <td>489075</td>\n",
       "      <td>526123</td>\n",
       "      <td>51918</td>\n",
       "      <td>624540</td>\n",
       "      <td>980092</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>261106</td>\n",
       "      <td>93319</td>\n",
       "      <td>37008</td>\n",
       "      <td>245002</td>\n",
       "      <td>538617</td>\n",
       "      <td>647062</td>\n",
       "      <td>934778</td>\n",
       "      <td>491066</td>\n",
       "      <td>154019</td>\n",
       "      <td>100922</td>\n",
       "      <td>...</td>\n",
       "      <td>630056</td>\n",
       "      <td>978906</td>\n",
       "      <td>489075</td>\n",
       "      <td>489075</td>\n",
       "      <td>370941</td>\n",
       "      <td>486505</td>\n",
       "      <td>526123</td>\n",
       "      <td>584497</td>\n",
       "      <td>489075</td>\n",
       "      <td>489075</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>97679</td>\n",
       "      <td>489030</td>\n",
       "      <td>533258</td>\n",
       "      <td>386938</td>\n",
       "      <td>538617</td>\n",
       "      <td>707633</td>\n",
       "      <td>619435</td>\n",
       "      <td>491066</td>\n",
       "      <td>154019</td>\n",
       "      <td>114153</td>\n",
       "      <td>...</td>\n",
       "      <td>191911</td>\n",
       "      <td>628504</td>\n",
       "      <td>489075</td>\n",
       "      <td>489075</td>\n",
       "      <td>987219</td>\n",
       "      <td>489075</td>\n",
       "      <td>526123</td>\n",
       "      <td>881448</td>\n",
       "      <td>489075</td>\n",
       "      <td>489075</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>684670</td>\n",
       "      <td>714541</td>\n",
       "      <td>683848</td>\n",
       "      <td>650040</td>\n",
       "      <td>538617</td>\n",
       "      <td>551549</td>\n",
       "      <td>120303</td>\n",
       "      <td>491066</td>\n",
       "      <td>154019</td>\n",
       "      <td>479009</td>\n",
       "      <td>...</td>\n",
       "      <td>191911</td>\n",
       "      <td>312248</td>\n",
       "      <td>489075</td>\n",
       "      <td>489075</td>\n",
       "      <td>444925</td>\n",
       "      <td>489075</td>\n",
       "      <td>86044</td>\n",
       "      <td>172918</td>\n",
       "      <td>489075</td>\n",
       "      <td>489075</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>5 rows × 26 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "       C1      C2      C3      C4      C5      C6      C7      C8      C9  \\\n",
       "0   97679  534271  757992  591693  538617  647062  130630  501625  154019   \n",
       "1   97679  903788  290456  170201  538617  707633  110173  491066  154019   \n",
       "2  261106   93319   37008  245002  538617  647062  934778  491066  154019   \n",
       "3   97679  489030  533258  386938  538617  707633  619435  491066  154019   \n",
       "4  684670  714541  683848  650040  538617  551549  120303  491066  154019   \n",
       "\n",
       "      C10  ...     C17     C18     C19     C20     C21     C22     C23  \\\n",
       "0  312753  ...  229781   33012  449480  597070   22322  489075  526123   \n",
       "1  804070  ...  112883   67712  449480  624954   90000  489075  526123   \n",
       "2  100922  ...  630056  978906  489075  489075  370941  486505  526123   \n",
       "3  114153  ...  191911  628504  489075  489075  987219  489075  526123   \n",
       "4  479009  ...  191911  312248  489075  489075  444925  489075   86044   \n",
       "\n",
       "      C24     C25     C26  \n",
       "0   18188  624540  459748  \n",
       "1   51918  624540  980092  \n",
       "2  584497  489075  489075  \n",
       "3  881448  489075  489075  \n",
       "4  172918  489075  489075  \n",
       "\n",
       "[5 rows x 26 columns]"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Method 1: hash categorical text values into numeric bin ids via MD5\n",
    "import hashlib\n",
    "NR_BINS = 1000000\n",
    "\n",
    "def hashstr(text):\n",
    "    \"\"\"Map a string to a stable bucket id in [1, NR_BINS-1], returned as str.\n",
    "\n",
    "    (Renamed parameter from `input`, which shadowed the Python builtin.)\n",
    "    \"\"\"\n",
    "    digest = hashlib.md5(text.encode('utf8')).hexdigest()\n",
    "    return str(int(digest, 16) % (NR_BINS - 1) + 1)\n",
    "\n",
    "\n",
    "# applymap accepts the function directly; the lambda wrapper was redundant\n",
    "train[CATEGORICAL_COLUMNS] = train[CATEGORICAL_COLUMNS].applymap(hashstr)\n",
    "train[CATEGORICAL_COLUMNS].head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\n",
      "For more information, please see:\n",
      "  * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n",
      "  * https://github.com/tensorflow/addons\n",
      "If you depend on functionality not listed there, please file an issue.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Sparse (wide) columns: one hash-bucketed column per categorical feature.\n",
    "# NOTE(review): hash_bucket_size=1000 is far smaller than the NR_BINS=1000000\n",
    "# space the values were hashed into above, so many distinct values will\n",
    "# collide into the same bucket -- confirm this is intended.\n",
    "wide_columns = []\n",
    "for name in CATEGORICAL_COLUMNS:\n",
    "    wide_columns.append(tf.contrib.layers.sparse_column_with_hash_bucket(\n",
    "            name, hash_bucket_size=1000))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dense (deep) columns: one real-valued column per continuous feature\n",
    "deep_columns = [\n",
    "    tf.contrib.layers.real_valued_column(col_name)\n",
    "    for col_name in CONTINUOUS_COLUMNS\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "WARNING:tensorflow:The default stddev value of initializer was changed from \"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" in core implementation (tf.feature_column.embedding_column).\n",
      "wide and deep columns configured\n"
     ]
    }
   ],
   "source": [
    "# Embed each sparse (wide) column so the deep network can consume it\n",
    "# as a dense 8-dimensional vector.\n",
    "deep_columns.extend(\n",
    "    tf.contrib.layers.embedding_column(sparse_col, dimension=8)\n",
    "    for sparse_col in wide_columns\n",
    ")\n",
    "\n",
    "print('wide and deep columns configured')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Create the model\n",
    "\n",
    "You can train either a \"wide\" model, a \"deep\" model, or a \"wide and deep\" model, using the classifiers below. Try each one and see what kind of results you get.\n",
    "\n",
    "* **Wide**: Linear Classifier\n",
    "* **Deep**: Deep Neural Net Classifier\n",
    "* **Wide & Deep**: Combined Linear and Deep Classifier\n",
    "\n",
    "The `hidden_units` or `dnn_hidden_units` argument specifies the size of each layer of the deep portion of the network. For example, `[12, 20, 15]` creates a network whose first layer has size 12, second layer size 20, and third layer size 15."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_model_dir(model_type):\n",
    "    \"\"\"Return a fresh timestamped model dir, e.g. models/model_WIDE_AND_DEEP_1493043407.\"\"\"\n",
    "    return 'models/model_' + model_type + '_' + str(int(time.time()))\n",
    "\n",
    "\n",
    "def get_model(model_type, model_dir):\n",
    "    \"\"\"Build a tf.contrib.learn estimator of the requested type.\n",
    "\n",
    "    model_type: 'WIDE' (linear), 'DEEP' (DNN) or 'WIDE_AND_DEEP' (combined).\n",
    "    model_dir:  directory where checkpoints and summaries are written.\n",
    "    Raises ValueError for an unrecognized model_type (previously this\n",
    "    silently returned None).\n",
    "    \"\"\"\n",
    "    print(\"Model directory = %s\" % model_dir)\n",
    "\n",
    "    # Checkpoint every 100 steps purely for illustration; in a real system\n",
    "    # with far more samples you would likely checkpoint less frequently.\n",
    "    runconfig = tf.contrib.learn.RunConfig(\n",
    "        save_checkpoints_secs=None,\n",
    "        save_checkpoints_steps=100,\n",
    "    )\n",
    "\n",
    "    # NOTE(review): only WIDE_AND_DEEP passes runconfig; WIDE and DEEP fall\n",
    "    # back to the default config -- confirm that is intended.\n",
    "    if model_type == 'WIDE':\n",
    "        # Wide: linear classifier over the sparse columns\n",
    "        m = tf.contrib.learn.LinearClassifier(\n",
    "            model_dir=model_dir,\n",
    "            feature_columns=wide_columns)\n",
    "    elif model_type == 'DEEP':\n",
    "        # Deep: DNN over the dense (and embedded) columns\n",
    "        m = tf.contrib.learn.DNNClassifier(\n",
    "            model_dir=model_dir,\n",
    "            feature_columns=deep_columns,\n",
    "            hidden_units=[100, 50, 25])\n",
    "    elif model_type == 'WIDE_AND_DEEP':\n",
    "        # Wide & Deep: combined linear and DNN classifier\n",
    "        m = tf.contrib.learn.DNNLinearCombinedClassifier(\n",
    "            model_dir=model_dir,\n",
    "            linear_feature_columns=wide_columns,\n",
    "            dnn_feature_columns=deep_columns,\n",
    "            dnn_hidden_units=[100, 70, 50, 25],\n",
    "            config=runconfig)\n",
    "    else:\n",
    "        raise ValueError('Unknown model_type: %r' % model_type)\n",
    "\n",
    "    print('estimator built')\n",
    "\n",
    "    return m"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model directory = models/model_WIDE_AND_DEEP_1563514182\n",
      "WARNING:tensorflow:From <ipython-input-9-121e8ac15627>:15: RunConfig.__init__ (from tensorflow.contrib.learn.python.learn.estimators.run_config) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "When switching to tf.estimator.Estimator, use tf.estimator.RunConfig instead.\n",
      "WARNING:tensorflow:From <ipython-input-9-121e8ac15627>:40: calling DNNLinearCombinedClassifier.__init__ (from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined) with fix_global_step_increment_bug=False is deprecated and will be removed after 2017-04-15.\n",
      "Instructions for updating:\n",
      "Please set fix_global_step_increment_bug=True and update training steps in your pipeline. See pydoc for details.\n",
      "WARNING:tensorflow:From /anaconda3/envs/po4/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:676: multi_class_head (from tensorflow.contrib.learn.python.learn.estimators.head) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please switch to tf.contrib.estimator.*_head.\n",
      "WARNING:tensorflow:From /anaconda3/envs/po4/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py:1179: BaseEstimator.__init__ (from tensorflow.contrib.learn.python.learn.estimators.estimator) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please replace uses of any Estimator from tf.contrib.learn with an Estimator from tf.estimator.*\n",
      "INFO:tensorflow:Using config: {'_task_type': None, '_task_id': 0, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x1a42fefef0>, '_master': '', '_num_ps_replicas': 0, '_num_worker_replicas': 0, '_environment': 'local', '_is_chief': True, '_evaluation_master': '', '_train_distribute': None, '_eval_distribute': None, '_device_fn': None, '_tf_config': gpu_options {\n",
      "  per_process_gpu_memory_fraction: 1.0\n",
      "}\n",
      ", '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_secs': None, '_log_step_count_steps': 100, '_protocol': None, '_session_config': None, '_save_checkpoints_steps': 100, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_model_dir': 'models/model_WIDE_AND_DEEP_1563514182'}\n",
      "estimator built\n"
     ]
    }
   ],
   "source": [
    "# Build the combined wide & deep estimator\n",
    "MODEL_TYPE = 'WIDE_AND_DEEP'\n",
    "model_dir = create_model_dir(model_type=MODEL_TYPE)\n",
    "model_w_n_d = get_model(model_type=MODEL_TYPE, model_dir=model_dir)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model directory = models/model_WIDE_1563514182\n",
      "INFO:tensorflow:Using default config.\n",
      "INFO:tensorflow:Using config: {'_task_type': None, '_task_id': 0, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x1a3024cb38>, '_master': '', '_num_ps_replicas': 0, '_num_worker_replicas': 0, '_environment': 'local', '_is_chief': True, '_evaluation_master': '', '_train_distribute': None, '_eval_distribute': None, '_device_fn': None, '_tf_config': gpu_options {\n",
      "  per_process_gpu_memory_fraction: 1.0\n",
      "}\n",
      ", '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_secs': 600, '_log_step_count_steps': 100, '_protocol': None, '_session_config': None, '_save_checkpoints_steps': None, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_model_dir': 'models/model_WIDE_1563514182'}\n",
      "estimator built\n"
     ]
    }
   ],
   "source": [
    "# Build the wide (linear-only) estimator\n",
    "MODEL_TYPE = 'WIDE'\n",
    "model_dir = create_model_dir(model_type=MODEL_TYPE)\n",
    "model_w = get_model(model_type=MODEL_TYPE, model_dir=model_dir)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model directory = models/model_DEEP_1563514182\n",
      "INFO:tensorflow:Using default config.\n",
      "INFO:tensorflow:Using config: {'_task_type': None, '_task_id': 0, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x1a42fc40b8>, '_master': '', '_num_ps_replicas': 0, '_num_worker_replicas': 0, '_environment': 'local', '_is_chief': True, '_evaluation_master': '', '_train_distribute': None, '_eval_distribute': None, '_device_fn': None, '_tf_config': gpu_options {\n",
      "  per_process_gpu_memory_fraction: 1.0\n",
      "}\n",
      ", '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_secs': 600, '_log_step_count_steps': 100, '_protocol': None, '_session_config': None, '_save_checkpoints_steps': None, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_model_dir': 'models/model_DEEP_1563514182'}\n",
      "estimator built\n"
     ]
    }
   ],
   "source": [
    "MODEL_TYPE = 'DEEP'\n",
    "model_dir = create_model_dir(model_type=MODEL_TYPE)\n",
    "model_d = get_model(model_type=MODEL_TYPE, model_dir=model_dir)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input function configured\n"
     ]
    }
   ],
   "source": [
    "BATCH_SIZE = 400\n",
    "\n",
    "def generate_input_fn(filename, batch_size=BATCH_SIZE):\n",
    "    def _input_fn():\n",
    "        filename_queue = tf.train.string_input_producer([filename])\n",
    "        reader = tf.TextLineReader()\n",
    "        # Reads out batch_size number of lines\n",
    "        key, value = reader.read_up_to(filename_queue, num_records=batch_size)\n",
    "        \n",
    "        # 1 int label, 13 ints, 26 strings\n",
    "        cont_defaults = [ [0] for i in range(1,14) ]\n",
    "        cate_defaults = [ [\" \"] for i in range(1,27) ]\n",
    "        label_defaults = [ [0] ]\n",
    "        column_headers = TRAIN_DATA_COLUMNS\n",
    "        # The label is the first column of the data.\n",
    "        record_defaults = label_defaults + cont_defaults + cate_defaults\n",
    "\n",
    "        # Decode CSV data that was just read out. \n",
    "        # Note that this does NOT return a dict, \n",
    "        # so we will need to zip it up with our headers\n",
    "        columns = tf.decode_csv(\n",
    "            value, record_defaults=record_defaults)\n",
    "        \n",
    "        # all_columns is a dictionary that maps from column names to tensors of the data.\n",
    "        all_columns = dict(zip(column_headers, columns))\n",
    "        \n",
    "        # Pop and save our labels \n",
    "        # dict.pop() returns the popped array of values; exactly what we need!\n",
    "        labels = all_columns.pop(LABEL_COLUMN[0])\n",
    "        \n",
    "        # the remaining columns are our features\n",
    "        features = all_columns \n",
    "\n",
    "        # Sparse categorical features must be represented with an additional dimension. \n",
    "        # There is no additional work needed for the Continuous columns; they are the unaltered columns.\n",
    "        # See docs for tf.SparseTensor for more info\n",
    "        for feature_name in CATEGORICAL_COLUMNS:\n",
    "            features[feature_name] = tf.expand_dims(features[feature_name], -1)\n",
    "\n",
    "        return features, labels\n",
    "\n",
    "    return _input_fn\n",
    "\n",
    "print('input function configured')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_file = \"data/train.csv\"\n",
    "eval_file  = \"data/eval.csv\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /anaconda3/envs/po4/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Colocations handled automatically by placer.\n",
      "WARNING:tensorflow:From <ipython-input-13-7b6e08de6b32>:5: string_input_producer (from tensorflow.python.training.input) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensor_slices(string_tensor).shuffle(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If `shuffle=False`, omit the `.shuffle(...)`.\n",
      "WARNING:tensorflow:From /anaconda3/envs/po4/lib/python3.6/site-packages/tensorflow/python/training/input.py:278: input_producer (from tensorflow.python.training.input) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensor_slices(input_tensor).shuffle(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If `shuffle=False`, omit the `.shuffle(...)`.\n",
      "WARNING:tensorflow:From /anaconda3/envs/po4/lib/python3.6/site-packages/tensorflow/python/training/input.py:190: limit_epochs (from tensorflow.python.training.input) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensors(tensor).repeat(num_epochs)`.\n",
      "WARNING:tensorflow:From /anaconda3/envs/po4/lib/python3.6/site-packages/tensorflow/python/training/input.py:199: QueueRunner.__init__ (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "To construct input pipelines, use the `tf.data` module.\n",
      "WARNING:tensorflow:From /anaconda3/envs/po4/lib/python3.6/site-packages/tensorflow/python/training/input.py:199: add_queue_runner (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "To construct input pipelines, use the `tf.data` module.\n",
      "WARNING:tensorflow:From /anaconda3/envs/po4/lib/python3.6/site-packages/tensorflow/python/training/input.py:202: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.cast instead.\n",
      "WARNING:tensorflow:From <ipython-input-13-7b6e08de6b32>:6: TextLineReader.__init__ (from tensorflow.python.ops.io_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.TextLineDataset`.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:From /anaconda3/envs/po4/lib/python3.6/site-packages/tensorflow/python/ops/array_grad.py:425: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.cast instead.\n",
      "WARNING:tensorflow:Casting <dtype: 'int32'> labels to bool.\n",
      "WARNING:tensorflow:From /anaconda3/envs/po4/lib/python3.6/site-packages/tensorflow/python/ops/metrics_impl.py:788: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Deprecated in favor of operator or tf.math.divide.\n",
      "WARNING:tensorflow:Casting <dtype: 'int32'> labels to bool.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "WARNING:tensorflow:From /anaconda3/envs/po4/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/estimators/head.py:677: ModelFnOps.__new__ (from tensorflow.contrib.learn.python.learn.estimators.model_fn) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "When switching to tf.estimator.Estimator, use tf.estimator.EstimatorSpec. You can use the `estimator_spec` method to create an equivalent one.\n",
      "INFO:tensorflow:Create CheckpointSaverHook.\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "WARNING:tensorflow:From /anaconda3/envs/po4/lib/python3.6/site-packages/tensorflow/python/training/monitored_session.py:809: start_queue_runners (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "To construct input pipelines, use the `tf.data` module.\n",
      "INFO:tensorflow:Saving checkpoints for 0 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:loss = 21.57232, step = 2\n",
      "INFO:tensorflow:Saving checkpoints for 102 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 19.5426\n",
      "INFO:tensorflow:loss = 0.8324415, step = 202 (7.778 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 204 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 32.6845\n",
      "INFO:tensorflow:Saving checkpoints for 306 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 89.1539\n",
      "INFO:tensorflow:loss = 0.52874094, step = 402 (2.258 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 408 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 88.7404\n",
      "INFO:tensorflow:Saving checkpoints for 510 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "WARNING:tensorflow:From /anaconda3/envs/po4/lib/python3.6/site-packages/tensorflow/python/training/saver.py:966: remove_checkpoint (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use standard file APIs to delete files with this prefix.\n",
      "INFO:tensorflow:global_step/sec: 89.8622\n",
      "INFO:tensorflow:loss = 0.50054896, step = 602 (2.258 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 612 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 89.7893\n",
      "INFO:tensorflow:Saving checkpoints for 714 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 89.3878\n",
      "INFO:tensorflow:loss = 0.5160162, step = 802 (2.257 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 816 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 89.2574\n",
      "INFO:tensorflow:Saving checkpoints for 918 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 88.7854\n",
      "INFO:tensorflow:loss = 0.5645935, step = 1002 (2.265 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 1020 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 89.4636\n",
      "INFO:tensorflow:Saving checkpoints for 1122 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 90.1397\n",
      "INFO:tensorflow:loss = 0.45955077, step = 1202 (2.248 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 1224 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 89.9725\n",
      "INFO:tensorflow:Saving checkpoints for 1326 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 89.3075\n",
      "INFO:tensorflow:loss = 0.4941931, step = 1402 (2.258 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 1428 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 89.1932\n",
      "INFO:tensorflow:Saving checkpoints for 1530 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 89.1441\n",
      "INFO:tensorflow:loss = 0.54021, step = 1602 (2.265 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 1632 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 89.6127\n",
      "INFO:tensorflow:Saving checkpoints for 1734 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 90.1701\n",
      "INFO:tensorflow:loss = 0.48692238, step = 1802 (2.238 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 1836 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 90.299\n",
      "INFO:tensorflow:Saving checkpoints for 1938 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:global_step/sec: 89.5514\n",
      "INFO:tensorflow:loss = 0.48190945, step = 2002 (2.249 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 2002 into models/model_WIDE_AND_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:Loss for final step: 0.48190945.\n",
      "WARNING:tensorflow:Casting <dtype: 'int32'> labels to bool.\n",
      "WARNING:tensorflow:Casting <dtype: 'int32'> labels to bool.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "INFO:tensorflow:Create CheckpointSaverHook.\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Saving checkpoints for 0 into models/model_WIDE_1563514182/model.ckpt.\n",
      "INFO:tensorflow:loss = 0.6931474, step = 1\n",
      "INFO:tensorflow:global_step/sec: 71.9787\n",
      "INFO:tensorflow:loss = 0.45659465, step = 101 (1.390 sec)\n",
      "INFO:tensorflow:global_step/sec: 187.928\n",
      "INFO:tensorflow:loss = 0.53344035, step = 201 (0.532 sec)\n",
      "INFO:tensorflow:global_step/sec: 188.875\n",
      "INFO:tensorflow:loss = 0.47971246, step = 301 (0.530 sec)\n",
      "INFO:tensorflow:global_step/sec: 188.586\n",
      "INFO:tensorflow:loss = 0.5206535, step = 401 (0.530 sec)\n",
      "INFO:tensorflow:global_step/sec: 191.943\n",
      "INFO:tensorflow:loss = 0.5690068, step = 501 (0.521 sec)\n",
      "INFO:tensorflow:global_step/sec: 188.621\n",
      "INFO:tensorflow:loss = 0.4670084, step = 601 (0.530 sec)\n",
      "INFO:tensorflow:global_step/sec: 186.755\n",
      "INFO:tensorflow:loss = 0.5033304, step = 701 (0.536 sec)\n",
      "INFO:tensorflow:global_step/sec: 188.184\n",
      "INFO:tensorflow:loss = 0.56871617, step = 801 (0.531 sec)\n",
      "INFO:tensorflow:global_step/sec: 187.815\n",
      "INFO:tensorflow:loss = 0.4968151, step = 901 (0.532 sec)\n",
      "INFO:tensorflow:global_step/sec: 187.35\n",
      "INFO:tensorflow:loss = 0.49897653, step = 1001 (0.534 sec)\n",
      "INFO:tensorflow:global_step/sec: 187.428\n",
      "INFO:tensorflow:loss = 0.57331276, step = 1101 (0.534 sec)\n",
      "INFO:tensorflow:global_step/sec: 187.947\n",
      "INFO:tensorflow:loss = 0.5235764, step = 1201 (0.532 sec)\n",
      "INFO:tensorflow:global_step/sec: 187.73\n",
      "INFO:tensorflow:loss = 0.46780455, step = 1301 (0.533 sec)\n",
      "INFO:tensorflow:global_step/sec: 183.782\n",
      "INFO:tensorflow:loss = 0.44247612, step = 1401 (0.544 sec)\n",
      "INFO:tensorflow:global_step/sec: 188.269\n",
      "INFO:tensorflow:loss = 0.5041065, step = 1501 (0.532 sec)\n",
      "INFO:tensorflow:global_step/sec: 186.436\n",
      "INFO:tensorflow:loss = 0.47314006, step = 1601 (0.536 sec)\n",
      "INFO:tensorflow:global_step/sec: 190.319\n",
      "INFO:tensorflow:loss = 0.5302299, step = 1701 (0.526 sec)\n",
      "INFO:tensorflow:global_step/sec: 185.372\n",
      "INFO:tensorflow:loss = 0.54489845, step = 1801 (0.539 sec)\n",
      "INFO:tensorflow:global_step/sec: 186.41\n",
      "INFO:tensorflow:loss = 0.49571082, step = 1901 (0.537 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 2000 into models/model_WIDE_1563514182/model.ckpt.\n",
      "INFO:tensorflow:Loss for final step: 0.5170178.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Casting <dtype: 'int32'> labels to bool.\n",
      "WARNING:tensorflow:Casting <dtype: 'int32'> labels to bool.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "INFO:tensorflow:Create CheckpointSaverHook.\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Saving checkpoints for 0 into models/model_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:loss = 10.955603, step = 1\n",
      "INFO:tensorflow:global_step/sec: 56.8711\n",
      "INFO:tensorflow:loss = 3.455068, step = 101 (1.759 sec)\n",
      "INFO:tensorflow:global_step/sec: 122.887\n",
      "INFO:tensorflow:loss = 1.2678814, step = 201 (0.813 sec)\n",
      "INFO:tensorflow:global_step/sec: 122.772\n",
      "INFO:tensorflow:loss = 1.2522515, step = 301 (0.815 sec)\n",
      "INFO:tensorflow:global_step/sec: 122.799\n",
      "INFO:tensorflow:loss = 1.3234748, step = 401 (0.814 sec)\n",
      "INFO:tensorflow:global_step/sec: 122.659\n",
      "INFO:tensorflow:loss = 0.7107979, step = 501 (0.815 sec)\n",
      "INFO:tensorflow:global_step/sec: 123.048\n",
      "INFO:tensorflow:loss = 0.60606605, step = 601 (0.813 sec)\n",
      "INFO:tensorflow:global_step/sec: 122.799\n",
      "INFO:tensorflow:loss = 0.5648017, step = 701 (0.815 sec)\n",
      "INFO:tensorflow:global_step/sec: 123.247\n",
      "INFO:tensorflow:loss = 0.5998552, step = 801 (0.811 sec)\n",
      "INFO:tensorflow:global_step/sec: 124.158\n",
      "INFO:tensorflow:loss = 0.5625596, step = 901 (0.806 sec)\n",
      "INFO:tensorflow:global_step/sec: 121.979\n",
      "INFO:tensorflow:loss = 0.5542034, step = 1001 (0.819 sec)\n",
      "INFO:tensorflow:global_step/sec: 123.86\n",
      "INFO:tensorflow:loss = 0.60669935, step = 1101 (0.808 sec)\n",
      "INFO:tensorflow:global_step/sec: 117.185\n",
      "INFO:tensorflow:loss = 0.5995978, step = 1201 (0.853 sec)\n",
      "INFO:tensorflow:global_step/sec: 120.603\n",
      "INFO:tensorflow:loss = 0.54912436, step = 1301 (0.829 sec)\n",
      "INFO:tensorflow:global_step/sec: 121.021\n",
      "INFO:tensorflow:loss = 0.55267805, step = 1401 (0.827 sec)\n",
      "INFO:tensorflow:global_step/sec: 119.922\n",
      "INFO:tensorflow:loss = 0.5758392, step = 1501 (0.834 sec)\n",
      "INFO:tensorflow:global_step/sec: 121.91\n",
      "INFO:tensorflow:loss = 0.49281663, step = 1601 (0.820 sec)\n",
      "INFO:tensorflow:global_step/sec: 121.639\n",
      "INFO:tensorflow:loss = 0.5505726, step = 1701 (0.822 sec)\n",
      "INFO:tensorflow:global_step/sec: 122.138\n",
      "INFO:tensorflow:loss = 0.6140295, step = 1801 (0.819 sec)\n",
      "INFO:tensorflow:global_step/sec: 122.041\n",
      "INFO:tensorflow:loss = 0.5377367, step = 1901 (0.819 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 2000 into models/model_DEEP_1563514182/model.ckpt.\n",
      "INFO:tensorflow:Loss for final step: 0.5574483.\n",
      "fit done\n",
      "CPU times: user 2min 36s, sys: 22.2 s, total: 2min 58s\n",
      "Wall time: 1min 25s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "# This can be found with\n",
    "# wc -l train.csv\n",
    "train_sample_size = 800000\n",
    "train_steps = train_sample_size/BATCH_SIZE # 8000/40 = 200\n",
    "\n",
    "model_w_n_d.fit(input_fn=generate_input_fn(train_file, BATCH_SIZE), steps=train_steps)\n",
    "model_w.fit(input_fn=generate_input_fn(train_file, BATCH_SIZE), steps=train_steps)\n",
    "model_d.fit(input_fn=generate_input_fn(train_file, BATCH_SIZE), steps=train_steps)\n",
    "\n",
    "print('fit done')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Casting <dtype: 'int32'> labels to bool.\n",
      "WARNING:tensorflow:Casting <dtype: 'int32'> labels to bool.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "INFO:tensorflow:Starting evaluation at 2019-07-19T05:33:37Z\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "WARNING:tensorflow:From /anaconda3/envs/po4/lib/python3.6/site-packages/tensorflow/python/training/saver.py:1266: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use standard file APIs to check for files with this prefix.\n",
      "INFO:tensorflow:Restoring parameters from models/model_WIDE_AND_DEEP_1563514182/model.ckpt-2002\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Evaluation [50/500]\n",
      "INFO:tensorflow:Evaluation [100/500]\n",
      "INFO:tensorflow:Evaluation [150/500]\n",
      "INFO:tensorflow:Evaluation [200/500]\n",
      "INFO:tensorflow:Evaluation [250/500]\n",
      "INFO:tensorflow:Evaluation [300/500]\n",
      "INFO:tensorflow:Evaluation [350/500]\n",
      "INFO:tensorflow:Evaluation [400/500]\n",
      "INFO:tensorflow:Evaluation [450/500]\n",
      "INFO:tensorflow:Evaluation [500/500]\n",
      "INFO:tensorflow:Finished evaluation at 2019-07-19-05:33:44\n",
      "INFO:tensorflow:Saving dict for global step 2002: accuracy = 0.768985, accuracy/baseline_label_mean = 0.251165, accuracy/threshold_0.500000_mean = 0.768985, auc = 0.7361028, auc_precision_recall = 0.49478266, global_step = 2002, labels/actual_label_mean = 0.251165, labels/prediction_mean = 0.23993276, loss = 0.4962089, precision/positive_threshold_0.500000_mean = 0.6645704, recall/positive_threshold_0.500000_mean = 0.16198514\n",
      "evaluate done\n",
      "Accuracy: 0.768985\n",
      "{'loss': 0.4962089, 'accuracy': 0.768985, 'labels/prediction_mean': 0.23993276, 'labels/actual_label_mean': 0.251165, 'accuracy/baseline_label_mean': 0.251165, 'auc': 0.7361028, 'auc_precision_recall': 0.49478266, 'accuracy/threshold_0.500000_mean': 0.768985, 'precision/positive_threshold_0.500000_mean': 0.6645704, 'recall/positive_threshold_0.500000_mean': 0.16198514, 'global_step': 2002}\n",
      "CPU times: user 17.2 s, sys: 2.39 s, total: 19.6 s\n",
      "Wall time: 9.78 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "# Evaluate the combined wide & deep model on the held-out eval set.\n",
    "eval_sample_size = 200000 # this can be found with a 'wc -l eval.csv'\n",
    "# Floor division keeps eval_steps an int under both Python 2 and 3.\n",
    "# 200000 // BATCH_SIZE batches -- presumably BATCH_SIZE = 400 here, since the\n",
    "# evaluation log above reports [500/500] steps. (The old '2000/40 = 50' comment\n",
    "# was stale, left over from a smaller run.)\n",
    "eval_steps = eval_sample_size // BATCH_SIZE\n",
    "\n",
    "results = model_w_n_d.evaluate(input_fn=generate_input_fn(eval_file),\n",
    "                               steps=eval_steps)\n",
    "print('evaluate done')\n",
    "\n",
    "print('Accuracy: %s' % results['accuracy'])\n",
    "print(results)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Casting <dtype: 'int32'> labels to bool.\n",
      "WARNING:tensorflow:Casting <dtype: 'int32'> labels to bool.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "INFO:tensorflow:Starting evaluation at 2019-07-19T05:34:03Z\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Restoring parameters from models/model_WIDE_1563514182/model.ckpt-2000\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Evaluation [50/500]\n",
      "INFO:tensorflow:Evaluation [100/500]\n",
      "INFO:tensorflow:Evaluation [150/500]\n",
      "INFO:tensorflow:Evaluation [200/500]\n",
      "INFO:tensorflow:Evaluation [250/500]\n",
      "INFO:tensorflow:Evaluation [300/500]\n",
      "INFO:tensorflow:Evaluation [350/500]\n",
      "INFO:tensorflow:Evaluation [400/500]\n",
      "INFO:tensorflow:Evaluation [450/500]\n",
      "INFO:tensorflow:Evaluation [500/500]\n",
      "INFO:tensorflow:Finished evaluation at 2019-07-19-05:34:06\n",
      "INFO:tensorflow:Saving dict for global step 2000: accuracy = 0.766125, accuracy/baseline_label_mean = 0.251165, accuracy/threshold_0.500000_mean = 0.766125, auc = 0.72278744, auc_precision_recall = 0.47572494, global_step = 2000, labels/actual_label_mean = 0.251165, labels/prediction_mean = 0.26990917, loss = 0.50244325, precision/positive_threshold_0.500000_mean = 0.6087969, recall/positive_threshold_0.500000_mean = 0.19260247\n",
      "evaluate done\n",
      "Accuracy: 0.766125\n",
      "{'loss': 0.50244325, 'accuracy': 0.766125, 'labels/prediction_mean': 0.26990917, 'labels/actual_label_mean': 0.251165, 'accuracy/baseline_label_mean': 0.251165, 'auc': 0.72278744, 'auc_precision_recall': 0.47572494, 'accuracy/threshold_0.500000_mean': 0.766125, 'precision/positive_threshold_0.500000_mean': 0.6087969, 'recall/positive_threshold_0.500000_mean': 0.19260247, 'global_step': 2000}\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the wide-only model using the same eval input pipeline and step count\n",
    "# as the wide & deep run above, so the metrics are directly comparable.\n",
    "results = model_w.evaluate(input_fn=generate_input_fn(eval_file), steps=eval_steps)\n",
    "print('evaluate done')\n",
    "\n",
    "print('Accuracy: %s' % results['accuracy'])\n",
    "print(results)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
      "WARNING:tensorflow:Casting <dtype: 'int32'> labels to bool.\n",
      "WARNING:tensorflow:Casting <dtype: 'int32'> labels to bool.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "WARNING:tensorflow:Trapezoidal rule is known to produce incorrect PR-AUCs; please switch to \"careful_interpolation\" instead.\n",
      "INFO:tensorflow:Starting evaluation at 2019-07-19T05:34:17Z\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Restoring parameters from models/model_DEEP_1563514182/model.ckpt-2000\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Evaluation [50/500]\n",
      "INFO:tensorflow:Evaluation [100/500]\n",
      "INFO:tensorflow:Evaluation [150/500]\n",
      "INFO:tensorflow:Evaluation [200/500]\n",
      "INFO:tensorflow:Evaluation [250/500]\n",
      "INFO:tensorflow:Evaluation [300/500]\n",
      "INFO:tensorflow:Evaluation [350/500]\n",
      "INFO:tensorflow:Evaluation [400/500]\n",
      "INFO:tensorflow:Evaluation [450/500]\n",
      "INFO:tensorflow:Evaluation [500/500]\n",
      "INFO:tensorflow:Finished evaluation at 2019-07-19-05:34:21\n",
      "INFO:tensorflow:Saving dict for global step 2000: accuracy = 0.75249, accuracy/baseline_label_mean = 0.251165, accuracy/threshold_0.500000_mean = 0.75249, auc = 0.6642497, auc_precision_recall = 0.39807573, global_step = 2000, labels/actual_label_mean = 0.251165, labels/prediction_mean = 0.27049753, loss = 0.5411806, precision/positive_threshold_0.500000_mean = 0.5636427, recall/positive_threshold_0.500000_mean = 0.064439714\n",
      "evaluate done\n",
      "Accuracy: 0.75249\n",
      "{'loss': 0.5411806, 'accuracy': 0.75249, 'labels/prediction_mean': 0.27049753, 'labels/actual_label_mean': 0.251165, 'accuracy/baseline_label_mean': 0.251165, 'auc': 0.6642497, 'auc_precision_recall': 0.39807573, 'accuracy/threshold_0.500000_mean': 0.75249, 'precision/positive_threshold_0.500000_mean': 0.5636427, 'recall/positive_threshold_0.500000_mean': 0.064439714, 'global_step': 2000}\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the deep-only model with the identical eval pipeline and step count,\n",
    "# completing the wide vs. deep vs. wide-and-deep comparison.\n",
    "results = model_d.evaluate(input_fn=generate_input_fn(eval_file), steps=eval_steps)\n",
    "print('evaluate done')\n",
    "\n",
    "print('Accuracy: %s' % results['accuracy'])\n",
    "print(results)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
