{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## CTR预估(2)\n",
    "\n",
    "资料&&代码整理by[@寒小阳](https://blog.csdn.net/han_xiaoyang)(hanxiaoyang.ml@gmail.com)\n",
    "\n",
     "Reference:\n",
    "* [《广告点击率预估是怎么回事？》](https://zhuanlan.zhihu.com/p/23499698)\n",
    "* [从ctr预估问题看看f(x)设计—DNN篇](https://zhuanlan.zhihu.com/p/28202287)\n",
    "* [Atomu2014 product_nets](https://github.com/Atomu2014/product-nets)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "同样以criteo数据为例，我们来看看深度学习的应用。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 特征工程\n",
    "特征工程是比较重要的数据处理过程，这里对criteo数据依照[paddlepaddle做ctr预估特征工程](https://github.com/PaddlePaddle/models/blob/develop/deep_fm/preprocess.py)完成特征工程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "#coding=utf8\n",
    "\"\"\"\n",
    "特征工程参考(https://github.com/PaddlePaddle/models/blob/develop/deep_fm/preprocess.py)完成\n",
    "-对数值型特征，normalize处理\n",
    "-对类别型特征，对长尾(出现频次低于200)的进行过滤\n",
    "\"\"\"\n",
    "import os\n",
    "import sys\n",
    "import random\n",
    "import collections\n",
    "import argparse\n",
    "from multiprocessing import Pool as ThreadPool\n",
    "\n",
     "# 13 continuous columns (indices 1-13) and 26 categorical columns (indices 14-39);\n",
     "# index 0 of each train.txt row is the click label\n",
     "continous_features = range(1, 14)\n",
     "categorial_features = range(14, 40)\n",
     "\n",
     "# clip threshold per continuous column (~95th percentile of each column)\n",
     "continous_clip = [20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50]\n",
    "\n",
    "\n",
    "class CategoryDictGenerator:\n",
    "    \"\"\"\n",
    "    类别型特征编码字典\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, num_feature):\n",
    "        self.dicts = []\n",
    "        self.num_feature = num_feature\n",
    "        for i in range(0, num_feature):\n",
    "            self.dicts.append(collections.defaultdict(int))\n",
    "\n",
    "    def build(self, datafile, categorial_features, cutoff=0):\n",
    "        with open(datafile, 'r') as f:\n",
    "            for line in f:\n",
    "                features = line.rstrip('\\n').split('\\t')\n",
    "                for i in range(0, self.num_feature):\n",
    "                    if features[categorial_features[i]] != '':\n",
    "                        self.dicts[i][features[categorial_features[i]]] += 1\n",
    "        for i in range(0, self.num_feature):\n",
    "            self.dicts[i] = filter(lambda x: x[1] >= cutoff, self.dicts[i].items())\n",
    "            self.dicts[i] = sorted(self.dicts[i], key=lambda x: (-x[1], x[0]))\n",
    "            vocabs, _ = list(zip(*self.dicts[i]))\n",
    "            self.dicts[i] = dict(zip(vocabs, range(1, len(vocabs) + 1)))\n",
    "            self.dicts[i]['<unk>'] = 0\n",
    "\n",
    "    def gen(self, idx, key):\n",
    "        if key not in self.dicts[idx]:\n",
    "            res = self.dicts[idx]['<unk>']\n",
    "        else:\n",
    "            res = self.dicts[idx][key]\n",
    "        return res\n",
    "\n",
    "    def dicts_sizes(self):\n",
    "        return map(len, self.dicts)\n",
    "\n",
    "\n",
    "class ContinuousFeatureGenerator:\n",
    "    \"\"\"\n",
    "    对连续值特征做最大最小值normalization\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, num_feature):\n",
    "        self.num_feature = num_feature\n",
    "        self.min = [sys.maxint] * num_feature\n",
    "        self.max = [-sys.maxint] * num_feature\n",
    "\n",
    "    def build(self, datafile, continous_features):\n",
    "        with open(datafile, 'r') as f:\n",
    "            for line in f:\n",
    "                features = line.rstrip('\\n').split('\\t')\n",
    "                for i in range(0, self.num_feature):\n",
    "                    val = features[continous_features[i]]\n",
    "                    if val != '':\n",
    "                        val = int(val)\n",
    "                        if val > continous_clip[i]:\n",
    "                            val = continous_clip[i]\n",
    "                        self.min[i] = min(self.min[i], val)\n",
    "                        self.max[i] = max(self.max[i], val)\n",
    "\n",
    "    def gen(self, idx, val):\n",
    "        if val == '':\n",
    "            return 0.0\n",
    "        val = float(val)\n",
    "        return (val - self.min[idx]) / (self.max[idx] - self.min[idx])\n",
    "\n",
    "\n",
    "def preprocess(input_dir, output_dir):\n",
    "    \"\"\"\n",
    "    对连续型和类别型特征进行处理\n",
    "    \"\"\"\n",
    "    \n",
    "    dists = ContinuousFeatureGenerator(len(continous_features))\n",
    "    dists.build(input_dir + 'train.txt', continous_features)\n",
    "\n",
    "    dicts = CategoryDictGenerator(len(categorial_features))\n",
    "    dicts.build(input_dir + 'train.txt', categorial_features, cutoff=150)\n",
    "\n",
    "    output = open(output_dir + 'feature_map','w')\n",
    "    for i in continous_features:\n",
    "        output.write(\"{0} {1}\\n\".format('I'+str(i), i))\n",
    "    dict_sizes = dicts.dicts_sizes()\n",
    "    categorial_feature_offset = [dists.num_feature]\n",
    "    for i in range(1, len(categorial_features)+1):\n",
    "        offset = categorial_feature_offset[i - 1] + dict_sizes[i - 1]\n",
    "        categorial_feature_offset.append(offset)\n",
    "        for key, val in dicts.dicts[i-1].iteritems():\n",
    "            output.write(\"{0} {1}\\n\".format('C'+str(i)+'|'+key, categorial_feature_offset[i - 1]+val+1))\n",
    "\n",
    "    random.seed(0)\n",
    "\n",
    "    # 90%的数据用于训练，10%的数据用于验证\n",
    "    with open(output_dir + 'tr.libsvm', 'w') as out_train:\n",
    "        with open(output_dir + 'va.libsvm', 'w') as out_valid:\n",
    "            with open(input_dir + 'train.txt', 'r') as f:\n",
    "                for line in f:\n",
    "                    features = line.rstrip('\\n').split('\\t')\n",
    "\n",
    "                    feat_vals = []\n",
    "                    for i in range(0, len(continous_features)):\n",
    "                        val = dists.gen(i, features[continous_features[i]])\n",
    "                        feat_vals.append(str(continous_features[i]) + ':' + \"{0:.6f}\".format(val).rstrip('0').rstrip('.'))\n",
    "\n",
    "                    for i in range(0, len(categorial_features)):\n",
    "                        val = dicts.gen(i, features[categorial_features[i]]) + categorial_feature_offset[i]\n",
    "                        feat_vals.append(str(val) + ':1')\n",
    "\n",
    "                    label = features[0]\n",
    "                    if random.randint(0, 9999) % 10 != 0:\n",
    "                        out_train.write(\"{0} {1}\\n\".format(label, ' '.join(feat_vals)))\n",
    "                    else:\n",
    "                        out_valid.write(\"{0} {1}\\n\".format(label, ' '.join(feat_vals)))\n",
    "\n",
    "    with open(output_dir + 'te.libsvm', 'w') as out:\n",
    "        with open(input_dir + 'test.txt', 'r') as f:\n",
    "            for line in f:\n",
    "                features = line.rstrip('\\n').split('\\t')\n",
    "\n",
    "                feat_vals = []\n",
    "                for i in range(0, len(continous_features)):\n",
    "                    val = dists.gen(i, features[continous_features[i] - 1])\n",
    "                    feat_vals.append(str(continous_features[i]) + ':' + \"{0:.6f}\".format(val).rstrip('0').rstrip('.'))\n",
    "\n",
    "                for i in range(0, len(categorial_features)):\n",
    "                    val = dicts.gen(i, features[categorial_features[i] - 1]) + categorial_feature_offset[i]\n",
    "                    feat_vals.append(str(val) + ':1')\n",
    "\n",
    "                out.write(\"{0} {1}\\n\".format(label, ' '.join(feat_vals)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始数据处理与特征工程...\n"
     ]
    }
   ],
   "source": [
     "input_dir = './criteo_data/'\n",
     "output_dir = './criteo_data/'\n",
     "# run the full feature-engineering pipeline defined in the previous cell\n",
     "print(\"开始数据处理与特征工程...\")\n",
     "preprocess(input_dir, output_dir)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 1:0.2 2:0.028192 3:0.07 4:0.56 5:0.000562 6:0.056 7:0.04 8:0.86 9:0.094 10:0.25 11:0.2 12:0 13:0.56 14:1 169:1 631:1 1414:1 2534:1 2584:1 4239:1 4991:1 5060:1 5064:1 7141:1 8818:1 9906:1 11250:1 11377:1 12951:1 13833:1 13883:1 14817:1 15204:1 15327:1 16118:1 16128:1 16183:1 17289:1 17361:1\r\n",
      "1 1:0 2:0.004975 3:0.11 4:0 5:1.373375 6:0 7:0 8:0.14 9:0 10:0 11:0 12:0 13:0 14:1 181:1 543:1 1379:1 2534:1 2582:1 3632:1 4990:1 5061:1 5217:1 6925:1 8726:1 9705:1 11250:1 11605:1 12849:1 13835:1 13971:1 14816:1 15202:1 15224:1 16118:1 16129:1 16148:1 17280:1 17320:1\r\n",
      "0 1:0.1 2:0.008292 3:0.28 4:0.14 5:0.000016 6:0.002 7:0.21 8:0.14 9:0.786 10:0.125 11:0.4 12:0 13:0.02 14:1 209:1 632:1 1491:1 2534:1 2582:1 2719:1 4995:1 5060:1 5069:1 6960:1 8820:1 9727:1 11249:1 11471:1 12933:1 13834:1 13927:1 14817:1 15204:1 15328:1 16118:1 16129:1 16185:1 17282:1 17364:1\r\n",
      "1 1:0 2:0.003317 3:0.04 4:0.5 5:0.504031 6:0.222 7:0.02 8:0.72 9:0.16 10:0 11:0.1 12:0 13:0.5 14:1 156:1 529:1 1377:1 2534:1 2583:1 3768:1 4990:1 5060:1 5247:1 7131:1 8711:1 9893:1 11250:1 11361:1 12827:1 13835:1 13888:1 14816:1 15202:1 15207:1 16118:1 16129:1 16145:1 17280:1 17320:1\r\n",
      "0 1:0 2:0.004975 3:0.28 4:0.14 5:0.022766 6:0.058 7:0.05 8:0.28 9:0.464 10:0 11:0.3 12:0 13:0.14 15:1 142:1 528:1 1376:1 2535:1 2582:1 2659:1 4997:1 5060:1 5064:1 7780:1 8710:1 9703:1 11250:1 11324:1 12826:1 13834:1 13861:1 14817:1 15203:1 15206:1 16118:1 16128:1 16746:1 17282:1 17320:1\r\n"
     ]
    }
   ],
   "source": [
    "!head -5 ./criteo_data/tr.libsvm"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### DeepFM\n",
    "reference：[常见的计算广告点击率预估算法总结](https://zhuanlan.zhihu.com/p/29053940)\n",
    "\n",
    "DeepFM结合了wide and deep和FM，其实就是把PNN和WDL结合了。原始的Wide and Deep，Wide的部分只是LR，构造线性关系，Deep部分建模更高阶的关系，所以在Wide and Deep中还需要做一些特征的东西，如Cross Column的工作，FM是可以建模二阶关系达到Cross column的效果，DeepFM就是把FM和NN结合，无需再对特征做诸如Cross Column的工作了。\n",
    "\n",
    "FM部分如下：\n",
    "![](https://pic1.zhimg.com/80/v2-7bdf133eb39aa65aefc84c71b98e64e5_hd.jpg)\n",
    "\n",
    "Deep部分如下：\n",
    "![](https://pic2.zhimg.com/80/v2-e02f6b7d867d7aa2600bab38e39df7d6_hd.jpg)\n",
    "\n",
    "总体结构图：\n",
    "![](https://pic4.zhimg.com/80/v2-a3d58ffcf53af5b1eef70ac42b555317_hd.jpg)\n",
    "\n",
    "DeepFM相对于FNN、PNN，能够利用其Deep部分建模更高阶信息（二阶以上），而相对于Wide and Deep能够减少特征工程的部分工作，wide部分类似FM建模一、二阶特征间关系， 算是NN和FM的一个很好的结合，另外不同的是如下图，DeepFM的wide和deep部分共享embedding向量空间，wide和deep均可以更新embedding部分"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# %load DeepFM.py\n",
    "#!/usr/bin/env python\n",
     "\"\"\"\n",
     "#1 Input pipeline using the Dataset high-level API; supports parallel and prefetch reading\n",
     "#2 Train pipeline using a custom Estimator (rewritten model_fn)\n",
     "#3 Supports distributed training via TF_CONFIG\n",
     "#4 Supports export_model for TensorFlow Serving\n",
     "\n",
     "To port this to other algorithms, only input_fn and model_fn need changing\n",
     "by lambdaji\n",
     "\"\"\"\n",
    "#from __future__ import absolute_import\n",
    "#from __future__ import division\n",
    "#from __future__ import print_function\n",
    "\n",
    "#import argparse\n",
    "import shutil\n",
    "#import sys\n",
    "import os\n",
    "import json\n",
    "import glob\n",
    "from datetime import date, timedelta\n",
    "from time import time\n",
    "#import gc\n",
    "#from multiprocessing import Process\n",
    "\n",
    "#import math\n",
    "import random\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "\n",
    "#################### CMD Arguments ####################\n",
    "FLAGS = tf.app.flags.FLAGS\n",
    "tf.app.flags.DEFINE_integer(\"dist_mode\", 0, \"distribuion mode {0-loacal, 1-single_dist, 2-multi_dist}\")\n",
    "tf.app.flags.DEFINE_string(\"ps_hosts\", '', \"Comma-separated list of hostname:port pairs\")\n",
    "tf.app.flags.DEFINE_string(\"worker_hosts\", '', \"Comma-separated list of hostname:port pairs\")\n",
    "tf.app.flags.DEFINE_string(\"job_name\", '', \"One of 'ps', 'worker'\")\n",
    "tf.app.flags.DEFINE_integer(\"task_index\", 0, \"Index of task within the job\")\n",
    "tf.app.flags.DEFINE_integer(\"num_threads\", 16, \"Number of threads\")\n",
    "tf.app.flags.DEFINE_integer(\"feature_size\", 0, \"Number of features\")\n",
    "tf.app.flags.DEFINE_integer(\"field_size\", 0, \"Number of fields\")\n",
    "tf.app.flags.DEFINE_integer(\"embedding_size\", 32, \"Embedding size\")\n",
    "tf.app.flags.DEFINE_integer(\"num_epochs\", 10, \"Number of epochs\")\n",
    "tf.app.flags.DEFINE_integer(\"batch_size\", 64, \"Number of batch size\")\n",
    "tf.app.flags.DEFINE_integer(\"log_steps\", 1000, \"save summary every steps\")\n",
    "tf.app.flags.DEFINE_float(\"learning_rate\", 0.0005, \"learning rate\")\n",
    "tf.app.flags.DEFINE_float(\"l2_reg\", 0.0001, \"L2 regularization\")\n",
    "tf.app.flags.DEFINE_string(\"loss_type\", 'log_loss', \"loss type {square_loss, log_loss}\")\n",
    "tf.app.flags.DEFINE_string(\"optimizer\", 'Adam', \"optimizer type {Adam, Adagrad, GD, Momentum}\")\n",
    "tf.app.flags.DEFINE_string(\"deep_layers\", '256,128,64', \"deep layers\")\n",
    "tf.app.flags.DEFINE_string(\"dropout\", '0.5,0.5,0.5', \"dropout rate\")\n",
    "tf.app.flags.DEFINE_boolean(\"batch_norm\", False, \"perform batch normaization (True or False)\")\n",
    "tf.app.flags.DEFINE_float(\"batch_norm_decay\", 0.9, \"decay for the moving average(recommend trying decay=0.9)\")\n",
    "tf.app.flags.DEFINE_string(\"data_dir\", '', \"data dir\")\n",
    "tf.app.flags.DEFINE_string(\"dt_dir\", '', \"data dt partition\")\n",
    "tf.app.flags.DEFINE_string(\"model_dir\", '', \"model check point dir\")\n",
    "tf.app.flags.DEFINE_string(\"servable_model_dir\", '', \"export servable model for TensorFlow Serving\")\n",
    "tf.app.flags.DEFINE_string(\"task_type\", 'train', \"task type {train, infer, eval, export}\")\n",
    "tf.app.flags.DEFINE_boolean(\"clear_existing_model\", False, \"clear existing model or not\")\n",
    "\n",
    "#1 1:0.5 2:0.03519 3:1 4:0.02567 7:0.03708 8:0.01705 9:0.06296 10:0.18185 11:0.02497 12:1 14:0.02565 15:0.03267 17:0.0247 18:0.03158 20:1 22:1 23:0.13169 24:0.02933 27:0.18159 31:0.0177 34:0.02888 38:1 51:1 63:1 132:1 164:1 236:1\n",
     "def input_fn(filenames, batch_size=32, num_epochs=1, perform_shuffle=False):\n",
     "    \"\"\"Input pipeline: parse libsvm-format lines into batched tensors.\n",
     "\n",
     "    Returns ({'feat_ids', 'feat_vals'}, labels) drawn from a one-shot\n",
     "    iterator over the given file(s).\n",
     "    \"\"\"\n",
     "    print('Parsing', filenames)\n",
     "    def decode_libsvm(line):\n",
     "        # parse one 'label id:val id:val ...' line into id/value tensors\n",
     "        #columns = tf.decode_csv(value, record_defaults=CSV_COLUMN_DEFAULTS)\n",
     "        #features = dict(zip(CSV_COLUMNS, columns))\n",
     "        #labels = features.pop(LABEL_COLUMN)\n",
     "        columns = tf.string_split([line], ' ')\n",
     "        labels = tf.string_to_number(columns.values[0], out_type=tf.float32)\n",
     "        splits = tf.string_split(columns.values[1:], ':')\n",
     "        id_vals = tf.reshape(splits.values,splits.dense_shape)\n",
     "        feat_ids, feat_vals = tf.split(id_vals,num_or_size_splits=2,axis=1)\n",
     "        feat_ids = tf.string_to_number(feat_ids, out_type=tf.int32)\n",
     "        feat_vals = tf.string_to_number(feat_vals, out_type=tf.float32)\n",
     "        #feat_ids = tf.reshape(feat_ids,shape=[-1,FLAGS.field_size])\n",
     "        #for i in range(splits.dense_shape.eval()[0]):\n",
     "        #    feat_ids.append(tf.string_to_number(splits.values[2*i], out_type=tf.int32))\n",
     "        #    feat_vals.append(tf.string_to_number(splits.values[2*i+1]))\n",
     "        #return tf.reshape(feat_ids,shape=[-1,field_size]), tf.reshape(feat_vals,shape=[-1,field_size]), labels\n",
     "        return {\"feat_ids\": feat_ids, \"feat_vals\": feat_vals}, labels\n",
     "\n",
     "    # Extract lines from input files using the Dataset API, can pass one filename or filename list\n",
     "    dataset = tf.data.TextLineDataset(filenames).map(decode_libsvm, num_parallel_calls=10).prefetch(500000)    # multi-thread pre-process then prefetch\n",
     "\n",
     "    # Randomizes input using a window of 256 elements (read into memory)\n",
     "    if perform_shuffle:\n",
     "        dataset = dataset.shuffle(buffer_size=256)\n",
     "\n",
     "    # epochs from blending together.\n",
     "    dataset = dataset.repeat(num_epochs)\n",
     "    dataset = dataset.batch(batch_size) # Batch size to use\n",
     "\n",
     "    #return dataset.make_one_shot_iterator()\n",
     "    iterator = dataset.make_one_shot_iterator()\n",
     "    batch_features, batch_labels = iterator.get_next()\n",
     "    #return tf.reshape(batch_ids,shape=[-1,field_size]), tf.reshape(batch_vals,shape=[-1,field_size]), batch_labels\n",
     "    return batch_features, batch_labels\n",
    "\n",
    "def model_fn(features, labels, mode, params):\n",
    "    \"\"\"Bulid Model function f(x) for Estimator.\"\"\"\n",
    "    #------hyperparameters----\n",
    "    field_size = params[\"field_size\"]\n",
    "    feature_size = params[\"feature_size\"]\n",
    "    embedding_size = params[\"embedding_size\"]\n",
    "    l2_reg = params[\"l2_reg\"]\n",
    "    learning_rate = params[\"learning_rate\"]\n",
    "    #batch_norm_decay = params[\"batch_norm_decay\"]\n",
    "    #optimizer = params[\"optimizer\"]\n",
    "    layers = map(int, params[\"deep_layers\"].split(','))\n",
    "    dropout = map(float, params[\"dropout\"].split(','))\n",
    "\n",
    "    #------bulid weights------\n",
    "    FM_B = tf.get_variable(name='fm_bias', shape=[1], initializer=tf.constant_initializer(0.0))\n",
    "    FM_W = tf.get_variable(name='fm_w', shape=[feature_size], initializer=tf.glorot_normal_initializer())\n",
    "    # F\n",
    "    FM_V = tf.get_variable(name='fm_v', shape=[feature_size, embedding_size], initializer=tf.glorot_normal_initializer())\n",
    "    # F * E    \n",
    "    #------build feaure-------\n",
    "    feat_ids  = features['feat_ids']\n",
    "    feat_ids = tf.reshape(feat_ids,shape=[-1,field_size]) # None * f/K * K\n",
    "    feat_vals = features['feat_vals']\n",
    "    feat_vals = tf.reshape(feat_vals,shape=[-1,field_size]) # None * f/K * K\n",
    "\n",
    "    #------build f(x)------\n",
    "    with tf.variable_scope(\"First-order\"):\n",
    "        feat_wgts = tf.nn.embedding_lookup(FM_W, feat_ids) # None * f/K * K\n",
    "        y_w = tf.reduce_sum(tf.multiply(feat_wgts, feat_vals),1) \n",
    "\n",
    "    with tf.variable_scope(\"Second-order\"):\n",
    "        embeddings = tf.nn.embedding_lookup(FM_V, feat_ids) # None * f/K * K * E\n",
    "        feat_vals = tf.reshape(feat_vals, shape=[-1, field_size, 1]) # None * f/K * K * 1 ？\n",
    "        embeddings = tf.multiply(embeddings, feat_vals) #vij*xi  \n",
    "        sum_square = tf.square(tf.reduce_sum(embeddings,1)) # None * K * E\n",
    "        square_sum = tf.reduce_sum(tf.square(embeddings),1)\n",
    "        y_v = 0.5*tf.reduce_sum(tf.subtract(sum_square, square_sum),1)\t# None * 1\n",
    "\n",
    "    with tf.variable_scope(\"Deep-part\"):\n",
    "        if FLAGS.batch_norm:\n",
    "            #normalizer_fn = tf.contrib.layers.batch_norm\n",
    "            #normalizer_fn = tf.layers.batch_normalization\n",
    "            if mode == tf.estimator.ModeKeys.TRAIN:\n",
    "                train_phase = True\n",
    "                #normalizer_params = {'decay': batch_norm_decay, 'center': True, 'scale': True, 'updates_collections': None, 'is_training': True, 'reuse': None}\n",
    "            else:\n",
    "                train_phase = False\n",
    "                #normalizer_params = {'decay': batch_norm_decay, 'center': True, 'scale': True, 'updates_collections': None, 'is_training': False, 'reuse': True}\n",
    "        else:\n",
    "            normalizer_fn = None\n",
    "            normalizer_params = None\n",
    "\n",
    "        deep_inputs = tf.reshape(embeddings,shape=[-1,field_size*embedding_size]) # None * (F*K)\n",
    "        for i in range(len(layers)):\n",
    "            #if FLAGS.batch_norm:\n",
    "            #    deep_inputs = batch_norm_layer(deep_inputs, train_phase=train_phase, scope_bn='bn_%d' %i)\n",
    "                #normalizer_params.update({'scope': 'bn_%d' %i})\n",
    "            deep_inputs = tf.contrib.layers.fully_connected(inputs=deep_inputs, num_outputs=layers[i], \\\n",
    "                #normalizer_fn=normalizer_fn, normalizer_params=normalizer_params, \\\n",
    "                weights_regularizer=tf.contrib.layers.l2_regularizer(l2_reg), scope='mlp%d' % i)\n",
    "            if FLAGS.batch_norm:\n",
    "                deep_inputs = batch_norm_layer(deep_inputs, train_phase=train_phase, scope_bn='bn_%d' %i)   #放在RELU之后 https://github.com/ducha-aiki/caffenet-benchmark/blob/master/batchnorm.md#bn----before-or-after-relu\n",
    "            if mode == tf.estimator.ModeKeys.TRAIN:\n",
    "                deep_inputs = tf.nn.dropout(deep_inputs, keep_prob=dropout[i])                              #Apply Dropout after all BN layers and set dropout=0.8(drop_ratio=0.2)\n",
    "                #deep_inputs = tf.layers.dropout(inputs=deep_inputs, rate=dropout[i], training=mode == tf.estimator.ModeKeys.TRAIN)\n",
    "\n",
    "        y_deep = tf.contrib.layers.fully_connected(inputs=deep_inputs, num_outputs=1, activation_fn=tf.identity, \\\n",
    "                weights_regularizer=tf.contrib.layers.l2_regularizer(l2_reg), scope='deep_out')\n",
    "        y_d = tf.reshape(y_deep,shape=[-1])\n",
    "        #sig_wgts = tf.get_variable(name='sigmoid_weights', shape=[layers[-1]], initializer=tf.glorot_normal_initializer())\n",
    "        #sig_bias = tf.get_variable(name='sigmoid_bias', shape=[1], initializer=tf.constant_initializer(0.0))\n",
    "        #deep_out = tf.nn.xw_plus_b(deep_inputs,sig_wgts,sig_bias,name='deep_out')\n",
    "\n",
    "    with tf.variable_scope(\"DeepFM-out\"):\n",
    "        #y_bias = FM_B * tf.ones_like(labels, dtype=tf.float32)  # None * 1  warning;这里不能用label，否则调用predict/export函数会出错，train/evaluate正常；初步判断estimator做了优化，用不到label时不传\n",
    "        y_bias = FM_B * tf.ones_like(y_d, dtype=tf.float32)     # None * 1\n",
    "        y = y_bias + y_w + y_v + y_d\n",
    "        pred = tf.sigmoid(y)\n",
    "\n",
    "    predictions={\"prob\": pred}\n",
    "    export_outputs = {tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: tf.estimator.export.PredictOutput(predictions)}\n",
    "    # Provide an estimator spec for `ModeKeys.PREDICT`\n",
    "    if mode == tf.estimator.ModeKeys.PREDICT:\n",
    "        return tf.estimator.EstimatorSpec(\n",
    "                mode=mode,\n",
    "                predictions=predictions,\n",
    "                export_outputs=export_outputs)\n",
    "\n",
    "    #------bulid loss------\n",
    "    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y, labels=labels)) + \\\n",
    "        l2_reg * tf.nn.l2_loss(FM_W) + \\\n",
    "        l2_reg * tf.nn.l2_loss(FM_V) #+ \\ l2_reg * tf.nn.l2_loss(sig_wgts)\n",
    "\n",
    "    # Provide an estimator spec for `ModeKeys.EVAL`\n",
    "    eval_metric_ops = {\n",
    "        \"auc\": tf.metrics.auc(labels, pred)\n",
    "    }\n",
    "    if mode == tf.estimator.ModeKeys.EVAL:\n",
    "        return tf.estimator.EstimatorSpec(\n",
    "                mode=mode,\n",
    "                predictions=predictions,\n",
    "                loss=loss,\n",
    "                eval_metric_ops=eval_metric_ops)\n",
    "\n",
    "    #------bulid optimizer------\n",
    "    if FLAGS.optimizer == 'Adam':\n",
    "        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-8)\n",
    "    elif FLAGS.optimizer == 'Adagrad':\n",
    "        optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate, initial_accumulator_value=1e-8)\n",
    "    elif FLAGS.optimizer == 'Momentum':\n",
    "        optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.95)\n",
    "    elif FLAGS.optimizer == 'ftrl':\n",
    "        optimizer = tf.train.FtrlOptimizer(learning_rate)\n",
    "\n",
    "    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n",
    "\n",
    "    # Provide an estimator spec for `ModeKeys.TRAIN` modes\n",
    "    if mode == tf.estimator.ModeKeys.TRAIN:\n",
    "        return tf.estimator.EstimatorSpec(\n",
    "                mode=mode,\n",
    "                predictions=predictions,\n",
    "                loss=loss,\n",
    "                train_op=train_op)\n",
    "\n",
    "    # Provide an estimator spec for `ModeKeys.EVAL` and `ModeKeys.TRAIN` modes.\n",
    "    #return tf.estimator.EstimatorSpec(\n",
    "    #        mode=mode,\n",
    "    #        loss=loss,\n",
    "    #        train_op=train_op,\n",
    "    #        predictions={\"prob\": pred},\n",
    "    #        eval_metric_ops=eval_metric_ops)\n",
    "\n",
     "def batch_norm_layer(x, train_phase, scope_bn):\n",
     "    \"\"\"Batch-norm wrapper building train/infer variants in one shared scope.\n",
     "\n",
     "    The infer branch reuses the variables created by the train branch;\n",
     "    tf.cond selects the branch at run time based on train_phase.\n",
     "    \"\"\"\n",
     "    bn_train = tf.contrib.layers.batch_norm(x, decay=FLAGS.batch_norm_decay, center=True, scale=True, updates_collections=None, is_training=True,  reuse=None, scope=scope_bn)\n",
     "    bn_infer = tf.contrib.layers.batch_norm(x, decay=FLAGS.batch_norm_decay, center=True, scale=True, updates_collections=None, is_training=False, reuse=True, scope=scope_bn)\n",
     "    z = tf.cond(tf.cast(train_phase, tf.bool), lambda: bn_train, lambda: bn_infer)\n",
     "    return z\n",
    "\n",
     "def set_dist_env():\n",
     "    \"\"\"Populate the TF_CONFIG environment variable from FLAGS for distributed runs.\"\"\"\n",
     "    if FLAGS.dist_mode == 1:        # local distributed test mode: 1 chief, 1 ps, 1 evaluator\n",
     "        ps_hosts = FLAGS.ps_hosts.split(',')\n",
     "        # NOTE(review): no 'chief_hosts' flag is defined in this file, so this\n",
     "        # lookup will raise unless the flag is declared elsewhere — confirm\n",
     "        chief_hosts = FLAGS.chief_hosts.split(',')\n",
     "        task_index = FLAGS.task_index\n",
     "        job_name = FLAGS.job_name\n",
     "        print('ps_host', ps_hosts)\n",
     "        print('chief_hosts', chief_hosts)\n",
     "        print('job_name', job_name)\n",
     "        print('task_index', str(task_index))\n",
     "        # no separate worker hosts in this mode\n",
     "        tf_config = {\n",
     "            'cluster': {'chief': chief_hosts, 'ps': ps_hosts},\n",
     "            'task': {'type': job_name, 'index': task_index }\n",
     "        }\n",
     "        print(json.dumps(tf_config))\n",
     "        os.environ['TF_CONFIG'] = json.dumps(tf_config)\n",
     "    elif FLAGS.dist_mode == 2:      # cluster distributed mode\n",
     "        ps_hosts = FLAGS.ps_hosts.split(',')\n",
     "        worker_hosts = FLAGS.worker_hosts.split(',')\n",
     "        chief_hosts = worker_hosts[0:1] # get first worker as chief\n",
     "        worker_hosts = worker_hosts[2:] # the rest as worker\n",
     "        task_index = FLAGS.task_index\n",
     "        job_name = FLAGS.job_name\n",
     "        print('ps_host', ps_hosts)\n",
     "        print('worker_host', worker_hosts)\n",
     "        print('chief_hosts', chief_hosts)\n",
     "        print('job_name', job_name)\n",
     "        print('task_index', str(task_index))\n",
     "        # use #worker=0 as chief\n",
     "        if job_name == \"worker\" and task_index == 0:\n",
     "            job_name = \"chief\"\n",
     "        # use #worker=1 as evaluator\n",
     "        if job_name == \"worker\" and task_index == 1:\n",
     "            job_name = 'evaluator'\n",
     "            task_index = 0\n",
     "        # the others as worker\n",
     "        if job_name == \"worker\" and task_index > 1:\n",
     "            task_index -= 2\n",
     "\n",
     "        tf_config = {\n",
     "            'cluster': {'chief': chief_hosts, 'worker': worker_hosts, 'ps': ps_hosts},\n",
     "            'task': {'type': job_name, 'index': task_index }\n",
     "        }\n",
     "        print(json.dumps(tf_config))\n",
     "        os.environ['TF_CONFIG'] = json.dumps(tf_config)\n",
    "\n",
     "def main(_):\n",
     "    \"\"\"Entry point: glob the data files, build the Estimator, and dispatch\n",
     "    on FLAGS.task_type (train / eval / infer / export).\n",
     "    \"\"\"\n",
     "    tr_files = glob.glob(\"%s/tr*libsvm\" % FLAGS.data_dir)\n",
     "    random.shuffle(tr_files)\n",
     "    print(\"tr_files:\", tr_files)\n",
     "    va_files = glob.glob(\"%s/va*libsvm\" % FLAGS.data_dir)\n",
     "    print(\"va_files:\", va_files)\n",
     "    te_files = glob.glob(\"%s/te*libsvm\" % FLAGS.data_dir)\n",
     "    print(\"te_files:\", te_files)\n",
     "\n",
     "    if FLAGS.clear_existing_model:\n",
     "        try:\n",
     "            shutil.rmtree(FLAGS.model_dir)\n",
     "        except Exception as e:\n",
     "            print(e, \"at clear_existing_model\")\n",
     "        else:\n",
     "            print(\"existing model cleaned at %s\" % FLAGS.model_dir)\n",
     "\n",
     "    set_dist_env()\n",
     "\n",
     "    model_params = {\n",
     "        \"field_size\": FLAGS.field_size,\n",
     "        \"feature_size\": FLAGS.feature_size,\n",
     "        \"embedding_size\": FLAGS.embedding_size,\n",
     "        \"learning_rate\": FLAGS.learning_rate,\n",
     "        \"batch_norm_decay\": FLAGS.batch_norm_decay,\n",
     "        \"l2_reg\": FLAGS.l2_reg,\n",
     "        \"deep_layers\": FLAGS.deep_layers,\n",
     "        \"dropout\": FLAGS.dropout\n",
     "    }\n",
     "    # CPU-only session; logging/summary cadence comes from FLAGS.log_steps\n",
     "    config = tf.estimator.RunConfig().replace(session_config = tf.ConfigProto(device_count={'GPU':0, 'CPU':FLAGS.num_threads}),\n",
     "            log_step_count_steps=FLAGS.log_steps, save_summary_steps=FLAGS.log_steps)\n",
     "    DeepFM = tf.estimator.Estimator(model_fn=model_fn, model_dir=FLAGS.model_dir, params=model_params, config=config)\n",
     "\n",
     "    if FLAGS.task_type == 'train':\n",
     "        train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_fn(tr_files, num_epochs=FLAGS.num_epochs, batch_size=FLAGS.batch_size))\n",
     "        eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_fn(va_files, num_epochs=1, batch_size=FLAGS.batch_size), steps=None, start_delay_secs=1000, throttle_secs=1200)\n",
     "        tf.estimator.train_and_evaluate(DeepFM, train_spec, eval_spec)\n",
     "    elif FLAGS.task_type == 'eval':\n",
     "        DeepFM.evaluate(input_fn=lambda: input_fn(va_files, num_epochs=1, batch_size=FLAGS.batch_size))\n",
     "    elif FLAGS.task_type == 'infer':\n",
     "        preds = DeepFM.predict(input_fn=lambda: input_fn(te_files, num_epochs=1, batch_size=FLAGS.batch_size), predict_keys=\"prob\")\n",
     "        with open(FLAGS.data_dir+\"/pred.txt\", \"w\") as fo:\n",
     "            for prob in preds:\n",
     "                fo.write(\"%f\\n\" % (prob['prob']))\n",
     "    elif FLAGS.task_type == 'export':\n",
     "        #feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)\n",
     "        #feature_spec = {\n",
     "        #    'feat_ids': tf.FixedLenFeature(dtype=tf.int64, shape=[None, FLAGS.field_size]),\n",
     "        #    'feat_vals': tf.FixedLenFeature(dtype=tf.float32, shape=[None, FLAGS.field_size])\n",
     "        #}\n",
     "        #serving_input_receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)\n",
     "        # raw (placeholder-fed) serving input, matching the model_fn feature dict\n",
     "        feature_spec = {\n",
     "            'feat_ids': tf.placeholder(dtype=tf.int64, shape=[None, FLAGS.field_size], name='feat_ids'),\n",
     "            'feat_vals': tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.field_size], name='feat_vals')\n",
     "        }\n",
     "        serving_input_receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)\n",
     "        DeepFM.export_savedmodel(FLAGS.servable_model_dir, serving_input_receiver_fn)\n",
    "\n",
     "if __name__ == \"__main__\":\n",
     "    #------check Arguments------\n",
     "    # default dt partition is yesterday's date (YYYYMMDD)\n",
     "    if FLAGS.dt_dir == \"\":\n",
     "        FLAGS.dt_dir = (date.today() + timedelta(-1)).strftime('%Y%m%d')\n",
     "    FLAGS.model_dir = FLAGS.model_dir + FLAGS.dt_dir\n",
     "    #FLAGS.data_dir  = FLAGS.data_dir + FLAGS.dt_dir\n",
     "\n",
     "    # echo the effective configuration before launching\n",
     "    print('task_type ', FLAGS.task_type)\n",
     "    print('model_dir ', FLAGS.model_dir)\n",
     "    print('data_dir ', FLAGS.data_dir)\n",
     "    print('dt_dir ', FLAGS.dt_dir)\n",
     "    print('num_epochs ', FLAGS.num_epochs)\n",
     "    print('feature_size ', FLAGS.feature_size)\n",
     "    print('field_size ', FLAGS.field_size)\n",
     "    print('embedding_size ', FLAGS.embedding_size)\n",
     "    print('batch_size ', FLAGS.batch_size)\n",
     "    print('deep_layers ', FLAGS.deep_layers)\n",
     "    print('dropout ', FLAGS.dropout)\n",
     "    print('loss_type ', FLAGS.loss_type)\n",
     "    print('optimizer ', FLAGS.optimizer)\n",
     "    print('learning_rate ', FLAGS.learning_rate)\n",
     "    print('batch_norm_decay ', FLAGS.batch_norm_decay)\n",
     "    print('batch_norm ', FLAGS.batch_norm)\n",
     "    print('l2_reg ', FLAGS.l2_reg)\n",
     "\n",
     "    tf.logging.set_verbosity(tf.logging.INFO)\n",
     "    tf.app.run()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "('task_type ', 'train')\n",
      "('model_dir ', './criteo_model20180503')\n",
      "('data_dir ', './criteo_data')\n",
      "('dt_dir ', '20180503')\n",
      "('num_epochs ', 1)\n",
      "('feature_size ', 117581)\n",
      "('field_size ', 39)\n",
      "('embedding_size ', 32)\n",
      "('batch_size ', 256)\n",
      "('deep_layers ', '400,400,400')\n",
      "('dropout ', '0.5,0.5,0.5')\n",
      "('loss_type ', 'log_loss')\n",
      "('optimizer ', 'Adam')\n",
      "('learning_rate ', 0.0005)\n",
      "('batch_norm_decay ', 0.9)\n",
      "('batch_norm ', False)\n",
      "('l2_reg ', 0.0001)\n",
      "('tr_files:', ['./criteo_data/tr.libsvm'])\n",
      "('va_files:', ['./criteo_data/va.libsvm'])\n",
      "('te_files:', ['./criteo_data/te.libsvm'])\n",
      "INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600, '_session_config': device_count {\n",
      "  key: \"CPU\"\n",
      "  value: 8\n",
      "}\n",
      "device_count {\n",
      "  key: \"GPU\"\n",
      "}\n",
      ", '_keep_checkpoint_max': 5, '_task_type': 'worker', '_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x537c4d0>, '_save_checkpoints_steps': None, '_keep_checkpoint_every_n_hours': 10000, '_service': None, '_num_ps_replicas': 0, '_tf_random_seed': None, '_master': '', '_num_worker_replicas': 1, '_task_id': 0, '_log_step_count_steps': 1000, '_model_dir': './criteo_model', '_save_summary_steps': 1000}\n",
      "INFO:tensorflow:Running training and evaluation locally (non-distributed).\n",
      "INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after 1200 secs (eval_spec.throttle_secs) or training is finished.\n",
      "('Parsing', ['./criteo_data/tr.libsvm'])\n",
      "INFO:tensorflow:Create CheckpointSaverHook.\n",
      "2018-05-04 23:34:53.147375: I tensorflow/core/platform/cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA\n",
      "INFO:tensorflow:Saving checkpoints for 1 into ./criteo_model/model.ckpt.\n",
      "INFO:tensorflow:loss = 0.6947804, step = 1\n",
      "INFO:tensorflow:loss = 0.51585126, step = 101 (4.948 sec)\n",
      "INFO:tensorflow:loss = 0.4950318, step = 201 (4.408 sec)\n",
      "INFO:tensorflow:loss = 0.5462832, step = 301 (4.357 sec)\n",
      "INFO:tensorflow:loss = 0.5671505, step = 401 (4.368 sec)\n",
      "INFO:tensorflow:loss = 0.45424744, step = 501 (4.300 sec)\n",
      "INFO:tensorflow:loss = 0.5399899, step = 601 (4.274 sec)\n",
      "INFO:tensorflow:loss = 0.49540266, step = 701 (4.234 sec)\n",
      "INFO:tensorflow:loss = 0.5175852, step = 801 (4.294 sec)\n",
      "INFO:tensorflow:loss = 0.4686305, step = 901 (4.314 sec)\n",
      "INFO:tensorflow:global_step/sec: 22.8576\n",
      "INFO:tensorflow:loss = 0.5371931, step = 1001 (4.254 sec)\n",
      "INFO:tensorflow:loss = 0.49340367, step = 1101 (4.243 sec)\n",
      "INFO:tensorflow:loss = 0.49719507, step = 1201 (4.346 sec)\n",
      "INFO:tensorflow:loss = 0.48593232, step = 1301 (4.225 sec)\n",
      "INFO:tensorflow:loss = 0.48725832, step = 1401 (4.238 sec)\n",
      "INFO:tensorflow:loss = 0.4386774, step = 1501 (4.361 sec)\n",
      "INFO:tensorflow:loss = 0.49065983, step = 1601 (4.312 sec)\n",
      "INFO:tensorflow:loss = 0.53164876, step = 1701 (4.272 sec)\n",
      "INFO:tensorflow:loss = 0.40944415, step = 1801 (4.286 sec)\n",
      "INFO:tensorflow:loss = 0.521611, step = 1901 (4.270 sec)\n",
      "INFO:tensorflow:global_step/sec: 23.327\n",
      "INFO:tensorflow:loss = 0.49082595, step = 2001 (4.317 sec)\n",
      "INFO:tensorflow:loss = 0.50453734, step = 2101 (4.302 sec)\n",
      "INFO:tensorflow:loss = 0.49503702, step = 2201 (4.369 sec)\n",
      "INFO:tensorflow:loss = 0.45685932, step = 2301 (4.326 sec)\n",
      "INFO:tensorflow:loss = 0.47562104, step = 2401 (4.326 sec)\n",
      "INFO:tensorflow:loss = 0.5106457, step = 2501 (4.366 sec)\n",
      "INFO:tensorflow:loss = 0.4949795, step = 2601 (4.408 sec)\n",
      "INFO:tensorflow:loss = 0.4684176, step = 2701 (4.442 sec)\n",
      "INFO:tensorflow:loss = 0.43745354, step = 2801 (4.457 sec)\n",
      "INFO:tensorflow:loss = 0.48600715, step = 2901 (4.490 sec)\n",
      "INFO:tensorflow:global_step/sec: 22.7801\n",
      "INFO:tensorflow:loss = 0.4853104, step = 3001 (4.412 sec)\n",
      "INFO:tensorflow:loss = 0.49764964, step = 3101 (4.420 sec)\n",
      "INFO:tensorflow:loss = 0.4432894, step = 3201 (4.496 sec)\n",
      "INFO:tensorflow:loss = 0.46213925, step = 3301 (4.479 sec)\n",
      "INFO:tensorflow:loss = 0.4637582, step = 3401 (4.582 sec)\n",
      "INFO:tensorflow:loss = 0.46756223, step = 3501 (4.504 sec)\n",
      "INFO:tensorflow:loss = 0.46732077, step = 3601 (4.464 sec)\n"
     ]
    }
   ],
   "source": [
    "!python DeepFM.py   --task_type=train \\\n",
    "                    --learning_rate=0.0005 \\\n",
    "                    --optimizer=Adam \\\n",
    "                    --num_epochs=1 \\\n",
    "                    --batch_size=256 \\\n",
    "                    --field_size=39 \\\n",
    "                    --feature_size=117581 \\\n",
    "                    --deep_layers=400,400,400 \\\n",
    "                    --dropout=0.5,0.5,0.5 \\\n",
    "                    --log_steps=1000 \\\n",
    "                    --num_threads=8 \\\n",
    "                    --model_dir=./criteo_model/DeepFM \\\n",
    "                    --data_dir=./criteo_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### NFM\n",
    "reference：<br>[从ctr预估问题看看f(x)设计—DNN篇](https://zhuanlan.zhihu.com/p/28202287)<br> [深度学习在CTR预估中的应用](https://zhuanlan.zhihu.com/p/35484389)\n",
    "\n",
    "NFM = LR + Embedding + Bi-Interaction Pooling + MLP\n",
    "\n",
    "![](https://pic4.zhimg.com/v2-582ade4feb65a88b828942c460e08192_r.jpg)\n",
    "\n",
    "对不同特征做相同维数的embedding向量。接下来，这些embedding向量两两做element-wise的相乘运算得到B-interaction layer。(element-wise运算举例: (1,2,3)element-wise相乘(4,5,6)结果是(4,10,18)。)\n",
    "\n",
    "B-interaction Layer 得到的是一个和embedding维数相同的向量。然后后面接几个隐藏层输出结果。\n",
    "\n",
    "大家思考一下，如果B-interaction layer后面不接隐藏层，直接把向量的元素相加输出结果, 就是一个FM，现在后面增加了隐藏层，相当于做了更高阶的FM，更加增强了非线性表达能力。\n",
    "\n",
    "NFM 在 embedding 做了 bi-interaction 操作来做特征的交叉处理，优点是网络参数从 n 直接压缩到 k（比 FNN 和 DeepFM 的 f\\*k 还少），降低了网络复杂度，能够加速网络的训练得到模型；但同时这种方法也可能带来较大的信息损失。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# %load NFM.py\n",
    "#!/usr/bin/env python\n",
    "\"\"\"\n",
    "TensorFlow Implementation of <<Neural Factorization Machines for Sparse Predictive Analytics>> with the following features：\n",
    "#1 Input pipeline using Dataset high level API, Support parallel and prefetch reading\n",
    "#2 Train pipeline using Custom Estimator by rewriting model_fn\n",
    "#3 Support distributed training by TF_CONFIG\n",
    "#4 Support export model for TensorFlow Serving\n",
    "\n",
    "by lambdaji\n",
    "\"\"\"\n",
    "#from __future__ import absolute_import\n",
    "#from __future__ import division\n",
    "#from __future__ import print_function\n",
    "\n",
    "#import argparse\n",
    "import shutil\n",
    "#import sys\n",
    "import os\n",
    "import json\n",
    "import glob\n",
    "from datetime import date, timedelta\n",
    "from time import time\n",
    "#import gc\n",
    "#from multiprocessing import Process\n",
    "\n",
    "#import math\n",
    "import random\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "\n",
    "#################### CMD Arguments ####################\n",
    "# Every hyperparameter / path is exposed as a TF flag so the script can be\n",
    "# driven entirely from the command line (see the !python invocation below).\n",
    "FLAGS = tf.app.flags.FLAGS\n",
    "tf.app.flags.DEFINE_integer(\"dist_mode\", 0, \"distribuion mode {0-loacal, 1-single_dist, 2-multi_dist}\")\n",
    "tf.app.flags.DEFINE_string(\"ps_hosts\", '', \"Comma-separated list of hostname:port pairs\")\n",
    "tf.app.flags.DEFINE_string(\"worker_hosts\", '', \"Comma-separated list of hostname:port pairs\")\n",
    "tf.app.flags.DEFINE_string(\"job_name\", '', \"One of 'ps', 'worker'\")\n",
    "tf.app.flags.DEFINE_integer(\"task_index\", 0, \"Index of task within the job\")\n",
    "tf.app.flags.DEFINE_integer(\"num_threads\", 16, \"Number of threads\")\n",
    "tf.app.flags.DEFINE_integer(\"feature_size\", 0, \"Number of features\")\n",
    "tf.app.flags.DEFINE_integer(\"field_size\", 0, \"Number of fields\")\n",
    "tf.app.flags.DEFINE_integer(\"embedding_size\", 64, \"Embedding size\")\n",
    "tf.app.flags.DEFINE_integer(\"num_epochs\", 10, \"Number of epochs\")\n",
    "tf.app.flags.DEFINE_integer(\"batch_size\", 128, \"Number of batch size\")\n",
    "tf.app.flags.DEFINE_integer(\"log_steps\", 1000, \"save summary every steps\")\n",
    "tf.app.flags.DEFINE_float(\"learning_rate\", 0.05, \"learning rate\")\n",
    "tf.app.flags.DEFINE_float(\"l2_reg\", 0.001, \"L2 regularization\")\n",
    "tf.app.flags.DEFINE_string(\"loss_type\", 'log_loss', \"loss type {square_loss, log_loss}\")\n",
    "tf.app.flags.DEFINE_string(\"optimizer\", 'Adam', \"optimizer type {Adam, Adagrad, GD, Momentum}\")\n",
    "tf.app.flags.DEFINE_string(\"deep_layers\", '128,64', \"deep layers\")\n",
    "tf.app.flags.DEFINE_string(\"dropout\", '0.5,0.8,0.8', \"dropout rate\")\n",
    "tf.app.flags.DEFINE_boolean(\"batch_norm\", False, \"perform batch normaization (True or False)\")\n",
    "tf.app.flags.DEFINE_float(\"batch_norm_decay\", 0.9, \"decay for the moving average(recommend trying decay=0.9)\")\n",
    "tf.app.flags.DEFINE_string(\"data_dir\", '', \"data dir\")\n",
    "tf.app.flags.DEFINE_string(\"dt_dir\", '', \"data dt partition\")\n",
    "tf.app.flags.DEFINE_string(\"model_dir\", '', \"model check point dir\")\n",
    "tf.app.flags.DEFINE_string(\"servable_model_dir\", '', \"export servable model for TensorFlow Serving\")\n",
    "tf.app.flags.DEFINE_string(\"task_type\", 'train', \"task type {train, infer, eval, export}\")\n",
    "tf.app.flags.DEFINE_boolean(\"clear_existing_model\", False, \"clear existing model or not\")\n",
    "#1 1:0.5 2:0.03519 3:1 4:0.02567 7:0.03708 8:0.01705 9:0.06296 10:0.18185 11:0.02497 12:1 14:0.02565 15:0.03267 17:0.0247 18:0.03158 20:1 22:1 23:0.13169 24:0.02933 27:0.18159 31:0.0177 34:0.02888 38:1 51:1 63:1 132:1 164:1 236:1\n",
    "# ^ example libsvm input line: '<label> <feat_id>:<feat_val> ...'\n",
    "def input_fn(filenames, batch_size=32, num_epochs=1, perform_shuffle=False):\n",
    "    \"\"\"Build a tf.data input pipeline over libsvm-formatted text files.\n",
    "\n",
    "    Args:\n",
    "        filenames: a single filename or a list of filenames.\n",
    "        batch_size: number of examples per batch.\n",
    "        num_epochs: number of passes over the data.\n",
    "        perform_shuffle: if True, shuffle with a 256-element buffer.\n",
    "\n",
    "    Returns:\n",
    "        (batch_features, batch_labels) tensors, where batch_features is a\n",
    "        dict with 'feat_ids' and 'feat_vals'.\n",
    "    \"\"\"\n",
    "    print('Parsing', filenames)\n",
    "    def decode_libsvm(line):\n",
    "        # Parse one '<label> <id>:<val> ...' text line into id/value tensors.\n",
    "        #columns = tf.decode_csv(value, record_defaults=CSV_COLUMN_DEFAULTS)\n",
    "        #features = dict(zip(CSV_COLUMNS, columns))\n",
    "        #labels = features.pop(LABEL_COLUMN)\n",
    "        columns = tf.string_split([line], ' ')\n",
    "        labels = tf.string_to_number(columns.values[0], out_type=tf.float32)\n",
    "        # Split each remaining 'id:val' token into two columns.\n",
    "        splits = tf.string_split(columns.values[1:], ':')\n",
    "        id_vals = tf.reshape(splits.values,splits.dense_shape)\n",
    "        feat_ids, feat_vals = tf.split(id_vals,num_or_size_splits=2,axis=1)\n",
    "        feat_ids = tf.string_to_number(feat_ids, out_type=tf.int32)\n",
    "        feat_vals = tf.string_to_number(feat_vals, out_type=tf.float32)\n",
    "        return {\"feat_ids\": feat_ids, \"feat_vals\": feat_vals}, labels\n",
    "\n",
    "    # Extract lines from input files using the Dataset API, can pass one filename or filename list\n",
    "    dataset = tf.data.TextLineDataset(filenames).map(decode_libsvm, num_parallel_calls=10).prefetch(500000)    # multi-thread pre-process then prefetch\n",
    "\n",
    "    # Randomizes input using a window of 256 elements (read into memory)\n",
    "    if perform_shuffle:\n",
    "        dataset = dataset.shuffle(buffer_size=256)\n",
    "\n",
    "    # epochs from blending together.\n",
    "    dataset = dataset.repeat(num_epochs)\n",
    "    dataset = dataset.batch(batch_size) # Batch size to use\n",
    "\n",
    "    # One-shot iterator: suitable for Estimator input_fn (no initialization op).\n",
    "    iterator = dataset.make_one_shot_iterator()\n",
    "    batch_features, batch_labels = iterator.get_next()\n",
    "    #return tf.reshape(batch_ids,shape=[-1,field_size]), tf.reshape(batch_vals,shape=[-1,field_size]), batch_labels\n",
    "    return batch_features, batch_labels\n",
    "\n",
    "def model_fn(features, labels, mode, params):\n",
    "    \"\"\"Build the NFM model function f(x) for a custom tf.estimator.Estimator.\n",
    "\n",
    "    Args:\n",
    "        features: dict with 'feat_ids' and 'feat_vals' from input_fn.\n",
    "        labels: float 0/1 click labels (not fed in predict/export mode).\n",
    "        mode: one of tf.estimator.ModeKeys.{TRAIN,EVAL,PREDICT}.\n",
    "        params: dict with field_size, feature_size, embedding_size, l2_reg,\n",
    "            learning_rate, deep_layers (csv string), dropout (csv string).\n",
    "\n",
    "    Returns:\n",
    "        tf.estimator.EstimatorSpec appropriate to the requested mode.\n",
    "    \"\"\"\n",
    "    #------hyperparameters----\n",
    "    field_size = params[\"field_size\"]\n",
    "    feature_size = params[\"feature_size\"]\n",
    "    embedding_size = params[\"embedding_size\"]\n",
    "    l2_reg = params[\"l2_reg\"]\n",
    "    learning_rate = params[\"learning_rate\"]\n",
    "    #optimizer = params[\"optimizer\"]\n",
    "    # Wrap map() in list(): under Python 3, map() returns a lazy iterator, so\n",
    "    # the later len(layers) and dropout[i] indexing would raise TypeError.\n",
    "    # list() is a semantic no-op under Python 2, where map() already returns a list.\n",
    "    layers = list(map(int, params[\"deep_layers\"].split(',')))\n",
    "    dropout = list(map(float, params[\"dropout\"].split(',')))\n",
    "\n",
    "    #------build weights------\n",
    "    Global_Bias = tf.get_variable(name='bias', shape=[1], initializer=tf.constant_initializer(0.0))\n",
    "    Feat_Bias = tf.get_variable(name='linear', shape=[feature_size], initializer=tf.glorot_normal_initializer())\n",
    "    Feat_Emb = tf.get_variable(name='emb', shape=[feature_size,embedding_size], initializer=tf.glorot_normal_initializer())\n",
    "\n",
    "    #------build feature-------\n",
    "    feat_ids  = features['feat_ids']\n",
    "    feat_ids = tf.reshape(feat_ids,shape=[-1,field_size])\n",
    "    feat_vals = features['feat_vals']\n",
    "    feat_vals = tf.reshape(feat_vals,shape=[-1,field_size])\n",
    "\n",
    "    #------build f(x)------\n",
    "    with tf.variable_scope(\"Linear-part\"):\n",
    "        feat_wgts = tf.nn.embedding_lookup(Feat_Bias, feat_ids)  # None * F * 1\n",
    "        y_linear = tf.reduce_sum(tf.multiply(feat_wgts, feat_vals),1)\n",
    "\n",
    "    with tf.variable_scope(\"BiInter-part\"):\n",
    "        embeddings = tf.nn.embedding_lookup(Feat_Emb, feat_ids)  # None * F * K\n",
    "        feat_vals = tf.reshape(feat_vals, shape=[-1, field_size, 1])\n",
    "        embeddings = tf.multiply(embeddings, feat_vals)  # vij * xi\n",
    "        # Bi-Interaction pooling via the FM sum-square/square-sum identity:\n",
    "        # 0.5 * ((sum_i v_i x_i)^2 - sum_i (v_i x_i)^2), yielding a None * K vector.\n",
    "        sum_square_emb = tf.square(tf.reduce_sum(embeddings,1))\n",
    "        square_sum_emb = tf.reduce_sum(tf.square(embeddings),1)\n",
    "        deep_inputs = 0.5*tf.subtract(sum_square_emb, square_sum_emb)  # None * K\n",
    "\n",
    "    with tf.variable_scope(\"Deep-part\"):\n",
    "        if mode == tf.estimator.ModeKeys.TRAIN:\n",
    "            train_phase = True\n",
    "        else:\n",
    "            train_phase = False\n",
    "\n",
    "        # Dropout only at training time; dropout[0] applies to the pooled input.\n",
    "        if mode == tf.estimator.ModeKeys.TRAIN:\n",
    "            deep_inputs = tf.nn.dropout(deep_inputs, keep_prob=dropout[0])  # None * K\n",
    "        for i in range(len(layers)):\n",
    "            deep_inputs = tf.contrib.layers.fully_connected(inputs=deep_inputs, num_outputs=layers[i], \\\n",
    "                weights_regularizer=tf.contrib.layers.l2_regularizer(l2_reg), scope='mlp%d' % i)\n",
    "\n",
    "            if FLAGS.batch_norm:\n",
    "                deep_inputs = batch_norm_layer(deep_inputs, train_phase=train_phase, scope_bn='bn_%d' %i)   # BN placed after ReLU, see https://github.com/ducha-aiki/caffenet-benchmark/blob/master/batchnorm.md#bn----before-or-after-relu\n",
    "            if mode == tf.estimator.ModeKeys.TRAIN:\n",
    "                deep_inputs = tf.nn.dropout(deep_inputs, keep_prob=dropout[i])                              #Apply Dropout after all BN layers and set dropout=0.8(drop_ratio=0.2)\n",
    "                #deep_inputs = tf.layers.dropout(inputs=deep_inputs, rate=dropout[i], training=mode == tf.estimator.ModeKeys.TRAIN)\n",
    "\n",
    "        # Final linear projection to a single logit per example.\n",
    "        y_deep = tf.contrib.layers.fully_connected(inputs=deep_inputs, num_outputs=1, activation_fn=tf.identity, \\\n",
    "            weights_regularizer=tf.contrib.layers.l2_regularizer(l2_reg), scope='deep_out')\n",
    "        y_d = tf.reshape(y_deep,shape=[-1])\n",
    "\n",
    "    with tf.variable_scope(\"NFM-out\"):\n",
    "        #y_bias = Global_Bias * tf.ones_like(labels, dtype=tf.float32)  # None * 1  warning: do not use labels here, or predict/export break while train/evaluate work -- the Estimator apparently skips feeding labels when unused\n",
    "        y_bias = Global_Bias * tf.ones_like(y_d, dtype=tf.float32)  # None * 1\n",
    "        y = y_bias + y_linear + y_d\n",
    "        pred = tf.sigmoid(y)\n",
    "\n",
    "    predictions={\"prob\": pred}\n",
    "    export_outputs = {tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: tf.estimator.export.PredictOutput(predictions)}\n",
    "    # Provide an estimator spec for `ModeKeys.PREDICT`\n",
    "    if mode == tf.estimator.ModeKeys.PREDICT:\n",
    "        return tf.estimator.EstimatorSpec(\n",
    "                mode=mode,\n",
    "                predictions=predictions,\n",
    "                export_outputs=export_outputs)\n",
    "\n",
    "    #------build loss------\n",
    "    # Log-loss plus explicit L2 on the linear and embedding tables.\n",
    "    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y, labels=labels)) + \\\n",
    "        l2_reg * tf.nn.l2_loss(Feat_Bias) + l2_reg * tf.nn.l2_loss(Feat_Emb)\n",
    "\n",
    "    # Provide an estimator spec for `ModeKeys.EVAL`\n",
    "    eval_metric_ops = {\n",
    "        \"auc\": tf.metrics.auc(labels, pred)\n",
    "    }\n",
    "    if mode == tf.estimator.ModeKeys.EVAL:\n",
    "        return tf.estimator.EstimatorSpec(\n",
    "                mode=mode,\n",
    "                predictions=predictions,\n",
    "                loss=loss,\n",
    "                eval_metric_ops=eval_metric_ops)\n",
    "\n",
    "    #------build optimizer------\n",
    "    if FLAGS.optimizer == 'Adam':\n",
    "        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-8)\n",
    "    elif FLAGS.optimizer == 'Adagrad':\n",
    "        optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate, initial_accumulator_value=1e-8)\n",
    "    elif FLAGS.optimizer == 'Momentum':\n",
    "        optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.95)\n",
    "    elif FLAGS.optimizer == 'ftrl':\n",
    "        optimizer = tf.train.FtrlOptimizer(learning_rate)\n",
    "\n",
    "    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n",
    "\n",
    "    # Provide an estimator spec for `ModeKeys.TRAIN` modes\n",
    "    if mode == tf.estimator.ModeKeys.TRAIN:\n",
    "        return tf.estimator.EstimatorSpec(\n",
    "                mode=mode,\n",
    "                predictions=predictions,\n",
    "                loss=loss,\n",
    "                train_op=train_op)\n",
    "\n",
    "    # Provide an estimator spec for `ModeKeys.EVAL` and `ModeKeys.TRAIN` modes.\n",
    "    #return tf.estimator.EstimatorSpec(\n",
    "    #        mode=mode,\n",
    "    #        loss=loss,\n",
    "    #        train_op=train_op,\n",
    "    #        predictions={\"prob\": pred},\n",
    "    #        eval_metric_ops=eval_metric_ops)\n",
    "\n",
    "def batch_norm_layer(x, train_phase, scope_bn):\n",
    "    \"\"\"Batch-norm wrapper building train and inference branches over one scope.\n",
    "\n",
    "    The first call creates the BN variables (reuse=None, is_training=True); the\n",
    "    second shares them (reuse=True, is_training=False). tf.cond then selects the\n",
    "    branch at runtime according to train_phase.\n",
    "    \"\"\"\n",
    "    bn_train = tf.contrib.layers.batch_norm(x, decay=FLAGS.batch_norm_decay, center=True, scale=True, updates_collections=None, is_training=True,  reuse=None, scope=scope_bn)\n",
    "    bn_infer = tf.contrib.layers.batch_norm(x, decay=FLAGS.batch_norm_decay, center=True, scale=True, updates_collections=None, is_training=False, reuse=True, scope=scope_bn)\n",
    "    z = tf.cond(tf.cast(train_phase, tf.bool), lambda: bn_train, lambda: bn_infer)\n",
    "    return z\n",
    "\n",
    "def set_dist_env():\n",
    "    \"\"\"Populate the TF_CONFIG environment variable for distributed training.\n",
    "\n",
    "    dist_mode 0 leaves the environment untouched (local run); mode 1 is a local\n",
    "    distributed test layout; mode 2 maps the worker host list onto the\n",
    "    chief/evaluator/worker roles expected by tf.estimator.train_and_evaluate.\n",
    "    \"\"\"\n",
    "    if FLAGS.dist_mode == 1:        # local distributed test mode: 1 chief, 1 ps, 1 evaluator\n",
    "        ps_hosts = FLAGS.ps_hosts.split(',')\n",
    "        # NOTE(review): no 'chief_hosts' flag is defined in the CMD Arguments\n",
    "        # block above, so this line would raise AttributeError if dist_mode==1\n",
    "        # is ever used -- confirm the intended source of the chief host list\n",
    "        # (possibly the first entry of worker_hosts, as in mode 2).\n",
    "        chief_hosts = FLAGS.chief_hosts.split(',')\n",
    "        task_index = FLAGS.task_index\n",
    "        job_name = FLAGS.job_name\n",
    "        print('ps_host', ps_hosts)\n",
    "        print('chief_hosts', chief_hosts)\n",
    "        print('job_name', job_name)\n",
    "        print('task_index', str(task_index))\n",
    "        # no 'worker' entry in the cluster spec for this mode\n",
    "        tf_config = {\n",
    "            'cluster': {'chief': chief_hosts, 'ps': ps_hosts},\n",
    "            'task': {'type': job_name, 'index': task_index }\n",
    "        }\n",
    "        print(json.dumps(tf_config))\n",
    "        os.environ['TF_CONFIG'] = json.dumps(tf_config)\n",
    "    elif FLAGS.dist_mode == 2:      # cluster distributed mode\n",
    "        ps_hosts = FLAGS.ps_hosts.split(',')\n",
    "        worker_hosts = FLAGS.worker_hosts.split(',')\n",
    "        chief_hosts = worker_hosts[0:1] # get first worker as chief\n",
    "        worker_hosts = worker_hosts[2:] # the rest as worker\n",
    "        task_index = FLAGS.task_index\n",
    "        job_name = FLAGS.job_name\n",
    "        print('ps_host', ps_hosts)\n",
    "        print('worker_host', worker_hosts)\n",
    "        print('chief_hosts', chief_hosts)\n",
    "        print('job_name', job_name)\n",
    "        print('task_index', str(task_index))\n",
    "        # use #worker=0 as chief\n",
    "        if job_name == \"worker\" and task_index == 0:\n",
    "            job_name = \"chief\"\n",
    "        # use #worker=1 as evaluator\n",
    "        if job_name == \"worker\" and task_index == 1:\n",
    "            job_name = 'evaluator'\n",
    "            task_index = 0\n",
    "        # the others as worker\n",
    "        if job_name == \"worker\" and task_index > 1:\n",
    "            task_index -= 2\n",
    "\n",
    "        tf_config = {\n",
    "            'cluster': {'chief': chief_hosts, 'worker': worker_hosts, 'ps': ps_hosts},\n",
    "            'task': {'type': job_name, 'index': task_index }\n",
    "        }\n",
    "        print(json.dumps(tf_config))\n",
    "        os.environ['TF_CONFIG'] = json.dumps(tf_config)\n",
    "\n",
    "def main(_):\n",
    "    \"\"\"Resolve data files, build the Estimator, and run the requested task.\n",
    "\n",
    "    Dispatches on FLAGS.task_type: train (train_and_evaluate), eval, infer\n",
    "    (write probabilities to pred.txt), or export (raw-tensor SavedModel).\n",
    "    \"\"\"\n",
    "    # NOTE(review): the estimator variable below is named 'DeepFM' although\n",
    "    # this script's model_fn builds an NFM -- the name appears carried over\n",
    "    # from the DeepFM script; behavior is unaffected.\n",
    "    tr_files = glob.glob(\"%s/tr*libsvm\" % FLAGS.data_dir)\n",
    "    random.shuffle(tr_files)\n",
    "    print(\"tr_files:\", tr_files)\n",
    "    va_files = glob.glob(\"%s/va*libsvm\" % FLAGS.data_dir)\n",
    "    print(\"va_files:\", va_files)\n",
    "    te_files = glob.glob(\"%s/te*libsvm\" % FLAGS.data_dir)\n",
    "    print(\"te_files:\", te_files)\n",
    "\n",
    "    # Optionally wipe any existing checkpoint directory (best-effort).\n",
    "    if FLAGS.clear_existing_model:\n",
    "        try:\n",
    "            shutil.rmtree(FLAGS.model_dir)\n",
    "        except Exception as e:\n",
    "            print(e, \"at clear_existing_model\")\n",
    "        else:\n",
    "            print(\"existing model cleaned at %s\" % FLAGS.model_dir)\n",
    "\n",
    "    set_dist_env()\n",
    "\n",
    "    # Hyperparameters forwarded to model_fn via the Estimator's params dict.\n",
    "    model_params = {\n",
    "        \"field_size\": FLAGS.field_size,\n",
    "        \"feature_size\": FLAGS.feature_size,\n",
    "        \"embedding_size\": FLAGS.embedding_size,\n",
    "        \"learning_rate\": FLAGS.learning_rate,\n",
    "        \"l2_reg\": FLAGS.l2_reg,\n",
    "        \"deep_layers\": FLAGS.deep_layers,\n",
    "        \"dropout\": FLAGS.dropout\n",
    "    }\n",
    "    # CPU-only session config; logging/summary cadence follows FLAGS.log_steps.\n",
    "    config = tf.estimator.RunConfig().replace(session_config = tf.ConfigProto(device_count={'GPU':0, 'CPU':FLAGS.num_threads}),\n",
    "            log_step_count_steps=FLAGS.log_steps, save_summary_steps=FLAGS.log_steps)\n",
    "    DeepFM = tf.estimator.Estimator(model_fn=model_fn, model_dir=FLAGS.model_dir, params=model_params, config=config)\n",
    "\n",
    "    if FLAGS.task_type == 'train':\n",
    "        train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_fn(tr_files, num_epochs=FLAGS.num_epochs, batch_size=FLAGS.batch_size))\n",
    "        eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_fn(va_files, num_epochs=1, batch_size=FLAGS.batch_size), steps=None, start_delay_secs=1000, throttle_secs=1200)\n",
    "        tf.estimator.train_and_evaluate(DeepFM, train_spec, eval_spec)\n",
    "    elif FLAGS.task_type == 'eval':\n",
    "        DeepFM.evaluate(input_fn=lambda: input_fn(va_files, num_epochs=1, batch_size=FLAGS.batch_size))\n",
    "    elif FLAGS.task_type == 'infer':\n",
    "        # Write one probability per line for the test files.\n",
    "        preds = DeepFM.predict(input_fn=lambda: input_fn(te_files, num_epochs=1, batch_size=FLAGS.batch_size), predict_keys=\"prob\")\n",
    "        with open(FLAGS.data_dir+\"/pred.txt\", \"w\") as fo:\n",
    "            for prob in preds:\n",
    "                fo.write(\"%f\\n\" % (prob['prob']))\n",
    "    elif FLAGS.task_type == 'export':\n",
    "        #feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)\n",
    "        #feature_spec = {\n",
    "        #    'feat_ids': tf.FixedLenFeature(dtype=tf.int64, shape=[None, FLAGS.field_size]),\n",
    "        #    'feat_vals': tf.FixedLenFeature(dtype=tf.float32, shape=[None, FLAGS.field_size])\n",
    "        #}\n",
    "        #serving_input_receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)\n",
    "        # Serve raw tensors (ids/vals) rather than parsed tf.Example protos.\n",
    "        feature_spec = {\n",
    "            'feat_ids': tf.placeholder(dtype=tf.int64, shape=[None, FLAGS.field_size], name='feat_ids'),\n",
    "            'feat_vals': tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.field_size], name='feat_vals')\n",
    "        }\n",
    "        serving_input_receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)\n",
    "        DeepFM.export_savedmodel(FLAGS.servable_model_dir, serving_input_receiver_fn)\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    #------check Arguments------\n",
    "    # Default the date partition to yesterday (yyyymmdd) when not given, and\n",
    "    # suffix model_dir with it so each day's run gets its own checkpoint dir.\n",
    "    if FLAGS.dt_dir == \"\":\n",
    "        FLAGS.dt_dir = (date.today() + timedelta(-1)).strftime('%Y%m%d')\n",
    "    FLAGS.model_dir = FLAGS.model_dir + FLAGS.dt_dir\n",
    "    #FLAGS.data_dir  = FLAGS.data_dir + FLAGS.dt_dir\n",
    "\n",
    "    # Echo the effective configuration before running.\n",
    "    print('task_type ', FLAGS.task_type)\n",
    "    print('model_dir ', FLAGS.model_dir)\n",
    "    print('data_dir ', FLAGS.data_dir)\n",
    "    print('dt_dir ', FLAGS.dt_dir)\n",
    "    print('num_epochs ', FLAGS.num_epochs)\n",
    "    print('feature_size ', FLAGS.feature_size)\n",
    "    print('field_size ', FLAGS.field_size)\n",
    "    print('embedding_size ', FLAGS.embedding_size)\n",
    "    print('batch_size ', FLAGS.batch_size)\n",
    "    print('deep_layers ', FLAGS.deep_layers)\n",
    "    print('dropout ', FLAGS.dropout)\n",
    "    print('loss_type ', FLAGS.loss_type)\n",
    "    print('optimizer ', FLAGS.optimizer)\n",
    "    print('learning_rate ', FLAGS.learning_rate)\n",
    "    print('l2_reg ', FLAGS.l2_reg)\n",
    "\n",
    "    # Enable INFO-level logging, then dispatch to main() via tf.app.run().\n",
    "    tf.logging.set_verbosity(tf.logging.INFO)\n",
    "    tf.app.run()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "('task_type ', 'train')\n",
      "('model_dir ', './criteo_model/NFM20180504')\n",
      "('data_dir ', './criteo_data')\n",
      "('dt_dir ', '20180504')\n",
      "('num_epochs ', 1)\n",
      "('feature_size ', 117581)\n",
      "('field_size ', 39)\n",
      "('embedding_size ', 64)\n",
      "('batch_size ', 256)\n",
      "('deep_layers ', '400,400,400')\n",
      "('dropout ', '0.5,0.5,0.5')\n",
      "('loss_type ', 'log_loss')\n",
      "('optimizer ', 'Adam')\n",
      "('learning_rate ', 0.0005)\n",
      "('l2_reg ', 0.001)\n",
      "('tr_files:', ['./criteo_data/tr.libsvm'])\n",
      "('va_files:', ['./criteo_data/va.libsvm'])\n",
      "('te_files:', ['./criteo_data/te.libsvm'])\n",
      "INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600, '_session_config': device_count {\n",
      "  key: \"CPU\"\n",
      "  value: 8\n",
      "}\n",
      "device_count {\n",
      "  key: \"GPU\"\n",
      "}\n",
      ", '_keep_checkpoint_max': 5, '_task_type': 'worker', '_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x6455490>, '_save_checkpoints_steps': None, '_keep_checkpoint_every_n_hours': 10000, '_service': None, '_num_ps_replicas': 0, '_tf_random_seed': None, '_master': '', '_num_worker_replicas': 1, '_task_id': 0, '_log_step_count_steps': 1000, '_model_dir': './criteo_model/NFM', '_save_summary_steps': 1000}\n",
      "INFO:tensorflow:Running training and evaluation locally (non-distributed).\n",
      "INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after 1200 secs (eval_spec.throttle_secs) or training is finished.\n",
      "('Parsing', ['./criteo_data/tr.libsvm'])\n",
      "INFO:tensorflow:Create CheckpointSaverHook.\n",
      "2018-05-05 09:12:20.744054: I tensorflow/core/platform/cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA\n",
      "INFO:tensorflow:Saving checkpoints for 1 into ./criteo_model/NFM/model.ckpt.\n",
      "INFO:tensorflow:loss = 0.74344236, step = 1\n",
      "INFO:tensorflow:loss = 0.52700615, step = 101 (12.126 sec)\n",
      "INFO:tensorflow:loss = 0.53884023, step = 201 (8.622 sec)\n",
      "INFO:tensorflow:loss = 0.5741194, step = 301 (8.680 sec)\n",
      "INFO:tensorflow:loss = 0.59511054, step = 401 (8.699 sec)\n",
      "INFO:tensorflow:loss = 0.47535682, step = 501 (8.683 sec)\n",
      "INFO:tensorflow:loss = 0.5811361, step = 601 (9.074 sec)\n",
      "INFO:tensorflow:loss = 0.5244339, step = 701 (9.141 sec)\n",
      "INFO:tensorflow:loss = 0.5584926, step = 801 (9.293 sec)\n",
      "INFO:tensorflow:loss = 0.51474106, step = 901 (9.456 sec)\n",
      "INFO:tensorflow:global_step/sec: 10.8086\n",
      "INFO:tensorflow:loss = 0.5706619, step = 1001 (8.749 sec)\n",
      "INFO:tensorflow:loss = 0.523185, step = 1101 (9.152 sec)\n",
      "INFO:tensorflow:loss = 0.5341173, step = 1201 (8.163 sec)\n",
      "INFO:tensorflow:loss = 0.5158627, step = 1301 (8.788 sec)\n",
      "INFO:tensorflow:loss = 0.51566017, step = 1401 (8.700 sec)\n",
      "INFO:tensorflow:loss = 0.4592861, step = 1501 (8.962 sec)\n",
      "INFO:tensorflow:loss = 0.50802827, step = 1601 (8.990 sec)\n",
      "INFO:tensorflow:loss = 0.5538678, step = 1701 (8.596 sec)\n",
      "INFO:tensorflow:loss = 0.4346152, step = 1801 (8.267 sec)\n",
      "INFO:tensorflow:loss = 0.5406091, step = 1901 (8.907 sec)\n",
      "INFO:tensorflow:global_step/sec: 11.4221\n",
      "INFO:tensorflow:loss = 0.5177407, step = 2001 (9.026 sec)\n",
      "INFO:tensorflow:loss = 0.50947416, step = 2101 (10.118 sec)\n",
      "INFO:tensorflow:loss = 0.5290449, step = 2201 (8.635 sec)\n",
      "INFO:tensorflow:loss = 0.48367974, step = 2301 (9.689 sec)\n",
      "INFO:tensorflow:loss = 0.5103478, step = 2401 (9.785 sec)\n",
      "INFO:tensorflow:loss = 0.5290227, step = 2501 (9.748 sec)\n",
      "INFO:tensorflow:loss = 0.5219102, step = 2601 (9.889 sec)\n",
      "INFO:tensorflow:loss = 0.5131693, step = 2701 (10.787 sec)\n",
      "INFO:tensorflow:loss = 0.47013655, step = 2801 (11.150 sec)\n",
      "INFO:tensorflow:loss = 0.5133655, step = 2901 (12.453 sec)\n",
      "INFO:tensorflow:global_step/sec: 9.68192\n",
      "INFO:tensorflow:loss = 0.5253961, step = 3001 (11.027 sec)\n",
      "INFO:tensorflow:loss = 0.53593737, step = 3101 (10.576 sec)\n",
      "INFO:tensorflow:loss = 0.47377995, step = 3201 (9.975 sec)\n",
      "INFO:tensorflow:loss = 0.5179897, step = 3301 (9.655 sec)\n",
      "INFO:tensorflow:loss = 0.5014092, step = 3401 (8.827 sec)\n",
      "INFO:tensorflow:loss = 0.50651914, step = 3501 (9.877 sec)\n",
      "INFO:tensorflow:loss = 0.4893608, step = 3601 (7.170 sec)\n",
      "INFO:tensorflow:loss = 0.5037479, step = 3701 (7.128 sec)\n",
      "INFO:tensorflow:loss = 0.46921813, step = 3801 (7.062 sec)\n",
      "INFO:tensorflow:loss = 0.5224898, step = 3901 (6.815 sec)\n",
      "INFO:tensorflow:global_step/sec: 11.7165\n",
      "INFO:tensorflow:loss = 0.5555479, step = 4001 (8.265 sec)\n",
      "INFO:tensorflow:loss = 0.53638494, step = 4101 (9.037 sec)\n",
      "INFO:tensorflow:loss = 0.58234245, step = 4201 (8.601 sec)\n",
      "INFO:tensorflow:loss = 0.57939863, step = 4301 (8.564 sec)\n",
      "INFO:tensorflow:loss = 0.51434916, step = 4401 (8.940 sec)\n",
      "INFO:tensorflow:loss = 0.5549449, step = 4501 (8.833 sec)\n",
      "INFO:tensorflow:loss = 0.5062487, step = 4601 (8.651 sec)\n",
      "INFO:tensorflow:loss = 0.5529063, step = 4701 (8.658 sec)\n",
      "INFO:tensorflow:loss = 0.49861303, step = 4801 (8.808 sec)\n",
      "INFO:tensorflow:loss = 0.54094946, step = 4901 (8.782 sec)\n",
      "INFO:tensorflow:global_step/sec: 11.413\n",
      "INFO:tensorflow:loss = 0.49571908, step = 5001 (8.745 sec)\n",
      "INFO:tensorflow:loss = 0.5437416, step = 5101 (8.432 sec)\n",
      "INFO:tensorflow:loss = 0.5013172, step = 5201 (8.366 sec)\n",
      "INFO:tensorflow:loss = 0.50875455, step = 5301 (8.017 sec)\n",
      "INFO:tensorflow:loss = 0.5869225, step = 5401 (8.335 sec)\n",
      "INFO:tensorflow:loss = 0.5402778, step = 5501 (8.121 sec)\n",
      "INFO:tensorflow:loss = 0.52757925, step = 5601 (8.187 sec)\n",
      "INFO:tensorflow:loss = 0.48195118, step = 5701 (7.991 sec)\n",
      "INFO:tensorflow:loss = 0.4779031, step = 5801 (7.904 sec)\n",
      "INFO:tensorflow:loss = 0.5278434, step = 5901 (7.916 sec)\n",
      "INFO:tensorflow:global_step/sec: 12.3543\n",
      "INFO:tensorflow:loss = 0.5329895, step = 6001 (7.673 sec)\n",
      "INFO:tensorflow:loss = 0.5151729, step = 6101 (7.622 sec)\n",
      "INFO:tensorflow:loss = 0.62112814, step = 6201 (7.493 sec)\n",
      "INFO:tensorflow:loss = 0.48736763, step = 6301 (7.491 sec)\n",
      "INFO:tensorflow:loss = 0.45068923, step = 6401 (7.353 sec)\n",
      "INFO:tensorflow:loss = 0.51698387, step = 6501 (7.221 sec)\n",
      "INFO:tensorflow:loss = 0.5078758, step = 6601 (7.112 sec)\n",
      "INFO:tensorflow:loss = 0.53784084, step = 6701 (7.051 sec)\n",
      "INFO:tensorflow:loss = 0.568355, step = 6801 (6.848 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 6863 into ./criteo_model/NFM/model.ckpt.\n",
      "INFO:tensorflow:loss = 0.5869765, step = 6901 (7.007 sec)\n",
      "INFO:tensorflow:global_step/sec: 13.877\n",
      "INFO:tensorflow:loss = 0.50776565, step = 7001 (6.864 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 7034 into ./criteo_model/NFM/model.ckpt.\n",
      "INFO:tensorflow:Loss for final step: 0.66966015.\n",
      "('Parsing', ['./criteo_data/va.libsvm'])\n",
      "INFO:tensorflow:Starting evaluation at 2018-05-05-01:22:35\n",
      "INFO:tensorflow:Restoring parameters from ./criteo_model/NFM/model.ckpt-7034\n",
      "INFO:tensorflow:Finished evaluation at 2018-05-05-01:22:58\n",
      "INFO:tensorflow:Saving dict for global step 7034: auc = 0.7614266, global_step = 7034, loss = 0.50850546\n",
      "('Parsing', ['./criteo_data/tr.libsvm'])\n",
      "INFO:tensorflow:Create CheckpointSaverHook.\n",
      "INFO:tensorflow:Restoring parameters from ./criteo_model/NFM/model.ckpt-7034\n",
      "INFO:tensorflow:Saving checkpoints for 7035 into ./criteo_model/NFM/model.ckpt.\n",
      "INFO:tensorflow:loss = 0.53954387, step = 7035\n",
      "INFO:tensorflow:loss = 0.506534, step = 7135 (10.071 sec)\n",
      "INFO:tensorflow:loss = 0.5184156, step = 7235 (8.270 sec)\n",
      "INFO:tensorflow:loss = 0.5448781, step = 7335 (8.497 sec)\n",
      "INFO:tensorflow:loss = 0.58426636, step = 7435 (7.031 sec)\n",
      "INFO:tensorflow:loss = 0.4775302, step = 7535 (7.547 sec)\n",
      "INFO:tensorflow:loss = 0.57145935, step = 7635 (8.272 sec)\n",
      "INFO:tensorflow:loss = 0.52330667, step = 7735 (7.936 sec)\n",
      "INFO:tensorflow:loss = 0.52791095, step = 7835 (7.510 sec)\n",
      "INFO:tensorflow:loss = 0.5160444, step = 7935 (7.842 sec)\n",
      "INFO:tensorflow:global_step/sec: 12.3632\n",
      "INFO:tensorflow:loss = 0.54860413, step = 8035 (7.911 sec)\n",
      "INFO:tensorflow:loss = 0.5232839, step = 8135 (8.025 sec)\n",
      "INFO:tensorflow:loss = 0.5313403, step = 8235 (7.744 sec)\n",
      "INFO:tensorflow:loss = 0.5083723, step = 8335 (8.124 sec)\n",
      "INFO:tensorflow:loss = 0.5127937, step = 8435 (7.833 sec)\n",
      "INFO:tensorflow:loss = 0.45451465, step = 8535 (8.071 sec)\n",
      "INFO:tensorflow:loss = 0.5148269, step = 8635 (8.239 sec)\n",
      "INFO:tensorflow:loss = 0.5475166, step = 8735 (8.724 sec)\n",
      "INFO:tensorflow:loss = 0.4436438, step = 8835 (7.380 sec)\n",
      "INFO:tensorflow:loss = 0.527986, step = 8935 (6.852 sec)\n",
      "INFO:tensorflow:global_step/sec: 12.8544\n",
      "INFO:tensorflow:loss = 0.5004729, step = 9035 (6.800 sec)\n",
      "INFO:tensorflow:loss = 0.5152983, step = 9135 (6.864 sec)\n",
      "INFO:tensorflow:loss = 0.5443342, step = 9235 (6.611 sec)\n",
      "INFO:tensorflow:loss = 0.48795786, step = 9335 (8.363 sec)\n",
      "INFO:tensorflow:loss = 0.50839627, step = 9435 (8.677 sec)\n",
      "INFO:tensorflow:loss = 0.53861755, step = 9535 (8.617 sec)\n",
      "INFO:tensorflow:loss = 0.5102945, step = 9635 (8.498 sec)\n",
      "INFO:tensorflow:loss = 0.49490649, step = 9735 (8.507 sec)\n",
      "INFO:tensorflow:loss = 0.46887958, step = 9835 (8.529 sec)\n",
      "INFO:tensorflow:loss = 0.5098571, step = 9935 (8.540 sec)\n",
      "INFO:tensorflow:global_step/sec: 12.2224\n",
      "INFO:tensorflow:loss = 0.5108617, step = 10035 (8.612 sec)\n",
      "INFO:tensorflow:loss = 0.5259123, step = 10135 (8.646 sec)\n",
      "INFO:tensorflow:loss = 0.49567312, step = 10235 (8.585 sec)\n",
      "INFO:tensorflow:loss = 0.50952077, step = 10335 (9.826 sec)\n",
      "INFO:tensorflow:loss = 0.50462925, step = 10435 (8.775 sec)\n",
      "INFO:tensorflow:loss = 0.49131048, step = 10535 (8.954 sec)\n",
      "INFO:tensorflow:loss = 0.51161194, step = 10635 (8.810 sec)\n",
      "INFO:tensorflow:loss = 0.49189892, step = 10735 (8.735 sec)\n",
      "INFO:tensorflow:loss = 0.45244217, step = 10835 (8.599 sec)\n",
      "INFO:tensorflow:loss = 0.5231385, step = 10935 (8.917 sec)\n",
      "INFO:tensorflow:global_step/sec: 11.2768\n",
      "INFO:tensorflow:loss = 0.5461174, step = 11035 (8.829 sec)\n",
      "INFO:tensorflow:loss = 0.5328863, step = 11135 (8.628 sec)\n",
      "INFO:tensorflow:loss = 0.5831222, step = 11235 (8.300 sec)\n",
      "INFO:tensorflow:loss = 0.5753766, step = 11335 (7.912 sec)\n",
      "INFO:tensorflow:loss = 0.5203026, step = 11435 (8.527 sec)\n",
      "INFO:tensorflow:loss = 0.55177057, step = 11535 (8.694 sec)\n",
      "INFO:tensorflow:loss = 0.5044052, step = 11635 (8.612 sec)\n",
      "INFO:tensorflow:loss = 0.54929847, step = 11735 (8.030 sec)\n",
      "INFO:tensorflow:loss = 0.5100083, step = 11835 (8.013 sec)\n",
      "INFO:tensorflow:loss = 0.54684854, step = 11935 (8.033 sec)\n",
      "INFO:tensorflow:global_step/sec: 12.0882\n",
      "INFO:tensorflow:loss = 0.49854505, step = 12035 (7.976 sec)\n",
      "INFO:tensorflow:loss = 0.5296041, step = 12135 (8.307 sec)\n",
      "INFO:tensorflow:loss = 0.5118119, step = 12235 (8.437 sec)\n",
      "INFO:tensorflow:loss = 0.5206564, step = 12335 (8.180 sec)\n",
      "INFO:tensorflow:loss = 0.56792927, step = 12435 (8.102 sec)\n",
      "INFO:tensorflow:loss = 0.53202826, step = 12535 (8.284 sec)\n"
     ]
    }
   ],
   "source": [
    "# Train the NFM model on the preprocessed criteo libsvm data for one epoch;\n",
    "# checkpoints and eval metrics are written under ./criteo_model/NFM\n",
    "!python NFM.py   --task_type=train \\\n",
    "                    --learning_rate=0.0005 \\\n",
    "                    --optimizer=Adam \\\n",
    "                    --num_epochs=1 \\\n",
    "                    --batch_size=256 \\\n",
    "                    --field_size=39 \\\n",
    "                    --feature_size=117581 \\\n",
    "                    --deep_layers=400,400,400 \\\n",
    "                    --dropout=0.5,0.5,0.5 \\\n",
    "                    --log_steps=1000 \\\n",
    "                    --num_threads=8 \\\n",
    "                    --model_dir=./criteo_model/NFM \\\n",
    "                    --data_dir=./criteo_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "### DeepCTR\n",
    "充分利用图像带来的视觉影响，结合图像信息(通过CNN抽取)和业务特征一起判断点击率大小\n",
    "![](https://pic3.zhimg.com/v2-df0ed2332c6fb09786dfd29a3311b47c_r.jpg)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# %load train_with_googlenet.py\n",
    "from keras.models import Sequential\n",
    "from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape\n",
    "from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
    "from keras.optimizers import SGD, Adadelta, Adagrad\n",
    "from keras.layers import Embedding,Merge\n",
    "from keras.callbacks import ModelCheckpoint\n",
    "import keras\n",
    "from keras.preprocessing import image\n",
    "import numpy as np\n",
    "import sys, os, re\n",
    "from keras.applications.inception_v3 import InceptionV3, preprocess_input\n",
    "\n",
    "# Load a pre-trained InceptionV3 (GoogLeNet family) CNN as a frozen image\n",
    "# feature extractor. (Original comment said VGG, but the code loads InceptionV3.)\n",
    "def GoogleInceptionV3():\n",
    "    base_model = InceptionV3(weights='imagenet', include_top=False)\n",
    "    # Freeze the pre-trained weights: used purely for feature extraction\n",
    "    base_model.trainable = False\n",
    "    return base_model\n",
    "\n",
    "# Load the field -> feature-count mapping from a tab-separated meta file.\n",
    "# Column 1 is the field id, column 4 is that field's feature count\n",
    "# (assumed from the indices used here -- confirm against the file producer).\n",
    "def load_field_feature_meta(field_info_file):\n",
    "    field_feature_dic = {}\n",
    "    # FIX: use a context manager -- the original `for line in open(...)`\n",
    "    # never closed the file handle.\n",
    "    with open(field_info_file) as f:\n",
    "        for line in f:\n",
    "            contents = line.strip().split(\"\\t\")\n",
    "            field_id = int(contents[1])\n",
    "            feature_count = int(contents[4])\n",
    "            field_feature_dic[field_id] = feature_count\n",
    "    return field_feature_dic\n",
    "\n",
    "# Build one tiny embedding model per categorical field: a dense projection\n",
    "# of the field's one-hot input. Returns a list of Sequential models.\n",
    "def CTR_embedding(field_feature_dic):\n",
    "    embedding_models = []\n",
    "    for field_id in range(len(field_feature_dic)):\n",
    "        # +1 input slot reserved for rare/unseen feature values\n",
    "        input_dims = field_feature_dic[field_id]+1\n",
    "        # Project the sparse one-hot field into at most 16 dense dimensions\n",
    "        dense_dim = min(input_dims, 16)\n",
    "        field_model = Sequential()\n",
    "        field_model.add(Dense(dense_dim, input_dim=input_dims))\n",
    "        embedding_models.append(field_model)\n",
    "    return embedding_models\n",
    "\n",
    "# Full model: GoogleNet image features concatenated with the per-field CTR\n",
    "# embeddings, followed by two dense layers and a sigmoid click-probability output.\n",
    "# FIX: the original used Python-2 `print \"...\"` statements, which are\n",
    "# SyntaxErrors under this notebook's Python 3 kernel.\n",
    "def full_network(field_feature_dic):\n",
    "    print(\"GoogleNet model loading\")\n",
    "    googleNet_model = GoogleInceptionV3()\n",
    "    image_model = Flatten()(googleNet_model.outputs)\n",
    "    image_model = Dense(256)(image_model)\n",
    "    \n",
    "    print(\"GoogleNet model loaded\")\n",
    "    print(\"initialize embedding model\")\n",
    "    print(\"loading fields info...\")\n",
    "    emd = CTR_embedding(field_feature_dic)\n",
    "    print(\"embedding model done!\")\n",
    "    print(\"initialize full model...\")\n",
    "    full_model = Sequential()\n",
    "    full_input = [image_model] + emd\n",
    "    full_model.add(Merge(full_input, mode='concat'))\n",
    "    # Batch normalization over the concatenated features\n",
    "    full_model.add(keras.layers.normalization.BatchNormalization())\n",
    "    # Fully-connected layer\n",
    "    full_model.add(Dense(128))\n",
    "    full_model.add(Dropout(0.4))\n",
    "    full_model.add(Activation('relu'))\n",
    "    # Fully-connected layer\n",
    "    full_model.add(Dense(128))\n",
    "    full_model.add(Dropout(0.4))\n",
    "    # Final binary classification head\n",
    "    full_model.add(Dense(1))\n",
    "    full_model.add(Activation('sigmoid'))\n",
    "    # Compile the whole model\n",
    "    full_model.compile(loss='binary_crossentropy',\n",
    "                  optimizer='adadelta',\n",
    "                  metrics=['binary_accuracy','fmeasure'])\n",
    "    # Print a per-layer summary of the model\n",
    "    full_model.summary()\n",
    "    return full_model\n",
    "\n",
    "\n",
    "# Image preprocessing: load, resize to 299x299 and apply InceptionV3 preprocessing.\n",
    "# FIX: the parameter was named `image`, shadowing the imported\n",
    "# keras.preprocessing.image module, so image.load_img(...) raised\n",
    "# AttributeError on the str argument. Renamed to img_path (callers are positional).\n",
    "def vgg_image_preoprocessing(img_path):\n",
    "    img = image.load_img(img_path, target_size=(299, 299))\n",
    "    x = image.img_to_array(img)\n",
    "    x = np.expand_dims(x, axis=0)\n",
    "    x = preprocess_input(x)\n",
    "    return x\n",
    "\n",
    "# Parse a space-separated string of field:feature:num triples into a\n",
    "# {field_id: feature_id} dict (the num component is ignored).\n",
    "def ctr_feature_preprocessing(field_feature_string):\n",
    "    feature_dic = {}\n",
    "    for token in field_feature_string.strip().split(\" \"):\n",
    "        field_id, feature_id, num = token.split(\":\")\n",
    "        feature_dic[int(field_id)] = int(feature_id)\n",
    "    return feature_dic\n",
    "\n",
    "# Yield training batches of ([image_tensor] + per-field one-hot matrices, labels)\n",
    "# forever, re-reading the input file on each pass. The first skip_lines lines are\n",
    "# skipped on the first pass only (to resume training mid-file).\n",
    "def generate_batch_from_file(in_f, field_feature_dic, batch_num, skip_lines=0):\n",
    "    # Initialize x (one zero matrix of shape (batch_num, n_features+1) per field) and y\n",
    "    img_x = []\n",
    "    x = []\n",
    "    for field_id in range(len(field_feature_dic)):\n",
    "            x.append(np.zeros((batch_num, int(field_feature_dic[field_id])+1)))\n",
    "    y = [0.0]*batch_num\n",
    "    round_num = 1\n",
    "\n",
    "    while True:\n",
    "        line_count = 0\n",
    "        skips = 0\n",
    "        f = open(in_f)\n",
    "        for line in f:\n",
    "            # Skip lines only on the very first pass over the file\n",
    "            if(skip_lines>0 and round_num==1):\n",
    "                if skips < skip_lines:\n",
    "                    skips += 1\n",
    "                    continue\n",
    "            if (line_count+1)%batch_num == 0:\n",
    "                # Batch boundary: fill in the last sample, then yield the batch\n",
    "                contents = line.strip().split(\"\\t\")\n",
    "                img_name = \"images/\"+re.sub(r'.jpg.*', '.jpg', contents[1].split(\"/\")[-1])\n",
    "                if not os.path.isfile(img_name):\n",
    "                    continue\n",
    "                # NOTE(review): a `continue` here (missing/unreadable image) leaves the\n",
    "                # batch un-yielded and img_x shorter than batch_num -- confirm intended.\n",
    "                try:\n",
    "                    img_input = vgg_image_preoprocessing(img_name)\n",
    "                except:\n",
    "                    # NOTE(review): bare except silently drops unreadable images\n",
    "                    continue\n",
    "                # Fill image features\n",
    "                img_x.append(img_input)\n",
    "                # Fill CTR (one-hot) features\n",
    "                ctr_feature_dic = ctr_feature_preprocessing(contents[2])\n",
    "                for field_id in ctr_feature_dic:\n",
    "                    x[field_id][line_count][ctr_feature_dic[field_id]] = 1.0\n",
    "                # Fill the label\n",
    "                y[line_count] = int(contents[0])\n",
    "                #print \"shape is\", np.array(img_x).shape\n",
    "                yield ([np.array(img_x)]+x, y)\n",
    "\n",
    "                # Reset the accumulators for the next batch\n",
    "                img_x = []\n",
    "                x = []\n",
    "                for field_id in range(len(field_feature_dic)):\n",
    "                    x.append(np.zeros((batch_num, int(field_feature_dic[field_id])+1)))\n",
    "                y = [0.0]*batch_num\n",
    "                line_count = 0\n",
    "            else:   \n",
    "                # Mid-batch: accumulate one sample\n",
    "                contents = line.strip().split(\"\\t\")\n",
    "                img_name = \"images/\"+re.sub(r'.jpg.*', '.jpg', contents[1].split(\"/\")[-1])\n",
    "                if not os.path.isfile(img_name):\n",
    "                    continue\n",
    "                try:\n",
    "                    img_input = vgg_image_preoprocessing(img_name)\n",
    "                except:\n",
    "                    continue\n",
    "                # Fill image features\n",
    "                img_x.append(img_input)\n",
    "                # Fill CTR (one-hot) features\n",
    "                ctr_feature_dic = ctr_feature_preprocessing(contents[2])\n",
    "                for field_id in ctr_feature_dic:\n",
    "                    x[field_id][line_count][ctr_feature_dic[field_id]] = 1.0\n",
    "                # Fill the label\n",
    "                y[line_count] = int(contents[0])\n",
    "                line_count += 1\n",
    "        f.close()\n",
    "        round_num += 1\n",
    "\n",
    "# Train the full network; resumes from weight_file if it already exists and\n",
    "# checkpoints every 3 epochs via ModelCheckpoint.\n",
    "# FIX: the original used a Python-2 `print \"...\"` statement, a SyntaxError\n",
    "# under this notebook's Python 3 kernel.\n",
    "def train_network(skip_lines, batch_num, field_info_file, data_file, weight_file):\n",
    "    print(\"starting train whole network...\\n\")\n",
    "    field_feature_dic = load_field_feature_meta(field_info_file)\n",
    "    full_model = full_network(field_feature_dic)\n",
    "    if os.path.isfile(weight_file):\n",
    "        full_model.load_weights(weight_file)\n",
    "    checkpointer = ModelCheckpoint(filepath=weight_file, save_best_only=False, verbose=1, period=3)\n",
    "    full_model.fit_generator(generate_batch_from_file(data_file, field_feature_dic, batch_num, skip_lines),samples_per_epoch=1280, nb_epoch=100000, callbacks=[checkpointer])\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    # Usage: python train_with_googlenet.py <skip_lines> <batch_num> <field_info_file> <data_file> <weight_file>\n",
    "    train_network(int(sys.argv[1]), int(sys.argv[2]),\n",
    "                  sys.argv[3], sys.argv[4], sys.argv[5])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
