{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "import tensorflow.keras.layers as layer\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "# from tensorflow.keras.models import Model\n",
    "from tensorflow.keras import Model\n",
    "import tensorflow.keras.backend as K\n",
    "from sklearn.model_selection import StratifiedKFold"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>label</th>\n",
       "      <th>I1</th>\n",
       "      <th>I2</th>\n",
       "      <th>I3</th>\n",
       "      <th>I4</th>\n",
       "      <th>I5</th>\n",
       "      <th>I6</th>\n",
       "      <th>I7</th>\n",
       "      <th>I8</th>\n",
       "      <th>I9</th>\n",
       "      <th>...</th>\n",
       "      <th>C17</th>\n",
       "      <th>C18</th>\n",
       "      <th>C19</th>\n",
       "      <th>C20</th>\n",
       "      <th>C21</th>\n",
       "      <th>C22</th>\n",
       "      <th>C23</th>\n",
       "      <th>C24</th>\n",
       "      <th>C25</th>\n",
       "      <th>C26</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1</td>\n",
       "      <td>5.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1382.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>15.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>181.0</td>\n",
       "      <td>...</td>\n",
       "      <td>e5ba7672</td>\n",
       "      <td>f54016b9</td>\n",
       "      <td>21ddcdc9</td>\n",
       "      <td>b1252a9d</td>\n",
       "      <td>07b5194c</td>\n",
       "      <td>NaN</td>\n",
       "      <td>3a171ecb</td>\n",
       "      <td>c5c50484</td>\n",
       "      <td>e8b83407</td>\n",
       "      <td>9727dd16</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>0</td>\n",
       "      <td>44.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>102.0</td>\n",
       "      <td>8.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>...</td>\n",
       "      <td>07c540c4</td>\n",
       "      <td>b04e4670</td>\n",
       "      <td>21ddcdc9</td>\n",
       "      <td>5840adea</td>\n",
       "      <td>60f6221e</td>\n",
       "      <td>NaN</td>\n",
       "      <td>3a171ecb</td>\n",
       "      <td>43f13e8b</td>\n",
       "      <td>e8b83407</td>\n",
       "      <td>731c3655</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>14.0</td>\n",
       "      <td>767.0</td>\n",
       "      <td>89.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>245.0</td>\n",
       "      <td>...</td>\n",
       "      <td>8efede7f</td>\n",
       "      <td>3412118d</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>e587c466</td>\n",
       "      <td>ad3062eb</td>\n",
       "      <td>3a171ecb</td>\n",
       "      <td>3b183c5c</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>893</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>4392.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>1e88c74f</td>\n",
       "      <td>74ef3502</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>6b3a5ca6</td>\n",
       "      <td>NaN</td>\n",
       "      <td>3a171ecb</td>\n",
       "      <td>9117a34a</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0</td>\n",
       "      <td>3.0</td>\n",
       "      <td>-1</td>\n",
       "      <td>NaN</td>\n",
       "      <td>0.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>3.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>1e88c74f</td>\n",
       "      <td>26b3c7a7</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>21c9516a</td>\n",
       "      <td>NaN</td>\n",
       "      <td>32c7478e</td>\n",
       "      <td>b34f3128</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>5 rows × 40 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "   label   I1   I2    I3    I4      I5    I6    I7   I8     I9  ...       C17  \\\n",
       "0      0  1.0    1   5.0   0.0  1382.0   4.0  15.0  2.0  181.0  ...  e5ba7672   \n",
       "1      0  2.0    0  44.0   1.0   102.0   8.0   2.0  2.0    4.0  ...  07c540c4   \n",
       "2      0  2.0    0   1.0  14.0   767.0  89.0   4.0  2.0  245.0  ...  8efede7f   \n",
       "3      0  NaN  893   NaN   NaN  4392.0   NaN   0.0  0.0    0.0  ...  1e88c74f   \n",
       "4      0  3.0   -1   NaN   0.0     2.0   0.0   3.0  0.0    0.0  ...  1e88c74f   \n",
       "\n",
       "        C18       C19       C20       C21       C22       C23       C24  \\\n",
       "0  f54016b9  21ddcdc9  b1252a9d  07b5194c       NaN  3a171ecb  c5c50484   \n",
       "1  b04e4670  21ddcdc9  5840adea  60f6221e       NaN  3a171ecb  43f13e8b   \n",
       "2  3412118d       NaN       NaN  e587c466  ad3062eb  3a171ecb  3b183c5c   \n",
       "3  74ef3502       NaN       NaN  6b3a5ca6       NaN  3a171ecb  9117a34a   \n",
       "4  26b3c7a7       NaN       NaN  21c9516a       NaN  32c7478e  b34f3128   \n",
       "\n",
       "        C25       C26  \n",
       "0  e8b83407  9727dd16  \n",
       "1  e8b83407  731c3655  \n",
       "2       NaN       NaN  \n",
       "3       NaN       NaN  \n",
       "4       NaN       NaN  \n",
       "\n",
       "[5 rows x 40 columns]"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the sampled Criteo CTR dataset (relative path; run from the notebook root).\n",
    "train = pd.read_csv('./data/criteo_sampled_data.csv')\n",
    "train.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# train.info()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# All feature columns; column 0 is the 'label' target.\n",
    "cols = train.columns[1:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Criteo convention: I1..I13 are numeric (dense), C1..C26 are categorical (sparse).\n",
    "dense_feats = [f for f in cols if f[0] == 'I']\n",
    "sparse_feats = [f for f in cols if f[0] == 'C']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_dense_feats(data, feats):\n",
    "    \"\"\"Return a new frame of the dense columns, NaN->0 and log-transformed.\n",
    "\n",
    "    Each value x becomes log(x + 1) when x > -1, otherwise -1 (the log of a\n",
    "    non-positive argument is undefined, so such values are clamped).\n",
    "    \"\"\"\n",
    "    # The former data.copy() was redundant: selecting the columns and fillna\n",
    "    # each return a new object already, so the input frame is never touched.\n",
    "    d = data[feats].fillna(0)\n",
    "    for f in feats:\n",
    "        # Vectorized equivalent of the old per-row apply (much faster on 500k+ rows).\n",
    "        vals = d[f].to_numpy(dtype=float)\n",
    "        out = np.full_like(vals, -1.0)\n",
    "        pos = vals > -1\n",
    "        out[pos] = np.log(vals[pos] + 1)\n",
    "        d[f] = out\n",
    "    return d\n",
    "data_dense = process_dense_feats(train, dense_feats)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_sparse_feats(data, feats):\n",
    "    \"\"\"Return a new frame of the categorical columns, NaN->'-1', label-encoded.\n",
    "\n",
    "    Each column is independently mapped to contiguous integer ids\n",
    "    0..nunique-1; the missing-value sentinel '-1' gets its own id.\n",
    "    \"\"\"\n",
    "    # The former data.copy() was redundant: the column selection + fillna\n",
    "    # below already produce a new frame, leaving the input untouched.\n",
    "    d = data[feats].fillna('-1')\n",
    "    for f in feats:\n",
    "        d[f] = LabelEncoder().fit_transform(d[f])\n",
    "    return d\n",
    "data_sparse = process_sparse_feats(train, sparse_feats)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-assemble a single frame: transformed dense + encoded sparse + target.\n",
    "total_data = pd.concat([data_dense, data_sparse], axis=1)\n",
    "total_data['label'] = train['label']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# If you only want to apply a transformation to the data flowing through a layer,\n",
    "# and that transformation has no learnable parameters of its own,\n",
    "# a Lambda layer is the most appropriate choice."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Record the vocabulary size (number of distinct encoded ids) per categorical\n",
    "# feature; used later to size the embedding tables.\n",
    "sparse_feat_config= dict()\n",
    "for col in sparse_feats:\n",
    "    sparse_feat_config[col] = total_data[col].nunique()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a simple holdout split: first 500k rows train, the rest validate.\n",
    "train_data = total_data.loc[:500000-1]\n",
    "valid_data = total_data.loc[500000:]\n",
    "\n",
    "# Keras multi-input models take one array per Input layer, hence the per-column lists.\n",
    "train_dense_x = [train_data[f].values for f in dense_feats]#  train_data[dense_feats] \n",
    "train_sparse_x = [train_data[f].values for f in sparse_feats] # train_data[sparse_feats] # \n",
    "train_label = train_data['label'].values\n",
    "train_label = tf.cast(train_label, tf.int32)\n",
    "\n",
    "val_dense_x = [valid_data[f].values for f in dense_feats] # valid_data[dense_feats]   \n",
    "val_sparse_x = [valid_data[f].values for f in sparse_feats] # valid_data[sparse_feats]\n",
    "val_label = valid_data['label'].values\n",
    "val_label = tf.cast(val_label, tf.int32)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build model inputs (sparse list, dense list, labels) for the rows in idx.\n",
    "def make_data(total_data,idx):\n",
    "    \"\"\"Slice total_data at the given indices and split into model-ready inputs.\n",
    "\n",
    "    Relies on the module-level dense_feats / sparse_feats lists.\n",
    "    Returns (sparse_inputs, dense_inputs, labels) - note sparse comes first.\n",
    "    NOTE(review): .loc with KFold's positional indices only works because\n",
    "    total_data keeps its default RangeIndex - confirm it is never reindexed.\n",
    "    \"\"\"\n",
    "    train_data = total_data.loc[idx,:]\n",
    "    train_dense_x = [train_data[f].values for f in dense_feats]\n",
    "    train_sparse_x = [train_data[f].values for f in sparse_feats]\n",
    "    train_label = train_data['label'].values\n",
    "    return train_sparse_x,train_dense_x,train_label"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 写法一\n",
    "继承layer,定义不同功能的层"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Standalone layer: per-feature embedding lookup\n",
    "class Embedding_dense(tf.keras.layers.Layer):\n",
    "    \"\"\"Owns one Embedding table per sparse feature and looks them all up.\n",
    "\n",
    "    sparse_feat_config maps feature name -> vocabulary size (nunique);\n",
    "    embeding_shape is the embedding dimension shared by all features.\n",
    "    \"\"\"\n",
    "    def __init__(self,sparse_feat_config, embeding_shape):\n",
    "        super(Embedding_dense, self).__init__()\n",
    "        # L2 regularization applied to every embedding table\n",
    "        self.reg_1 = tf.keras.regularizers.l2(0.1)\n",
    "        self.embed_first = {}\n",
    "        self.sparse_feat_config = sparse_feat_config\n",
    "        self.embeding_shape = embeding_shape\n",
    "        self.sparse_feat = list(sparse_feat_config.keys())\n",
    "        for key, value in self.sparse_feat_config.items():\n",
    "            # value+1 leaves headroom of one id beyond the observed vocabulary\n",
    "            self.embed_first[key] = layer.Embedding(value+1,self.embeding_shape, \n",
    "                                                    embeddings_regularizer=self.reg_1, \n",
    "                                                    name='embed'+key)\n",
    "    def call(self,x_sparse):\n",
    "        # x_sparse: list of tensors, one per feature, in self.sparse_feat order\n",
    "        embed_lookup_first = []\n",
    "        for i,key in enumerate(self.sparse_feat):\n",
    "\n",
    "            _embed = self.embed_first[key](x_sparse[i])\n",
    "\n",
    "            embed_lookup_first.append(_embed)\n",
    "\n",
    "        return embed_lookup_first\n",
    "    \n",
    "    def get_config(self):\n",
    "        # Serialization support: record the constructor arguments.\n",
    "        config = super().get_config().copy()\n",
    "        config.update({\n",
    "            'sparse_feat_config': self.sparse_feat_config,\n",
    "            'embeding_shape': self.embeding_shape,\n",
    "        })\n",
    "        return config\n",
    "# t = Embedding_dense(sparse_feat_config,1)\n",
    "# y = t(inputs_sparse)    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standalone layer: first-order (linear) part of FM\n",
    "class firsr_cross_dense(tf.keras.layers.Layer):\n",
    "    \"\"\"FM linear term: sum of 1-d sparse embeddings plus Dense(1) over dense feats.\n",
    "\n",
    "    NOTE(review): 'firsr' looks like a typo for 'first', but the name is\n",
    "    referenced by callers below, so it is kept as-is.\n",
    "    \"\"\"\n",
    "    def __init__(self,sparse_feat_config):\n",
    "        super(firsr_cross_dense, self).__init__()\n",
    "        self.sparse_feat_config = sparse_feat_config\n",
    "        # embedding dim 1: each category contributes a single scalar weight\n",
    "        self.sparse_feat_embedding = Embedding_dense(self.sparse_feat_config,1)\n",
    "        self.dense_out_first = layer.Dense(1,name='dense_feat_first')\n",
    "    \n",
    "    def call(self,x_sparse,x_dense):\n",
    "        embed_lookup_first = self.sparse_feat_embedding(x_sparse)\n",
    "\n",
    "        # sum the per-feature scalar weights\n",
    "        fst_order_sparse_layer  = layer.Add()(embed_lookup_first)  \n",
    "        # dense features: concatenate, then one linear unit\n",
    "        fst_order_dense_layer = layer.Concatenate(axis=1)(x_dense)  \n",
    "        fst_order_dense_layer  = self.dense_out_first(fst_order_dense_layer)\n",
    "        # merge sparse and dense contributions into the final first-order term\n",
    "        linear_part = layer.Add()([fst_order_sparse_layer,fst_order_dense_layer])\n",
    "        linear_part = layer.Flatten()(linear_part)\n",
    "        return linear_part\n",
    "\n",
    "    def get_config(self):\n",
    "        # Serialization support: record the constructor arguments.\n",
    "        config = super().get_config().copy()\n",
    "        config.update({\n",
    "            'sparse_feat_config': self.sparse_feat_config,\n",
    "        })\n",
    "        return config    \n",
    "# firsr_cross_dense(sparse_feat_config)(train_sparse_x,train_dense_x)    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standalone layer: second-order (pairwise) FM interaction\n",
    "class second_cross_dense(tf.keras.layers.Layer):\n",
    "    \"\"\"FM second-order term via the square-of-sum minus sum-of-squares identity.\"\"\"\n",
    "    def __init__(self):\n",
    "        super(second_cross_dense, self).__init__()\n",
    "\n",
    "    def call(self,embed_lookup_second):\n",
    "        # concat to (None, n, k): n = number of features, k = embedding dim\n",
    "        concat_sparse_kd_embed = layer.Concatenate(axis=1)(embed_lookup_second) \n",
    "        \n",
    "        # sum first, then square\n",
    "        # sum_1 = K.sum(concat_sparse_kd_embed,axis=1,keepdims=True)\n",
    "        # writing it directly is not a layer, so we wrap it in a Lambda layer\n",
    "        sum_kd_embed = layer.Lambda(lambda x: K.sum(x, axis=1))(concat_sparse_kd_embed)\n",
    "        square_sum_kd_embed = layer.Multiply()([sum_kd_embed,sum_kd_embed])\n",
    "        \n",
    "        # square first, then sum\n",
    "        square_kd_embed = layer.Multiply()([concat_sparse_kd_embed,concat_sparse_kd_embed])\n",
    "        sum_square_kd_embed = layer.Lambda(lambda x: K.sum(x,axis=1))(square_kd_embed)\n",
    "        \n",
    "        # 0.5 * sum(square_of_sum - sum_of_squares)\n",
    "        sub = layer.Subtract()([square_sum_kd_embed,sum_square_kd_embed])\n",
    "        sub = layer.Lambda(lambda x: K.sum(x,axis=1,keepdims=True))(sub)\n",
    "        snd_order_sparse_layer = layer.Lambda(lambda x: x*0.5)(sub)\n",
    "        \n",
    "        return snd_order_sparse_layer\n",
    "\n",
    "# second_cross_dense()(embed_lookup_second)    "
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standalone layer: the deep (MLP) part of DeepFM\n",
    "class DNN(tf.keras.layers.Layer):\n",
    "    \"\"\"Stack of Dense layers over the flattened concatenated embeddings.\n",
    "\n",
    "    dnn_config lists the layer widths; the final width should be 1 so the\n",
    "    output can be added to the FM terms. Layers keep the Dense default\n",
    "    (linear) activation, matching the original implementation.\n",
    "    \"\"\"\n",
    "    def __init__(self,dnn_config=None):\n",
    "        super(DNN, self).__init__()\n",
    "        # Avoid a mutable list as default argument; None selects the default widths.\n",
    "        self.dnn_layers_config = [128,64,1] if dnn_config is None else dnn_config\n",
    "        self.dnn_layers = []\n",
    "        for s in self.dnn_layers_config:\n",
    "            self.dnn_layers.append(layer.Dense(s))\n",
    "            \n",
    "    def call(self,embed_lookup_second):\n",
    "        embed_lookup_second = layer.Concatenate(axis=1)(embed_lookup_second) \n",
    "        fc_layer = layer.Flatten()(embed_lookup_second)\n",
    "        for i,_ in enumerate(self.dnn_layers):\n",
    "            fc_layer = self.dnn_layers[i](fc_layer)\n",
    "        \n",
    "        return fc_layer\n",
    "    \n",
    "    def get_config(self):\n",
    "        # The key must match the __init__ argument name so that\n",
    "        # from_config(get_config()) round-trips (it was 'dnn_layers_config',\n",
    "        # which __init__ does not accept).\n",
    "        config = super().get_config().copy()\n",
    "        config.update({\n",
    "            'dnn_config': self.dnn_layers_config,\n",
    "        })\n",
    "        return config"
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "class DeepFM:\n",
    "    \"\"\"Functional-API DeepFM: first-order + FM second-order + DNN -> sigmoid.\"\"\"\n",
    "    def __init__(self,sparse_feat_config,dense_feats):\n",
    "        \n",
    "        self.sparse_feat_config= sparse_feat_config\n",
    "        self.inputs_sparse, self.inputs_dense = self.build_input(sparse_feat_config,dense_feats)\n",
    "        \n",
    "        # component layers defined earlier in the notebook\n",
    "        self.firsr_cross_dense = firsr_cross_dense(self.sparse_feat_config)\n",
    "        self.Embedding_dense = Embedding_dense(self.sparse_feat_config,8)\n",
    "        self.second_cross_dense = second_cross_dense()\n",
    "        self.DNN = DNN([128,64,1])\n",
    "        \n",
    "        self.deepFm =  self.build_model() \n",
    "    def build_input(self,sparse_feat_config,dense_feats):\n",
    "        # One shape-(1,) Input per feature, named after the feature column.\n",
    "        inputs_sparse = []\n",
    "        inputs_dense = []\n",
    "        for key in sparse_feat_config:\n",
    "            inputs_sparse.append(layer.Input(shape=(1,),name=key))\n",
    "        for key in dense_feats:\n",
    "            inputs_dense.append(layer.Input(shape=(1,),name=key))\n",
    "        \n",
    "        return inputs_sparse, inputs_dense\n",
    "\n",
    "    def build_model(self):\n",
    "        # Wire the three branches off the shared inputs and sum before sigmoid.\n",
    "        linear_part = self.firsr_cross_dense(self.inputs_sparse, self.inputs_dense) \n",
    "        embed_lookup =  self.Embedding_dense(self.inputs_sparse) \n",
    "        snd_order_sparse_layer = self.second_cross_dense(embed_lookup)\n",
    "        fc_layer = self.DNN(embed_lookup)\n",
    "        \n",
    "        output_layer = layer.Add()([linear_part, snd_order_sparse_layer, fc_layer])\n",
    "        output_layer = layer.Activation('sigmoid')(output_layer)\n",
    "        model = Model(self.inputs_sparse + self.inputs_dense, outputs=output_layer)\n",
    "        model.compile(optimizer = tf.keras.optimizers.RMSprop(learning_rate=1e-3),\n",
    "                      loss= 'binary_crossentropy',\n",
    "                      metrics=['AUC'])\n",
    "        return model\n",
    "    \n",
    "\n",
    "    def train(self,train_data,train_label,valid_data, valid_label,batch_size,epochs,callbacks):\n",
    "        # Thin wrapper over Keras fit; input lists must be ordered sparse + dense.\n",
    "        self.deepFm.fit(train_data,train_label,\n",
    "                  batch_size=batch_size, epochs=epochs, verbose=1, \n",
    "                  validation_data=(valid_data, valid_label),\n",
    "                  callbacks = callbacks\n",
    "                 )\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "fold: 0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/indexed_slices.py:434: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.\n",
      "  \"Converting sparse IndexedSlices to a dense Tensor of unknown shape. \"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "3749/3750 [============================>.] - ETA: 0s - loss: 2.3934 - auc: 0.7188\n",
      "Epoch 00001: val_loss improved from inf to 0.70909, saving model to ./model/0.h5\n",
      "3750/3750 [==============================] - 71s 19ms/step - loss: 2.3930 - auc: 0.7188 - val_loss: 0.7091 - val_auc: 0.7460 - lr: 0.0010\n",
      "fold: 1\n",
      "3748/3750 [============================>.] - ETA: 0s - loss: 2.4185 - auc: 0.7031\n",
      "Epoch 00001: val_loss improved from inf to 0.71248, saving model to ./model/1.h5\n",
      "3750/3750 [==============================] - 70s 19ms/step - loss: 2.4176 - auc: 0.7032 - val_loss: 0.7125 - val_auc: 0.7478 - lr: 0.0010\n",
      "fold: 2\n",
      "3748/3750 [============================>.] - ETA: 0s - loss: 2.3972 - auc: 0.7219\n",
      "Epoch 00001: val_loss improved from inf to 0.71068, saving model to ./model/2.h5\n",
      "3750/3750 [==============================] - 72s 19ms/step - loss: 2.3963 - auc: 0.7219 - val_loss: 0.7107 - val_auc: 0.7443 - lr: 0.0010\n",
      "fold: 3\n",
      "3748/3750 [============================>.] - ETA: 0s - loss: 2.3962 - auc: 0.7203\n",
      "Epoch 00001: val_loss improved from inf to 0.71336, saving model to ./model/3.h5\n",
      "3750/3750 [==============================] - 72s 19ms/step - loss: 2.3953 - auc: 0.7203 - val_loss: 0.7134 - val_auc: 0.7421 - lr: 0.0010\n",
      "fold: 4\n",
      "3748/3750 [============================>.] - ETA: 0s - loss: 2.3874 - auc: 0.7268\n",
      "Epoch 00001: val_loss improved from inf to 0.71458, saving model to ./model/4.h5\n",
      "3750/3750 [==============================] - 72s 19ms/step - loss: 2.3866 - auc: 0.7268 - val_loss: 0.7146 - val_auc: 0.7425 - lr: 0.0010\n"
     ]
    }
   ],
   "source": [
    "# 5-fold cross-validation + early stopping + model checkpointing\n",
    "skf = StratifiedKFold(n_splits = 5, random_state=1996, shuffle = True)\n",
    "for idx, (train_idx, val_idx) in enumerate(skf.split(total_data,total_data['label'])):\n",
    "    print('fold:',idx)\n",
    "    K.clear_session()\n",
    "    train_sparse_x,train_dense_x,train_label = make_data(total_data,train_idx)\n",
    "    val_sparse_x,val_dense_x,val_label = make_data(total_data,val_idx) \n",
    "    # define callbacks\n",
    "    \n",
    "    # save the best weights for this fold\n",
    "    file_path = f'./model/{idx}.h5'\n",
    "\n",
    "    checkpoint = tf.keras.callbacks.ModelCheckpoint(file_path, monitor='val_loss', verbose=1, save_best_only=True,save_weights_only=True, mode='min')\n",
    "    # shrink the learning rate when the monitored metric stops improving\n",
    "    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=1, min_lr=0.0001, verbose=1)\n",
    "    # stop early when val_loss improves by less than 1e-4 for 2 consecutive epochs\n",
    "    earlystopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2,verbose=1, mode='auto')\n",
    "    callbacks = [checkpoint, reduce_lr, earlystopping]\n",
    "\n",
    "    # build a fresh model per fold\n",
    "    deepfm = DeepFM(sparse_feat_config, dense_feats)\n",
    "    deepfm.train(train_sparse_x+train_dense_x,train_label,\n",
    "                 val_sparse_x+val_dense_x,val_label,\n",
    "                128,1, callbacks=callbacks)    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[0.33034733],\n",
       "       [0.31418633],\n",
       "       [0.24944091],\n",
       "       ...,\n",
       "       [0.75203234],\n",
       "       [0.83707786],\n",
       "       [0.73838204]], dtype=float32)"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the saved fold-0 weights into a fresh model and predict on the\n",
    "# last fold's validation split (val_* left over from the loop above).\n",
    "deepfm = DeepFM(sparse_feat_config, dense_feats).build_model()\n",
    "deepfm.load_weights('./model/0.h5')\n",
    "deepfm.predict(val_sparse_x+val_dense_x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 写法二\n",
    "直接继承Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "class DeepFM(Model):\n",
    "    \"\"\"Subclassed-Model DeepFM: first-order + FM second-order + DNN -> sigmoid.\n",
    "\n",
    "    NOTE(review): call() indexes x_sparse by feature name, so it expects a\n",
    "    mapping/DataFrame of encoded categorical columns (not the list inputs\n",
    "    used by the functional version) - confirm against the training cell.\n",
    "    \"\"\"\n",
    "    def __init__(self, sparse_feat_config):\n",
    "        super(DeepFM, self).__init__()\n",
    "        # first-order term: covers both dense and sparse features\n",
    "        self.reg_1 = tf.keras.regularizers.l2(0.5)\n",
    "        self.embed_first = []\n",
    "#         self.embed_lookup_first = []\n",
    "        self.sparse_feat_config = sparse_feat_config\n",
    "        for key, value in self.sparse_feat_config.items():\n",
    "            # 1-d embedding = one scalar weight per category\n",
    "            self.embed_first.append(layer.Embedding(value,1, \n",
    "                                                    embeddings_regularizer=self.reg_1, \n",
    "                                                    name='embed'+key))\n",
    "        self.dense_out_first = layer.Dense(1,name='dense_feat_first')\n",
    "        \n",
    "        # second-order term: crosses sparse features only\n",
    "        self.embed_second = []\n",
    "#         self.embed_lookup_second = []\n",
    "        self.reg_2 = tf.keras.regularizers.l2(0.5)\n",
    "        for key, value in self.sparse_feat_config.items():\n",
    "            self.embed_second.append(layer.Embedding(value,8,\n",
    "                                     embeddings_regularizer = self.reg_2,\n",
    "                                     name = 'embed' + key))\n",
    "            \n",
    "        \n",
    "        # DNN part\n",
    "        self.dnn_layers_out = [128,64,1]\n",
    "        self.dnn_layers = []\n",
    "        for s in self.dnn_layers_out:\n",
    "            self.dnn_layers.append(layer.Dense(s))\n",
    "        \n",
    "        \n",
    "    def call(self, x_sparse,x_dense):\n",
    "        # first-order term; final linear_part is (None,1)\n",
    "        # sparse part\n",
    "        embed_lookup_first = []\n",
    "        for i,key in enumerate(self.sparse_feat_config):\n",
    "            t = tf.cast(x_sparse[key],tf.int32)\n",
    "            _embed = self.embed_first[i](t)\n",
    "            embed_lookup_first.append(_embed)\n",
    "        fst_order_sparse_layer  = layer.Add()(embed_lookup_first)\n",
    "        # dense part\n",
    "        fst_order_dense_layer  = self.dense_out_first(tf.cast(x_dense, tf.float32))\n",
    "        # merge into the final first-order result\n",
    "        linear_part = layer.Add()([fst_order_sparse_layer,fst_order_dense_layer])\n",
    "        \n",
    "        # second-order term; final snd_order_sparse_layer is (None,1)\n",
    "        embed_lookup_second = []\n",
    "        for i,key in enumerate(self.sparse_feat_config):\n",
    "            t = tf.cast(x_sparse[key], tf.int32)\n",
    "            _embed = self.embed_second[i](t)\n",
    "            # _embed: (None,8) -> (None,1,8)\n",
    "            embed_lookup_second.append(tf.expand_dims(_embed,axis=1))\n",
    "        # wrong\n",
    "#         print(len(embed_lookup_second))\n",
    "        # concat to (None,n,k): n = number of features, k = embedding dim\n",
    "        concat_sparse_kd_embed = layer.Concatenate(axis=1)(embed_lookup_second) \n",
    "#         print(concat_sparse_kd_embed.shape)\n",
    "        \n",
    "        # sum first, then square\n",
    "        # sum_1 = K.sum(concat_sparse_kd_embed,axis=1,keepdims=True)\n",
    "        # writing it directly is not a layer, so we wrap it in a Lambda layer\n",
    "        sum_kd_embed = layer.Lambda(lambda x: K.sum(x, axis=1))(concat_sparse_kd_embed)\n",
    "        square_sum_kd_embed = layer.Multiply()([sum_kd_embed,sum_kd_embed])\n",
    "        \n",
    "        # square first, then sum\n",
    "        square_kd_embed = layer.Multiply()([concat_sparse_kd_embed,concat_sparse_kd_embed])\n",
    "        sum_square_kd_embed = layer.Lambda(lambda x: K.sum(x,axis=1))(square_kd_embed)\n",
    "        \n",
    "        # 0.5 * sum(square_of_sum - sum_of_squares)\n",
    "        sub = layer.Subtract()([square_sum_kd_embed,sum_square_kd_embed])\n",
    "        sub = layer.Lambda(lambda x: K.sum(x,axis=1,keepdims=True))(sub)\n",
    "        snd_order_sparse_layer = layer.Lambda(lambda x: x*0.5)(sub)\n",
    "       \n",
    "        # DNN part: final fc_layer is (None,1)\n",
    "#         print(concat_sparse_kd_embed.shape)\n",
    "        fc_layer = layer.Flatten()(concat_sparse_kd_embed)\n",
    "#         fc_layer = flatten_sparse_embed\n",
    "        for i,_ in enumerate(self.dnn_layers):\n",
    "            fc_layer = self.dnn_layers[i](fc_layer)\n",
    "        \n",
    "        # output\n",
    "        output_layer = layer.Add()([linear_part, snd_order_sparse_layer, fc_layer])\n",
    "        output_layer = layer.Activation('sigmoid')(output_layer)\n",
    "#         print(linear_part[:5], snd_order_sparse_layer[:5], fc_layer[:5])\n",
    "#         print(output_layer)\n",
    "\n",
    "        return output_layer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train loss 0.9574868 valid loss 0.94290245\n"
     ]
    }
   ],
   "source": [
    "# Custom training loop for the subclassed DeepFM.\n",
    "# NOTE(review): this cell slices train_sparse_x / train_dense_x with .loc,\n",
    "# so it expects DataFrames - unlike the list-of-arrays variables built for\n",
    "# the functional model above; confirm which cell defined them.\n",
    "num_epoch = 1\n",
    "optimizer  = tf.keras.optimizers.SGD(learning_rate = 1e-3)\n",
    "loss_fn = tf.keras.losses.BinaryCrossentropy() \n",
    "deepfm = DeepFM(sparse_feat_config)\n",
    "batch_size = 25600\n",
    "for e in range(num_epoch):\n",
    "    loss = []\n",
    "    for i in range(len(train_data)//batch_size ):\n",
    "        with tf.GradientTape() as tape:\n",
    "            begin_i = batch_size*i\n",
    "            end_i = batch_size*(i+1)\n",
    "            y_pred =  deepfm(train_sparse_x.loc[begin_i:end_i-1,:], train_dense_x.loc[begin_i:end_i-1,:])\n",
    "            _loss = loss_fn(train_label[begin_i:end_i],y_pred)\n",
    "        loss.append(_loss)\n",
    "        # BUG FIX: differentiate only the current batch loss (_loss), not the\n",
    "        # whole accumulated list - losses from earlier iterations were recorded\n",
    "        # under previous tapes, so passing them made every step scan targets\n",
    "        # that are disconnected from this tape.\n",
    "        grads = tape.gradient(_loss,deepfm.trainable_variables)   \n",
    "        optimizer.apply_gradients(zip(grads,deepfm.trainable_variables))\n",
    "    # epoch-end validation on the full holdout split\n",
    "    y_pred_val =  deepfm(val_sparse_x, val_dense_x)\n",
    "    val_loss = loss_fn(val_label,y_pred_val)\n",
    "    print('train loss',tf.reduce_mean(loss).numpy(),'valid loss',val_loss.numpy())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
