{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 缓存整个数据集\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     16
    ]
   },
   "outputs": [],
   "source": [
    "import sys, os\n",
    "import pandas as pd\n",
    "# import modin.pandas as pd\n",
    "import json\n",
    "from tqdm import tqdm\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.preprocessing import LabelEncoder, StandardScaler # , MinMaxScaler\n",
    "from sklearn.datasets import load_svmlight_file\n",
    "import pickle\n",
    "\n",
    "# NOTE: deepctr_torch is a locally modified copy (multiclass / model_save_path support).\n",
    "from deepctr_torch.models import DeepFM, PNN, WDL, DCN, NFM, xDeepFM\n",
    "from deepctr_torch.inputs import SparseFeat, DenseFeat, get_feature_names\n",
    "\n",
    "import torch\n",
    "\n",
    "def libsvm_to_df(data_path, header):\n",
    "    \"\"\"Read a libsvm-format file into a pandas DataFrame.\n",
    "\n",
    "    Args:\n",
    "        data_path: path of the libsvm file.\n",
    "        header: feature column names; length must equal the feature count.\n",
    "\n",
    "    Returns:\n",
    "        DataFrame whose first column is 'target', followed by the feature columns.\n",
    "    \"\"\"\n",
    "    X, y = load_svmlight_file(data_path)\n",
    "    # .toarray() (ndarray) instead of .todense(): avoids the deprecated np.matrix type.\n",
    "    features = pd.DataFrame(X.toarray())\n",
    "    features.columns = header\n",
    "    target = pd.DataFrame(y)\n",
    "    target.columns = ['target']\n",
    "    return pd.concat([target, features], axis=1)  # target is the first column"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     11
    ],
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Load the column names; keep features whose value in keys.json is >= 0.\n",
    "with open(\"data/train_dev_test/keys.json\", \"r\", encoding=\"utf-8\") as f1:\n",
    "    header = json.load(f1)[\"_feature\"]\n",
    "    header = [k for k,v in header.items() if v>=0]\n",
    "\n",
    "# Categorical (sparse) features, selected by name prefix.\n",
    "realtimeFeature_ = [name for name in header if name.startswith(\"realtimeFeature_\")]\n",
    "rcin_summaryPredictResults_ = [name for name in header if name.startswith(\"rcin_summaryPredictResults_\")]\n",
    "# NOTE(review): this prefix ends with ':' unlike the two above -- confirm it is intentional.\n",
    "rcin_intentTypeList = [name for name in header if name.startswith(\"rcin_intentTypeList:\")]\n",
    "\n",
    "sparse_feat = [\"oi_virtualPaymentType\", \"oi_codTimeType\"]\n",
    "# Hand-curated categorical column names (originally maintained in a spreadsheet).\n",
    "sparse_feat_from_excel = [\n",
    "    \"third_id\",\n",
    "    \"orderid\",\n",
    "    \"realtimeFeature\",\n",
    "    \"entry\",\n",
    "    \"order_invoice_type\",\n",
    "    \"stock\",\n",
    "    \"order_type\",\n",
    "    \"order_state\",\n",
    "    \"oin_user_profession\",\n",
    "    \"oin_robot_ask_average_session_duration\",\n",
    "    \"oin_super_long_message_scale\",\n",
    "    \"oin_long_message_scale\",\n",
    "    \"oin_sensitive_words_scale\",\n",
    "    \"oin_user_sex\",\n",
    "    \"oin_mail_satisfied_scale\",\n",
    "    \"oin_person_robot_ask_times_scale\",\n",
    "    \"oin_user_cycl_lifecycle\",\n",
    "    \"oin_user_score\",\n",
    "    \"oin_average_message_count\",\n",
    "    \"oin_user_stage\",\n",
    "    \"oin_mail_unsatisfied_scale\",\n",
    "    \"oin_mail_resolution_scale\",\n",
    "    \"oin_message_send_interval\",\n",
    "    \"oin_person_robot_ask_average_session_duration\",\n",
    "    \"oin_user_anger_scale\",\n",
    "    \"oin_user_education\",\n",
    "    \"oin_phone_again_after_robot_ask_24h_intervel\",\n",
    "    \"oin_user_age\",\n",
    "    \"oin_invalid_ask_average_intervel\",\n",
    "    \"oin_robot_ask_times_scale\",\n",
    "    \"oin_phone_again_after_robot_ask_24h_scale\",\n",
    "    \"oin_user_anxiety_scale\",\n",
    "    \"oin_short_message_scale\",\n",
    "    \"oin_middle_message_scale\",\n",
    "    \"oin_user_level\",\n",
    "    \"oin_robot_history_invalid_ask_scale\",\n",
    "    \"rcin_longMsgProportion\",\n",
    "    \"rcin_summaryPredictResults\",\n",
    "    \"rcin_chatDuration\",\n",
    "    \"rcin_entry\",\n",
    "    \"rcin_createTime\",\n",
    "    \"rcin_midMsgProportion\",\n",
    "    \"rcin_shortMsgProportion\",\n",
    "    \"rcin_superLongMsgProportion\",\n",
    "    \"rcin_msgCount\",\n",
    "    \"oi_orderType\",\n",
    "    \"oi_idShipmentType\",\n",
    "    \"oi_discount\",\n",
    "    \"oi_orderBulk\",\n",
    "    \"oi_paymentWay\",\n",
    "    \"oi_idCompanyBranch\",\n",
    "    \"oi_isNewOrder\",\n",
    "    \"oi_moneyBalance\",\n",
    "    \"oi_idInvoiceHeaderType\",\n",
    "    \"oi_status\",\n",
    "    \"oi_promotionPrice\",\n",
    "    \"oi_idInvoicePutType\",\n",
    "    \"oi_orderBankFactPrice\",\n",
    "    \"oi_userLevel\",\n",
    "    \"oi_bigItemCodTime\",\n",
    "    \"oi_pricePeriod\",\n",
    "    \"oi_status2\",\n",
    "    \"oi_parentId\",\n",
    "    \"oi_splitType\",\n",
    "    \"oi_isRegister\",\n",
    "    \"oi_idInvoiceContentsType\",\n",
    "    \"oi_isPutBookInvoice\",\n",
    "    \"oi_idArea\",\n",
    "    \"oi_isUseBalance\",\n",
    "    \"oi_idTown\",\n",
    "    \"oi_ver\",\n",
    "    \"oi_unionId\",\n",
    "    \"oi_rePrice\",\n",
    "    \"oi_couponDiscount\",\n",
    "    \"oi_idProvince\",\n",
    "    \"oi_scareBuyState\",\n",
    "    \"oi_idInvoiceType\",\n",
    "    \"oi_initFactPrice\",\n",
    "    \"oi_jingDouDiscount\",\n",
    "    \"oi_isJdShip\",\n",
    "    \"oi_cODTime\",\n",
    "    \"oi_idPickSite\",\n",
    "    \"oi_isCodInform\",\n",
    "    \"oi_cost\",\n",
    "    \"oi_idDelivery\",\n",
    "    \"oi_idCity\",\n",
    "    \"oi_totalFee\",\n",
    "    \"oi_idInvoiceContentTypeBook\",\n",
    "    \"oi_idPaymentType\",\n",
    "    \"oi_payMoney\",\n",
    "    \"oi_jingDouRate\",\n",
    "    \"oi_zzweight\",\n",
    "    \"oi_jingDouCount\",\n",
    "    \"oi_sxzzweight\",\n",
    "    \"oi_clientSystemName\",\n",
    "    \"oi_clientBrowserName\",\n",
    "    \"ci_caseType\",\n",
    "    \"ci_caseState\",\n",
    "    \"ci_owner\",\n",
    "    \"ci_outLineId\",\n",
    "    \"ci_source\",\n",
    "    \"ci_callBackFlag\",\n",
    "    \"ci_curTransferId\",\n",
    "    \"ci_orderType\",\n",
    "    \"ci_orderSourceFlag\",\n",
    "    \"ci_smartFollowFlag\",\n",
    "    \"ci_cusLevel\",\n",
    "    \"ci_curUpgradeCareId\",\n",
    "    \"ci_curUpgradeLv2Id\",\n",
    "    \"ei_sku\",\n",
    "    \"ei_order\",\n",
    "    \"ei_pin\"]\n",
    "sparse_features = realtimeFeature_ + rcin_summaryPredictResults_ + sparse_feat + \\\n",
    "                    sparse_feat_from_excel + rcin_intentTypeList\n",
    "# Keep only names actually present in the header.\n",
    "sparse_features = [item for item in sparse_features if item in header]\n",
    "\n",
    "# Fit one LabelEncoder per categorical feature on the 1000-row training sample.\n",
    "# NOTE(review): categories unseen here are later mapped to 0, which collides with\n",
    "# a real encoded class -- consider reserving a dedicated 'unknown' index.\n",
    "feat_lbe = {}\n",
    "df1 = libsvm_to_df(data_path = f\"data/train_dev_test/train_1000.txt\", header=header)\n",
    "for feat_name in tqdm(sparse_features, desc = \"LabelEncoder fitting\"):    \n",
    "    lbe = LabelEncoder()\n",
    "    lbe = lbe.fit(df1[feat_name])\n",
    "    feat_lbe[feat_name] = lbe\n",
    "\n",
    "# Numeric (dense) features: everything that is not categorical.\n",
    "dense_features = [name for name in header if name not in sparse_features]\n",
    "\n",
    "# Fit one StandardScaler per dense feature on the same training sample.\n",
    "feat_scaler = {}\n",
    "for feat_name in tqdm(dense_features, desc = \"StandardScaler fitting\"):\n",
    "\n",
    "    scaler = StandardScaler()\n",
    "    scaler.fit(df1[feat_name].to_numpy().reshape(-1, 1))\n",
    "    feat_scaler[feat_name] = scaler\n",
    "\n",
    "# Vocabulary size per categorical feature = number of classes seen at fit time.\n",
    "vocabulary_size_map = {feat_name:len(feat_lbe[feat_name].classes_) for feat_name in sparse_features}\n",
    "\n",
    "# deepctr_torch feature descriptors: dim-4 embeddings for sparse features,\n",
    "# scalar inputs for dense features.\n",
    "fixlen_feature_columns = [SparseFeat(feat_name, vocabulary_size=vocabulary_size_map[feat_name], embedding_dim=4)\n",
    "                        for i, feat_name in enumerate(sparse_features)] + [DenseFeat(feat_name, 1,)\n",
    "                        for feat_name in dense_features]\n",
    "dnn_feature_columns = fixlen_feature_columns\n",
    "linear_feature_columns = fixlen_feature_columns\n",
    "\n",
    "feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)\n",
    "\n",
    "# Name of the label column produced by libsvm_to_df.\n",
    "target = ['target']"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "读取`test_model_input`和`dev_model_input`并保存. 模型`load_state_dict`后能方便地使用. "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构建数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Build the training inputs: encode categoricals, scale dense features.\n",
    "train_path = f\"data/train_dev_test/train_1000.txt\"\n",
    "df_train = libsvm_to_df(data_path = train_path, header=header)\n",
    "\n",
    "for feat_name in tqdm(sparse_features, desc = \"train LabelEncoder transforming\"):\n",
    "    # Dict lookup instead of lbe.transform() so unseen values do not raise.\n",
    "    # NOTE(review): unseen values fall back to 0, which is also a real class id.\n",
    "    lbe_dict = dict(zip(feat_lbe[feat_name].classes_, feat_lbe[feat_name].transform(feat_lbe[feat_name].classes_)))\n",
    "    df_train[feat_name] = df_train[feat_name].apply(lambda x: lbe_dict.get(x, 0))\n",
    "\n",
    "for feat_name in tqdm(dense_features, desc = \"train StandardScaler transforming\"):\n",
    "    df_train[feat_name] = feat_scaler[feat_name].transform(df_train[feat_name].to_numpy().reshape(-1, 1))\n",
    "\n",
    "# Map feature name -> column Series, as expected by deepctr_torch models.\n",
    "train_model_input = {name:df_train[name] for name in feature_names}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Build the dev inputs with the transformers fitted on train.\n",
    "dev_path = f\"data/train_dev_test/dev_1000.txt\"\n",
    "df_dev = libsvm_to_df(data_path = dev_path, header=header)\n",
    "\n",
    "for feat_name in tqdm(sparse_features, desc = \"dev LabelEncoder transforming\"):\n",
    "    # Unseen categories fall back to index 0.\n",
    "    lbe_dict = dict(zip(feat_lbe[feat_name].classes_, feat_lbe[feat_name].transform(feat_lbe[feat_name].classes_)))\n",
    "    df_dev[feat_name] = df_dev[feat_name].apply(lambda x: lbe_dict.get(x, 0))\n",
    "\n",
    "for feat_name in tqdm(dense_features, desc = \"dev StandardScaler transforming\"):\n",
    "    df_dev[feat_name] = feat_scaler[feat_name].transform(df_dev[feat_name].to_numpy().reshape(-1, 1))\n",
    "\n",
    "dev_model_input = {name:df_dev[name] for name in feature_names}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the test inputs with the transformers fitted on train.\n",
    "test_path = f\"data/train_dev_test/test_1000.txt\"\n",
    "df_test = libsvm_to_df(data_path = test_path, header=header)\n",
    "\n",
    "for feat_name in tqdm(sparse_features, desc = \"Test LabelEncoder transforming\"):\n",
    "    # Unseen categories fall back to index 0.\n",
    "    lbe_dict = dict(zip(feat_lbe[feat_name].classes_, feat_lbe[feat_name].transform(feat_lbe[feat_name].classes_)))\n",
    "    df_test[feat_name] = df_test[feat_name].apply(lambda x: lbe_dict.get(x, 0))\n",
    "\n",
    "for feat_name in tqdm(dense_features, desc = \"test StandardScaler transforming\"):\n",
    "    df_test[feat_name] = feat_scaler[feat_name].transform(df_test[feat_name].to_numpy().reshape(-1, 1))\n",
    "\n",
    "test_model_input = {name:df_test[name] for name in feature_names}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 保存pkl\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# fit-transformer (note: the fitted LabelEncoders / StandardScalers themselves are not pickled below)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Cache the training inputs and feature columns so training can restart\n",
    "# without re-reading / re-transforming the raw libsvm data.\n",
    "train_cached = {\n",
    "    \"fixlen_feature_columns\":fixlen_feature_columns,\n",
    "    \"train_model_input\":train_model_input,\n",
    "    \"df_train_target_values\":df_train[target].values,\n",
    "    }\n",
    "with open(\"train_cached_StandardScaler.pkl\", \"wb\")as f1:\n",
    "    pickle.dump(train_cached, f1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Cache dev and test inputs together (one pickle for both evaluation splits).\n",
    "dev_test_cached = {\n",
    "    \"fixlen_feature_columns\":fixlen_feature_columns,\n",
    "    \"dev_model_input\":dev_model_input,\n",
    "    \"df_dev_target_values\":df_dev[target].values,\n",
    "    \"test_model_input\":test_model_input,\n",
    "    \"df_test_target_values\":df_test[target].values,\n",
    "    }\n",
    "with open(\"dev_test_cached_StandardScaler.pkl\", \"wb\")as f1:\n",
    "    pickle.dump(dev_test_cached, f1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Show the cached pickle files and their sizes.\n",
    "!ls -hl *cached*.pkl"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 读取"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal imports for the load-and-evaluate half of the notebook\n",
    "# (lets this section run on a fresh kernel).\n",
    "import os\n",
    "import pickle\n",
    "import torch\n",
    "from deepctr_torch.models import DeepFM, PNN, WDL, DCN, NFM, xDeepFM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reload cached model inputs. NOTE: pickle.load can execute arbitrary code --\n",
    "# only load files produced by this notebook.\n",
    "with open(\"train_cached_StandardScaler.pkl\", \"rb\")as f1:\n",
    "    train_cached = pickle.load(f1)\n",
    "with open(\"dev_test_cached_StandardScaler.pkl\", \"rb\")as f1:\n",
    "    dev_test_cached = pickle.load(f1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# DeepFM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run on GPU when available, otherwise fall back to CPU.\n",
    "if torch.cuda.is_available():\n",
    "    DEVICE = \"cuda\"\n",
    "else:\n",
    "    DEVICE = \"cpu\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 训练"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prelu-256-128"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# prelu-256-128\n",
    "# Class weights: label 0 gets weight 0 (presumably unused -- verify); 0.2 is\n",
    "# spread over the 10 most frequent labels, 0.8 over the remaining 111 of 122.\n",
    "# NOTE(review): top10 is hard-coded -- confirm it matches the current data.\n",
    "top10 = [5, 4, 1, 8, 22, 20, 29, 33, 15, 25]\n",
    "label_weight_norm_manul = [0] + [0.2/10 if i in top10 else 0.8/(121-10) for i in range(1,122)]\n",
    "label_weight = torch.tensor(label_weight_norm_manul).to(DEVICE)\n",
    "\n",
    "# NOTE(review): this run saves under save_deepFM_StandardScaler/ while the\n",
    "# prediction cells below load from save_deepFM/ -- confirm which is intended.\n",
    "model_path = \"./save_deepFM_StandardScaler/\"\n",
    "if not os.path.exists(model_path): os.mkdir(model_path)\n",
    "model_path += \"DeepFM_prelu-256-128.pt\"\n",
    "\n",
    "model = DeepFM(linear_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_hidden_units=(256, 128), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation='prelu',\n",
    "                task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)\n",
    "model.compile(\"adam\", \"cross_entropy\", metrics=['acc_top1', 'acc_top3'])\n",
    "\n",
    "model.fit(x=train_cached[\"train_model_input\"], y=train_cached[\"df_train_target_values\"],\n",
    "        validation_data = (dev_test_cached[\"dev_model_input\"], dev_test_cached[\"df_dev_target_values\"]),\n",
    "        batch_size=1024, epochs=10, verbose=1, label_weight=label_weight)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### relu-256-128"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# relu-256-128\n",
    "# Same setup as the prelu run, with relu activations; checkpoints under save_deepFM/.\n",
    "top10 = [5, 4, 1, 8, 22, 20, 29, 33, 15, 25]\n",
    "label_weight_norm_manul = [0] + [0.2/10 if i in top10 else 0.8/(121-10) for i in range(1,122)]\n",
    "label_weight = torch.tensor(label_weight_norm_manul).to(DEVICE)\n",
    "\n",
    "model_path = \"./save_deepFM/\"\n",
    "if not os.path.exists(model_path): os.mkdir(model_path)\n",
    "model_path += \"DeepFM_relu-256-128.pt\"\n",
    "\n",
    "model = DeepFM(linear_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_hidden_units=(256, 128), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation='relu',\n",
    "                task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)\n",
    "model.compile(\"adam\", \"cross_entropy\", metrics=['acc_top1', 'acc_top3'])\n",
    "\n",
    "model.fit(x=train_cached[\"train_model_input\"], y=train_cached[\"df_train_target_values\"],\n",
    "        validation_data = (dev_test_cached[\"dev_model_input\"], dev_test_cached[\"df_dev_target_values\"]),\n",
    "        batch_size=1024, epochs=10, verbose=1, label_weight=label_weight)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prelu-128-56"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# prelu-128-56\n",
    "# Reduced hidden-layer sizes (128, 56) compared with the 256-128 runs.\n",
    "top10 = [5, 4, 1, 8, 22, 20, 29, 33, 15, 25]\n",
    "label_weight_norm_manul = [0] + [0.2/10 if i in top10 else 0.8/(121-10) for i in range(1,122)]\n",
    "label_weight = torch.tensor(label_weight_norm_manul).to(DEVICE)\n",
    "\n",
    "model_path = \"./save_deepFM/\"\n",
    "if not os.path.exists(model_path): os.mkdir(model_path)\n",
    "model_path += \"DeepFM_prelu-128-56.pt\"\n",
    "\n",
    "model = DeepFM(linear_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_hidden_units=(128, 56), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation='prelu',\n",
    "                task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)\n",
    "model.compile(\"adam\", \"cross_entropy\", metrics=['acc_top1', 'acc_top3'])\n",
    "\n",
    "model.fit(x=train_cached[\"train_model_input\"], y=train_cached[\"df_train_target_values\"],\n",
    "        validation_data = (dev_test_cached[\"dev_model_input\"], dev_test_cached[\"df_dev_target_values\"]),\n",
    "        batch_size=1024, epochs=10, verbose=1, label_weight=label_weight)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prelu-256-128  l2_reg_linear=0.0001"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# prelu-256-128\n",
    "# Regularization: defaults are 0.00001; raised here to l2_reg_linear=0.0001 and\n",
    "# l2_reg_embedding=0.0001 (l2_reg_dnn is not implemented in the modified code).\n",
    "top10 = [5, 4, 1, 8, 22, 20, 29, 33, 15, 25]\n",
    "label_weight_norm_manul = [0] + [0.2/10 if i in top10 else 0.8/(121-10) for i in range(1,122)]\n",
    "label_weight = torch.tensor(label_weight_norm_manul).to(DEVICE)\n",
    "\n",
    "model_path = \"./save_deepFM/\"\n",
    "if not os.path.exists(model_path): os.mkdir(model_path)\n",
    "model_path += \"DeepFM_prelu-256-128-l2_reg-0.0001.pt\"\n",
    "\n",
    "model = DeepFM(linear_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_hidden_units=(256, 128), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation='prelu',\n",
    "                task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path,\n",
    "              l2_reg_linear=0.0001, l2_reg_embedding=0.0001)\n",
    "model.compile(\"adam\", \"cross_entropy\", metrics=['acc_top1', 'acc_top3'])\n",
    "\n",
    "model.fit(x=train_cached[\"train_model_input\"], y=train_cached[\"df_train_target_values\"],\n",
    "        validation_data = (dev_test_cached[\"dev_model_input\"], dev_test_cached[\"df_dev_target_values\"]),\n",
    "        batch_size=1024, epochs=10, verbose=1, label_weight=label_weight)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### dev-test互换  l2_reg_linear=0.0001"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# prelu-256-128\n",
    "# Regularization: defaults are 0.00001; raised to l2_reg_linear=0.0001, l2_reg_embedding=0.0001.\n",
    "# NOTE(review): this run validates on the TEST split (dev/test swapped) --\n",
    "# using the test set for epoch selection leaks it into model selection.\n",
    "top10 = [5, 4, 1, 8, 22, 20, 29, 33, 15, 25]\n",
    "label_weight_norm_manul = [0] + [0.2/10 if i in top10 else 0.8/(121-10) for i in range(1,122)]\n",
    "label_weight = torch.tensor(label_weight_norm_manul).to(DEVICE)\n",
    "\n",
    "model_path = \"./save_deepFM/\"\n",
    "if not os.path.exists(model_path): os.mkdir(model_path)\n",
    "model_path += \"DeepFM_prelu-256-128-l2_reg-0.0001-dev-test互换.pt\"\n",
    "\n",
    "model = DeepFM(linear_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_hidden_units=(256, 128), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation='prelu',\n",
    "                task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path,\n",
    "              l2_reg_linear=0.0001, l2_reg_embedding=0.0001)\n",
    "model.compile(\"adam\", \"cross_entropy\", metrics=['acc_top1', 'acc_top3'])\n",
    "\n",
    "model.fit(x=train_cached[\"train_model_input\"], y=train_cached[\"df_train_target_values\"],\n",
    "        validation_data = (dev_test_cached[\"test_model_input\"], dev_test_cached[\"df_test_target_values\"]),\n",
    "        batch_size=1024, epochs=10, verbose=1, label_weight=label_weight)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-select the device so the prediction section can run standalone.\n",
    "DEVICE = \"cuda\" if torch.cuda.is_available() else 'cpu'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prelu-256-128"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# prelu-256-128\n",
    "# Evaluate every per-epoch checkpoint on the test split.\n",
    "# NOTE(review): strict=False silently ignores missing/unexpected keys when\n",
    "# loading weights -- a mismatched architecture will not raise.\n",
    "for num_epoch in range(10):\n",
    "    model_path = f\"./save_deepFM/DeepFM_prelu-256-128_epoch_{num_epoch}.pt\"\n",
    "    print(model_path)\n",
    "\n",
    "    model = DeepFM(linear_feature_columns = dev_test_cached[\"fixlen_feature_columns\"],\n",
    "                    dnn_feature_columns = dev_test_cached[\"fixlen_feature_columns\"],\n",
    "                    dnn_hidden_units=(256, 128), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation='prelu',\n",
    "                    task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)\n",
    "    model.compile(\"adam\", \"cross_entropy\", metrics=['acc_top1', 'acc_top3'])\n",
    "    \n",
    "    model.load_state_dict((torch.load(model_path, map_location=torch.device(DEVICE))), strict=False)\n",
    "    eval_result = model.evaluate(dev_test_cached[\"test_model_input\"], dev_test_cached[\"df_test_target_values\"],\n",
    "                                 batch_size=1024)\n",
    "    for k,v in eval_result.items():\n",
    "        print(f\"{k}: {v:.4f}\")\n",
    "    print(\"\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### relu-256-128"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# relu-256-128\n",
    "# Evaluate every per-epoch relu checkpoint on the test split.\n",
    "for num_epoch in range(10):\n",
    "    model_path = f\"./save_deepFM/DeepFM_relu-256-128_epoch_{num_epoch}.pt\"\n",
    "    print(model_path)\n",
    "\n",
    "    model = DeepFM(linear_feature_columns = dev_test_cached[\"fixlen_feature_columns\"],\n",
    "                    dnn_feature_columns = dev_test_cached[\"fixlen_feature_columns\"],\n",
    "                    dnn_hidden_units=(256, 128), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation='relu',\n",
    "                    task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)\n",
    "    model.compile(\"adam\", \"cross_entropy\", metrics=['acc_top1', 'acc_top3'])\n",
    "    \n",
    "    model.load_state_dict((torch.load(model_path, map_location=torch.device(DEVICE))), strict=False)\n",
    "    eval_result = model.evaluate(dev_test_cached[\"test_model_input\"], dev_test_cached[\"df_test_target_values\"],\n",
    "                                 batch_size=1024)\n",
    "    for k,v in eval_result.items():\n",
    "        print(f\"{k}: {v:.4f}\")\n",
    "    print(\"\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prelu-128-56"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# prelu-128-56 (header comment fixed; this cell evaluates the 128-56 checkpoints)\n",
    "# NOTE(review): feature columns come from train_cached here but from\n",
    "# dev_test_cached in the other prediction cells; both pickles store the same\n",
    "# fixlen_feature_columns content.\n",
    "for num_epoch in range(10):\n",
    "    model_path = f\"./save_deepFM/DeepFM_prelu-128-56_epoch_{num_epoch}.pt\"\n",
    "    print(model_path)\n",
    "\n",
    "    model = DeepFM(linear_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                    dnn_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                    dnn_hidden_units=(128, 56), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation='prelu',\n",
    "                    task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)\n",
    "\n",
    "    model.compile(\"adam\", \"cross_entropy\", metrics=['acc_top1', 'acc_top3'])\n",
    "    \n",
    "    model.load_state_dict((torch.load(model_path, map_location=torch.device(DEVICE))), strict=False)\n",
    "    eval_result = model.evaluate(dev_test_cached[\"test_model_input\"], dev_test_cached[\"df_test_target_values\"],\n",
    "                                 batch_size=1024)\n",
    "    for k,v in eval_result.items():\n",
    "        print(f\"{k}: {v:.4f}\")\n",
    "    print(\"\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prelu-256-128  l2_reg_linear=0.0001"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# prelu-256-128, l2_reg=0.0001 (header comment fixed)\n",
    "for num_epoch in range(10):\n",
    "    model_path = f\"./save_deepFM/DeepFM_prelu-256-128-l2_reg-0.0001_epoch_{num_epoch}.pt\"\n",
    "    print(model_path)\n",
    "\n",
    "    model = DeepFM(linear_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                    dnn_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                    dnn_hidden_units=(256, 128), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation='prelu',\n",
    "                    task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path,\n",
    "                  l2_reg_linear=0.0001, l2_reg_embedding=0.0001)\n",
    "\n",
    "    model.compile(\"adam\", \"cross_entropy\", metrics=['acc_top1', 'acc_top3'])\n",
    "    \n",
    "    model.load_state_dict((torch.load(model_path, map_location=torch.device(DEVICE))), strict=False)\n",
    "    eval_result = model.evaluate(dev_test_cached[\"test_model_input\"], dev_test_cached[\"df_test_target_values\"],\n",
    "                                 batch_size=1024)\n",
    "    for k,v in eval_result.items():\n",
    "        print(f\"{k}: {v:.4f}\")\n",
    "    print(\"\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## debug输出结果-test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": []
   },
   "outputs": [],
   "source": [
    "def topK(y_pred, K=3):\n",
    "    \"\"\"Indices of the K highest-scoring labels per row, best first.\"\"\"\n",
    "    ranked = y_pred.argsort(axis=1)[:, ::-1]  # all labels, descending score\n",
    "    return ranked[:, :K]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the epoch-2 prelu-256-128 checkpoint for manual inspection.\n",
    "model_path = \"./save_deepFM/DeepFM_prelu-256-128_epoch_2.pt\"\n",
    "\n",
    "model = DeepFM(linear_feature_columns = dev_test_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_feature_columns = dev_test_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_hidden_units=(256, 128), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation='prelu',\n",
    "                task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)\n",
    "model.compile(\"adam\", \"cross_entropy\", metrics=['acc_top1', 'acc_top3'])\n",
    "\n",
    "# strict=False: weight loading ignores missing/unexpected keys -- mismatches will not raise.\n",
    "model.load_state_dict((torch.load(model_path, map_location=torch.device(DEVICE))), strict=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-class predicted scores for the test set.\n",
    "y_pred = model.predict(dev_test_cached[\"test_model_input\"], batch_size=1024)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Ground-truth labels, shape (n_samples, 1) (from df_test[['target']].values).\n",
    "y_true = dev_test_cached[\"df_test_target_values\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of test predictions (sanity check against y_true below).\n",
    "len(y_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of ground-truth rows; should equal len(y_pred).\n",
    "len(y_true)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": []
   },
   "outputs": [],
   "source": [
    "def top3_acc(y_true, y_pred):\n",
    "    \"\"\"Fraction of rows whose true label is among the 3 highest-scoring predictions.\"\"\"\n",
    "    K = 3\n",
    "    top_k = y_pred.argsort(axis=1)[:, -K:][:, ::-1]  # K best labels per row, best first\n",
    "    hits = (top_k == y_true).any(axis=1)  # y_true is (n, 1); broadcasts across K\n",
    "    return hits.sum() / hits.shape[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Top-3 accuracy over the full test set.\n",
    "top3_acc(y_true, y_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Manually inspect the first 1000 test rows: top-3 predictions vs. true label.\n",
    "correct = 0\n",
    "for index, (pred, ans) in enumerate(zip(topK(y_pred[:1000], K=3), y_true[:1000])):\n",
    "    is_in = 1 if ans in pred else 0\n",
    "    correct += is_in\n",
    "    print(f\"{index}\\t{pred}\\t{ans}\\t{is_in}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Top-3 hit rate over the 1000 inspected rows.\n",
    "correct/1000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Total parameter count of the loaded model (all parameters, not only trainable).\n",
    "model_parameters = sum(param.numel() for param in model.parameters())\n",
    "print(f'model parameters: {model_parameters}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Wide & Deep"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 训练"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prelu-256-128"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# prelu-256-128\n",
    "# Wide & Deep with the same class-weighting scheme as the DeepFM runs;\n",
    "# checkpoints are saved under save_WDL/.\n",
    "top10 = [5, 4, 1, 8, 22, 20, 29, 33, 15, 25]\n",
    "label_weight_norm_manul = [0] + [0.2/10 if i in top10 else 0.8/(121-10) for i in range(1,122)]\n",
    "label_weight = torch.tensor(label_weight_norm_manul).to(DEVICE)\n",
    "\n",
    "model_path = \"./save_WDL/\"\n",
    "if not os.path.exists(model_path): os.mkdir(model_path)\n",
    "model_path += \"WDL_prelu-256-128.pt\"\n",
    "\n",
    "model = WDL(linear_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_hidden_units=(256, 128), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation='prelu',\n",
    "                task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)\n",
    "model.compile(\"adam\", \"cross_entropy\", metrics=['acc_top1', 'acc_top3'])\n",
    "\n",
    "model.fit(x=train_cached[\"train_model_input\"], y=train_cached[\"df_train_target_values\"],\n",
    "        validation_data = (dev_test_cached[\"dev_model_input\"], dev_test_cached[\"df_dev_target_values\"]),\n",
    "        batch_size=1024, epochs=10, verbose=1, label_weight=label_weight)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 预测"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prelu-256-128"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Evaluate each saved WDL (prelu, 256-128) epoch checkpoint on the test split.\n",
    "for num_epoch in range(10):\n",
    "    model_path = f\"./save_WDL/WDL_prelu-256-128_epoch_{num_epoch}.pt\"\n",
    "    print(model_path)\n",
    "\n",
    "    model = WDL(linear_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                    dnn_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                    dnn_hidden_units=(256, 128), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation='prelu',\n",
    "                    task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)\n",
    "    model.compile(\"adam\", \"cross_entropy\", metrics=['acc_top1', 'acc_top3'])\n",
    "    \n",
    "    # strict=False silently ignores key mismatches, so a wrong or partial\n",
    "    # checkpoint could masquerade as a clean load — surface the mismatches.\n",
    "    state = torch.load(model_path, map_location=torch.device(DEVICE))\n",
    "    missing, unexpected = model.load_state_dict(state, strict=False)\n",
    "    if missing or unexpected:\n",
    "        print(f\"WARNING - missing keys: {missing}, unexpected keys: {unexpected}\")\n",
    "    eval_result = model.evaluate(dev_test_cached[\"test_model_input\"], dev_test_cached[\"df_test_target_values\"],\n",
    "                                 batch_size=1024)\n",
    "    for k,v in eval_result.items():\n",
    "        print(f\"{k}: {v:.4f}\")\n",
    "    print(\"\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Deep & Cross Network"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 训练"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prelu-256-128"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Deep & Cross Network — prelu activation, DNN hidden units (256, 128).\n",
    "# Manual class weights for the loss: the 10 labels in `top10` (presumably the\n",
    "# most frequent classes — confirm) share 0.2 of the total mass, the remaining\n",
    "# 111 of the 122 labels share 0.8, and label 0 gets weight 0.\n",
    "top10 = [5, 4, 1, 8, 22, 20, 29, 33, 15, 25]\n",
    "label_weight_norm_manul = [0] + [0.2/10 if i in top10 else 0.8/(121-10) for i in range(1,122)]\n",
    "label_weight = torch.tensor(label_weight_norm_manul).to(DEVICE)\n",
    "\n",
    "model_path = \"./save_DCN/\"\n",
    "os.makedirs(model_path, exist_ok=True)  # race-free, unlike exists()+mkdir()\n",
    "model_path += \"DCN_prelu-256-128.pt\"\n",
    "\n",
    "model = DCN(linear_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_hidden_units=(256, 128), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation='prelu',\n",
    "                task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)\n",
    "model.compile(\"adam\", \"cross_entropy\", metrics=['acc_top1', 'acc_top3'])\n",
    "\n",
    "# Train for 10 epochs, validating on the dev split each epoch.\n",
    "model.fit(x=train_cached[\"train_model_input\"], y=train_cached[\"df_train_target_values\"],\n",
    "        validation_data = (dev_test_cached[\"dev_model_input\"], dev_test_cached[\"df_dev_target_values\"]),\n",
    "        batch_size=1024, epochs=10, verbose=1, label_weight=label_weight)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 预测"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prelu-256-128"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Neural Factorization Machine"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 训练"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prelu-256-128"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Neural Factorization Machine — prelu activation, DNN hidden units (256, 128).\n",
    "# Manual class weights for the loss: the 10 labels in `top10` (presumably the\n",
    "# most frequent classes — confirm) share 0.2 of the total mass, the remaining\n",
    "# 111 of the 122 labels share 0.8, and label 0 gets weight 0.\n",
    "top10 = [5, 4, 1, 8, 22, 20, 29, 33, 15, 25]\n",
    "label_weight_norm_manul = [0] + [0.2/10 if i in top10 else 0.8/(121-10) for i in range(1,122)]\n",
    "label_weight = torch.tensor(label_weight_norm_manul).to(DEVICE)\n",
    "\n",
    "model_path = \"./save_NFM/\"\n",
    "os.makedirs(model_path, exist_ok=True)  # race-free, unlike exists()+mkdir()\n",
    "model_path += \"NFM_prelu-256-128.pt\"\n",
    "\n",
    "model = NFM(linear_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_hidden_units=(256, 128), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation='prelu',\n",
    "                task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)\n",
    "model.compile(\"adam\", \"cross_entropy\", metrics=['acc_top1', 'acc_top3'])\n",
    "\n",
    "# Train for 10 epochs, validating on the dev split each epoch.\n",
    "model.fit(x=train_cached[\"train_model_input\"], y=train_cached[\"df_train_target_values\"],\n",
    "        validation_data = (dev_test_cached[\"dev_model_input\"], dev_test_cached[\"df_dev_target_values\"]),\n",
    "        batch_size=1024, epochs=10, verbose=1, label_weight=label_weight)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 预测"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prelu-256-128"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# xDeepFM"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 训练"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prelu-256-128"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# xDeepFM — prelu activation, DNN hidden units (256, 128).\n",
    "# Manual class weights for the loss: the 10 labels in `top10` (presumably the\n",
    "# most frequent classes — confirm) share 0.2 of the total mass, the remaining\n",
    "# 111 of the 122 labels share 0.8, and label 0 gets weight 0.\n",
    "top10 = [5, 4, 1, 8, 22, 20, 29, 33, 15, 25]\n",
    "label_weight_norm_manul = [0] + [0.2/10 if i in top10 else 0.8/(121-10) for i in range(1,122)]\n",
    "label_weight = torch.tensor(label_weight_norm_manul).to(DEVICE)\n",
    "\n",
    "model_path = \"./save_xDeepFM/\"\n",
    "os.makedirs(model_path, exist_ok=True)  # race-free, unlike exists()+mkdir()\n",
    "model_path += \"xDeepFM_prelu-256-128.pt\"\n",
    "\n",
    "model = xDeepFM(linear_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_feature_columns = train_cached[\"fixlen_feature_columns\"],\n",
    "                dnn_hidden_units=(256, 128), dnn_dropout=0.3, dnn_use_bn=True, dnn_activation='prelu',\n",
    "                task='multiclass', device=DEVICE, class_num=122, model_save_path=model_path)\n",
    "model.compile(\"adam\", \"cross_entropy\", metrics=['acc_top1', 'acc_top3'])\n",
    "\n",
    "# Train for 10 epochs, validating on the dev split each epoch.\n",
    "model.fit(x=train_cached[\"train_model_input\"], y=train_cached[\"df_train_target_values\"],\n",
    "        validation_data = (dev_test_cached[\"dev_model_input\"], dev_test_cached[\"df_dev_target_values\"]),\n",
    "        batch_size=1024, epochs=10, verbose=1, label_weight=label_weight)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 预测"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prelu-256-128"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Product-based Neural Network"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": true
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
