{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 数据说明\n",
    "1.ID - 唯一ID（不能用于预测）\n",
    "2.Gender - 性别\n",
    "3.City - 城市\n",
    "4.Monthly_Income - 月收入（以卢比为单位）\n",
    "5.DOB - 出生日期\n",
    "6.Lead_Creation_Date - 潜在客户（销售线索）创建日期\n",
    "7.Loan_Amount_Applied - 贷款申请请求金额（印度卢比，INR）\n",
    "8.Loan_Tenure_Applied - 贷款申请期限（单位为年）\n",
    "9.Existing_EMI - 现有贷款的EMI（EMI：Equated Monthly Installment，等额每月还款额）\n",
    "10.Employer_Name - 雇主名称\n",
    "11.Salary_Account - 薪资帐户银行\n",
    "12.Mobile_Verified - 是否移动验证（Y / N）\n",
    "13.Var5 - 连续型变量\n",
    "14.Var1 - 类别型变量\n",
    "15.Loan_Amount_Submitted - 提交的贷款金额（在看到资格后修改和选择）\n",
    "16.Loan_Tenure_Submitted - 提交的贷款期限（单位为年，在看到资格后修改和选择）\n",
    "17.Interest_Rate - 提交贷款金额的利率\n",
    "18.Processing_Fee - 提交贷款的处理费（INR）\n",
    "19.EMI_Loan_Submitted -提交的EMI贷款金额（INR）\n",
    "20.Filled_Form - 后期报价后是否已填写申请表格\n",
    "21.Device_Type - 进行申请的设备（浏览器/移动设备）\n",
    "22.Var2 - 类别型变量\n",
    "23.Source - 类别型变量\n",
    "24.Var4 - 类别型变量\n",
    "\n",
    "\n",
    "输出：\n",
    "25.LoggedIn - 是否login（只用于理解问题的变量，不能用于预测，测试集中没有）\n",
    "26. Disbursed - 是否发放贷款（目标变量），1为发放贷款（目标客户）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from xgboost import XGBClassifier\n",
    "\n",
    "import xgboost as xgb\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt \n",
    "# from matplotlib import pyplot\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "\n",
    "from sklearn.metrics import log_loss\n",
    "import seaborn as sns\n",
    "\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 特征工程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/anaconda3/lib/python3.7/site-packages/IPython/core/interactiveshell.py:2785: DtypeWarning: Columns (12,18) have mixed types. Specify dtype option on import or set low_memory=False.\n",
      "  interactivity=interactivity, compiler=compiler, result=result)\n"
     ]
    }
   ],
   "source": [
    "# Load the raw training data and forward-fill missing values.\n",
    "# NOTE(review): ffill leaves NaN wherever a column's first rows are missing\n",
    "# (the recorded train.info() output still shows a few non-full columns);\n",
    "# confirm this is the intended imputation strategy.\n",
    "train = pd.read_csv('Train.csv')\n",
    "train.head()  # no visible effect: result is discarded (not the cell's last expression)\n",
    "train.fillna(method='ffill',inplace=True)\n",
    "# train.fillna(0,inplace=True)\n",
    "# train.fillna(method='ffill')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.frame.DataFrame'>\n",
      "RangeIndex: 87020 entries, 0 to 87019\n",
      "Data columns (total 26 columns):\n",
      "ID                       87020 non-null object\n",
      "Gender                   87020 non-null object\n",
      "City                     87020 non-null object\n",
      "Monthly_Income           87020 non-null int64\n",
      "DOB                      87020 non-null object\n",
      "Lead_Creation_Date       87020 non-null object\n",
      "Loan_Amount_Applied      87020 non-null float64\n",
      "Loan_Tenure_Applied      87020 non-null float64\n",
      "Existing_EMI             87020 non-null float64\n",
      "Employer_Name            87020 non-null object\n",
      "Salary_Account           87020 non-null object\n",
      "Mobile_Verified          87020 non-null object\n",
      "Var5                     87020 non-null object\n",
      "Var1                     87020 non-null object\n",
      "Loan_Amount_Submitted    87019 non-null float64\n",
      "Loan_Tenure_Submitted    87019 non-null float64\n",
      "Interest_Rate            87019 non-null float64\n",
      "Processing_Fee           87015 non-null float64\n",
      "EMI_Loan_Submitted       87019 non-null object\n",
      "Filled_Form              87020 non-null object\n",
      "Device_Type              87020 non-null object\n",
      "Var2                     87020 non-null object\n",
      "Source                   87020 non-null object\n",
      "Var4                     87020 non-null int64\n",
      "LoggedIn                 87020 non-null int64\n",
      "Disbursed                87020 non-null float64\n",
      "dtypes: float64(8), int64(3), object(15)\n",
      "memory usage: 17.3+ MB\n"
     ]
    }
   ],
   "source": [
    "train.info()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "#如果计算资源有限，也可只取少量样本，如取前1000个样本\n",
    "#（分类中其实还需要确保取出来的这部分样本各类样本的比例和总体一致）\n",
    "# n_trains = 10\n",
    "# train = train.head(n_trains)\n",
    "\n",
    "# train = train.sample(n = 10, weights = 'Disbursed')\n",
    "# train.head(10)\n",
    "#或者考虑用train_test_split而不是交叉验证来验证模型性能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# #Target 分布，看看各类样本分布是否均衡\n",
    "# # sns.countplot(train);\n",
    "# # pyplot.xlabel('Disbursed');\n",
    "# # pyplot.ylabel('Number of occurrences');\n",
    "\n",
    "# sns.countplot(train.Disbursed, order=[0, 1]);\n",
    "# plt.xlabel('Disbursed');\n",
    "# plt.ylabel('Number of occurrences');\n",
    "\n",
    "\n",
    "# print(train.Disbursed.values)\n",
    "# #每类样本分布不是很均匀，所以交叉验证时也考虑各类样本按比例抽取"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Disbursed属性的不同取值和出现的次数\n",
      "0.0    85747\n",
      "1.0     1273\n",
      "Name: Disbursed, dtype: int64\n"
     ]
    }
   ],
   "source": [
    "# Inspect value counts of categorical features — here only the target\n",
    "# 'Disbursed', to check how imbalanced the classes are.\n",
    "categorical_features = ['Disbursed']\n",
    "for col in categorical_features:\n",
    "    print('\\n%s属性的不同取值和出现的次数'%col)\n",
    "    print(train[col].value_counts())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stratified random down-sampling: keep a small fraction of the majority\n",
    "# (non-disbursed) class and a larger fraction of the rare positive class.\n",
    "\n",
    "def typical_sampling(group, frac_dict):\n",
    "    \"\"\"Sample one groupby group at the fraction keyed by its class label.\"\"\"\n",
    "    # random_state makes the subsample reproducible across full re-runs\n",
    "    return group.sample(frac=frac_dict[group.name], random_state=3)\n",
    "\n",
    "def group_sample(data_set, label, frac_dict):\n",
    "    \"\"\"Stratified sampling of data_set by the column `label`.\n",
    "\n",
    "    data_set : DataFrame to sample from\n",
    "    label    : name of the stratification column\n",
    "    frac_dict: {class value: sampling fraction}\n",
    "    \"\"\"\n",
    "    return data_set.groupby(label, group_keys=False).apply(typical_sampling, frac_dict)\n",
    "\n",
    "# Per-class sampling fractions (majority class heavily down-sampled).\n",
    "typicalFracDict = {\n",
    "    0: 0.054747,\n",
    "    1: 0.2073\n",
    "}\n",
    "train = group_sample(train, 'Disbursed', typicalFracDict)\n",
    "# print(train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Disbursed属性的不同取值和出现的次数\n",
      "0.0    4694\n",
      "1.0     264\n",
      "Name: Disbursed, dtype: int64\n"
     ]
    }
   ],
   "source": [
    "# Re-check the class distribution after the stratified down-sampling above.\n",
    "categorical_features = ['Disbursed']\n",
    "for col in categorical_features:\n",
    "    print('\\n%s属性的不同取值和出现的次数'%col)\n",
    "    print(train[col].value_counts())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# train.head(10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Encode / expand raw columns into numeric features.\n",
    "\n",
    "# Binary-encode gender: 1 = Female, 0 = everything else.\n",
    "train['Gender'] =  train['Gender'].map(lambda x:1 if x == 'Female' else 0) \n",
    "\n",
    "# Var5 arrives as strings; 'HBXX' is a sentinel value mapped to -1.\n",
    "train['Var5'] =  train['Var5'].map(lambda x:int(-1) if x == 'HBXX' else int(x)) \n",
    "\n",
    "train['DOB'] = pd.to_datetime(train['DOB'])\n",
    "train['DOB_month'] = train[\"DOB\"].dt.month  # birth month component\n",
    "train['DOB_year'] = train[\"DOB\"].dt.year\n",
    "\n",
    "# Age relative to the hardcoded reference year 2019.\n",
    "# NOTE(review): magic number — should track the dataset's vintage, not be fixed.\n",
    "train['age'] = 2019 - train['DOB_year']\n",
    "train['Lead_Creation_Date'] = pd.to_datetime(train['Lead_Creation_Date'])\n",
    "train['Lead_Creation_Date_month'] = train[\"Lead_Creation_Date\"].dt.month  # lead-creation month component\n",
    "train['Lead_Creation_Date_year'] = train[\"Lead_Creation_Date\"].dt.year\n",
    "# Overwrites the datetime column with an integer 'years since lead creation'.\n",
    "train['Lead_Creation_Date'] = 2019 - train['Lead_Creation_Date_year']\n",
    "\n",
    "# train['EMI_Loan_Submitted'] = pd.to_numeric(train['EMI_Loan_Submitted'])\n",
    "# train['Var5'] = pd.to_numeric(train['Var5']).astype(int)\n",
    "\n",
    "# train.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One-hot encode the selected categorical features.\n",
    "# NOTE(review): Var4 is int64 (see train.info() output above), so get_dummies\n",
    "# presumably passes it through unchanged; it is dropped again two cells below.\n",
    "# train.info()\n",
    "# categorical_features = ['Filled_Form','Device_Type','Mobile_Verified', 'Source', 'Var1', 'Var2', 'Var4']\n",
    "# x_train_cat = train[categorical_features]\n",
    "# x_train_cat = pd.get_dummies(x_train_cat)\n",
    "\n",
    "categorical_features = ['Filled_Form','Device_Type','Mobile_Verified', 'Source', 'Var4']\n",
    "x_train_cat = train[categorical_features]\n",
    "x_train_cat = pd.get_dummies(x_train_cat)\n",
    "\n",
    "# x_train_cat.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Separate the target and drop columns that must not be used as features:\n",
    "# identifiers/free text (ID, City, Employer_Name, Salary_Account), the raw\n",
    "# DOB already expanded above, the leakage column LoggedIn, the mixed-type\n",
    "# EMI_Loan_Submitted, and the categoricals handled by get_dummies.\n",
    "# ('Var1' and 'Var2' were previously listed twice in the drop list.)\n",
    "y_train = train['Disbursed']\n",
    "X_train = train.drop(['ID', 'City', 'Employer_Name', 'DOB', 'LoggedIn',\n",
    "                      'Filled_Form', 'Device_Type', 'Mobile_Verified', 'Source',\n",
    "                      'Var1', 'Var2', 'Var4', 'Salary_Account', 'EMI_Loan_Submitted'], axis=1)\n",
    "# X_train.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Combine the one-hot encoded categoricals with the numeric features.\n",
    "FE_X_train = pd.concat([x_train_cat, X_train], axis = 1)\n",
    "FE_X_train = FE_X_train.drop(['Var4'], axis=1)\n",
    "\n",
    "# Persist the engineered features (still including the target column),\n",
    "# then remove the target from the design matrix.\n",
    "FE_X_train.to_csv('FE_X_train.csv', index=False)\n",
    "FE_X_train = FE_X_train.drop(['Disbursed'], axis=1)\n",
    "# FE_X_train.head()\n",
    "\n",
    "# Display options: show full arrays/frames when inspecting.\n",
    "# (threshold must be numeric — the old threshold='nan' hack is rejected by\n",
    "# numpy >= 1.14 and never worked reliably on Python 3.)\n",
    "np.set_printoptions(threshold=np.inf)\n",
    "pd.set_option('display.max_columns',1000)\n",
    "pd.set_option('display.max_rows', 1000)\n",
    "pd.set_option('display.max_colwidth',1000)\n",
    "\n",
    "# NOTE(review): tree-based models such as XGBoost are invariant to monotone\n",
    "# feature scaling, so the previously commented-out StandardScaler block\n",
    "# was removed."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prepare a stratified cross-validation splitter.\n",
    "# NOTE(review): this kfold object is never passed to xgb.cv or GridSearchCV\n",
    "# in the cells below, so it is currently unused.\n",
    "kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Baseline XGBoost classifier. n_estimators is deliberately large:\n",
    "# xgb.cv with early stopping (modelfit, below) selects the effective value.\n",
    "xgb1 = XGBClassifier(\n",
    "        learning_rate =0.1,\n",
    "        n_estimators=1000,  # large is fine; cv returns a suitable n_estimators\n",
    "        max_depth=5,\n",
    "        min_child_weight=1,\n",
    "        gamma=0,\n",
    "        subsample=0.3,\n",
    "        colsample_bytree=0.8,\n",
    "        colsample_bylevel=0.7,\n",
    "        objective= 'binary:logistic',\n",
    "        seed=3)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "def modelfit(alg, FE_X_train, y_train, cv_folds, early_stopping_rounds=10):\n",
    "    \"\"\"Select n_estimators for `alg` via xgb.cv with early stopping, then\n",
    "    refit `alg` on the full training data with the selected value.\n",
    "\n",
    "    alg: an XGBClassifier; its n_estimators is updated in place.\n",
    "    cv_folds: number of cross-validation folds.\n",
    "    early_stopping_rounds: stop cv when the metric hasn't improved for this\n",
    "        many rounds.\n",
    "    \"\"\"\n",
    "    xgb_param = alg.get_xgb_params()\n",
    "    xgb_param['silent'] = 1\n",
    "\n",
    "    # Use the native xgboost API (not the sklearn wrapper) so xgb.cv can\n",
    "    # drive early stopping.\n",
    "    xgtrain = xgb.DMatrix(FE_X_train, label = y_train)\n",
    "\n",
    "    boost_round = alg.get_params()['n_estimators']\n",
    "    # BUG FIX: nfold was hard-coded to 3, silently ignoring the cv_folds\n",
    "    # argument (the call site passes cv_folds=5).\n",
    "    cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=boost_round, nfold=cv_folds,\n",
    "                      metrics='error', early_stopping_rounds=early_stopping_rounds)\n",
    "\n",
    "    cvresult.to_csv('1_nestimators.csv', index_label = 'n_estimators')\n",
    "\n",
    "    # With early stopping, cv returns one row per kept boosting round, so the\n",
    "    # row count is the chosen n_estimators.\n",
    "    n_estimators = cvresult.shape[0]\n",
    "\n",
    "    # Refit on the full training set with the selected n_estimators.\n",
    "    alg.set_params(n_estimators = n_estimators)\n",
    "    alg.fit(FE_X_train, y_train, eval_metric='logloss')\n",
    "\n",
    "    print(\"n_estimators=\", n_estimators)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "n_estimators= 14\n"
     ]
    }
   ],
   "source": [
    "modelfit(xgb1, FE_X_train, y_train, cv_folds = 5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# cvresult = pd.DataFrame.from_csv('1_nestimators.csv')\n",
    "\n",
    "# test_means = cvresult['test-error-mean']\n",
    "# test_stds = cvresult['test-error-std'] \n",
    "        \n",
    "# train_means = cvresult['train-error-mean']\n",
    "# train_stds = cvresult['train-error-std'] \n",
    "\n",
    "# x_axis = range(0, cvresult.shape[0])\n",
    "# plt.errorbar(test_means, test_stds ,3)\n",
    "\n",
    "# pyplot.errorbar(x_axis, test_means, yerr=test_stds ,label='Test')\n",
    "# pyplot.errorbar(x_axis, train_means, yerr=train_stds ,label='Train')\n",
    "# plt.title(\"XGBoost n_estimators vs Log Loss\")\n",
    "# plt.xlabel( 'n_estimators' )\n",
    "# plt.ylabel( 'Log Loss' )\n",
    "# plt.savefig( 'n_estimators4_1.png' )\n",
    "\n",
    "# pyplot.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:1: FutureWarning: from_csv is deprecated. Please use read_csv(...) instead. Note that some of the default arguments are different, so please refer to the documentation for from_csv when changing your function calls\n",
      "  \"\"\"Entry point for launching an IPython kernel.\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>train-error-mean</th>\n",
       "      <th>train-error-std</th>\n",
       "      <th>test-error-mean</th>\n",
       "      <th>test-error-std</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>n_estimators</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0.060609</td>\n",
       "      <td>0.001735</td>\n",
       "      <td>0.066964</td>\n",
       "      <td>0.003980</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0.052541</td>\n",
       "      <td>0.000517</td>\n",
       "      <td>0.055668</td>\n",
       "      <td>0.001719</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0.052138</td>\n",
       "      <td>0.000521</td>\n",
       "      <td>0.053853</td>\n",
       "      <td>0.001719</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0.052642</td>\n",
       "      <td>0.000661</td>\n",
       "      <td>0.053651</td>\n",
       "      <td>0.002067</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0.052542</td>\n",
       "      <td>0.000874</td>\n",
       "      <td>0.053853</td>\n",
       "      <td>0.002162</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>0.052441</td>\n",
       "      <td>0.001005</td>\n",
       "      <td>0.053651</td>\n",
       "      <td>0.002004</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>0.052340</td>\n",
       "      <td>0.000896</td>\n",
       "      <td>0.054054</td>\n",
       "      <td>0.001880</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>0.052239</td>\n",
       "      <td>0.000385</td>\n",
       "      <td>0.053651</td>\n",
       "      <td>0.002446</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>0.052239</td>\n",
       "      <td>0.000628</td>\n",
       "      <td>0.053248</td>\n",
       "      <td>0.002162</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>0.052441</td>\n",
       "      <td>0.000762</td>\n",
       "      <td>0.053651</td>\n",
       "      <td>0.002067</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>10</th>\n",
       "      <td>0.052441</td>\n",
       "      <td>0.000873</td>\n",
       "      <td>0.053853</td>\n",
       "      <td>0.002275</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11</th>\n",
       "      <td>0.052743</td>\n",
       "      <td>0.001120</td>\n",
       "      <td>0.053651</td>\n",
       "      <td>0.002067</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12</th>\n",
       "      <td>0.052743</td>\n",
       "      <td>0.001120</td>\n",
       "      <td>0.053651</td>\n",
       "      <td>0.002067</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13</th>\n",
       "      <td>0.052643</td>\n",
       "      <td>0.000994</td>\n",
       "      <td>0.053247</td>\n",
       "      <td>0.001719</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "              train-error-mean  train-error-std  test-error-mean  \\\n",
       "n_estimators                                                       \n",
       "0                     0.060609         0.001735         0.066964   \n",
       "1                     0.052541         0.000517         0.055668   \n",
       "2                     0.052138         0.000521         0.053853   \n",
       "3                     0.052642         0.000661         0.053651   \n",
       "4                     0.052542         0.000874         0.053853   \n",
       "5                     0.052441         0.001005         0.053651   \n",
       "6                     0.052340         0.000896         0.054054   \n",
       "7                     0.052239         0.000385         0.053651   \n",
       "8                     0.052239         0.000628         0.053248   \n",
       "9                     0.052441         0.000762         0.053651   \n",
       "10                    0.052441         0.000873         0.053853   \n",
       "11                    0.052743         0.001120         0.053651   \n",
       "12                    0.052743         0.001120         0.053651   \n",
       "13                    0.052643         0.000994         0.053247   \n",
       "\n",
       "              test-error-std  \n",
       "n_estimators                  \n",
       "0                   0.003980  \n",
       "1                   0.001719  \n",
       "2                   0.001719  \n",
       "3                   0.002067  \n",
       "4                   0.002162  \n",
       "5                   0.002004  \n",
       "6                   0.001880  \n",
       "7                   0.002446  \n",
       "8                   0.002162  \n",
       "9                   0.002067  \n",
       "10                  0.002275  \n",
       "11                  0.002067  \n",
       "12                  0.002067  \n",
       "13                  0.001719  "
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# pd.DataFrame.from_csv is deprecated (and removed in pandas >= 1.0);\n",
    "# read_csv with index_col=0 reproduces the same frame — see the\n",
    "# FutureWarning in this cell's recorded output.\n",
    "cvresult = pd.read_csv('1_nestimators.csv', index_col=0)\n",
    "cvresult"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
