{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import lightgbm as lgb\n",
    "import datetime\n",
    "import math\n",
    "import gc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "data_path = './source_data/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Done loading...\n"
     ]
    }
   ],
   "source": [
    "train = pd.read_csv(data_path + 'train.csv', dtype={'msno' : 'object',\n",
    "                                                'source_system_tab' : 'object',\n",
    "                                                  'source_screen_name' : 'object',\n",
    "                                                  'source_type' : 'object',\n",
    "                                                  'target' : np.uint8,\n",
    "                                                  'song_id' : 'category'})\n",
    "test = pd.read_csv(data_path + 'test.csv', dtype={'msno' : 'category',\n",
    "                                                'source_system_tab' : 'category',\n",
    "                                                'source_screen_name' : 'category',\n",
    "                                                'source_type' : 'category',\n",
    "                                                'song_id' : 'category'})\n",
    "songs = pd.read_csv(data_path + 'songs.csv',dtype={'genre_ids': 'object',\n",
    "                                                  'language' : 'object',\n",
    "                                                  'artist_name' : 'object',\n",
    "                                                  'composer' : 'object',\n",
    "                                                  'lyricist' : 'object',\n",
    "                                                  'song_id' : 'object'})\n",
    "members = pd.read_csv(data_path + 'members.csv',dtype={'city' : 'category',\n",
    "                                                      'bd' : np.uint8,\n",
    "                                                      'gender' : 'category',\n",
    "                                                      'registered_via' : 'category'},\n",
    "                     parse_dates=['registration_init_time','expiration_date'])\n",
    "songs_extra = pd.read_csv(data_path + 'song_extra_info.csv')\n",
    "print('Done loading...')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1343"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Clean up a handful of messy artist names.\n",
    "# Use .loc assignment: chained indexing (songs.artist_name[mask] = ...)\n",
    "# raises SettingWithCopyWarning and may not write through in newer pandas.\n",
    "songs.loc[songs['artist_name'] == '爵士輕鬆聽', 'artist_name'] = '爵士'\n",
    "songs.loc[songs['artist_name'] == '爵士演唱輯', 'artist_name'] = '爵士'\n",
    "songs.loc[songs['artist_name'] == '爵士演奏輯', 'artist_name'] = '爵士'\n",
    "songs.loc[songs['artist_name'] == '蓮緣系列-佛曲演唱', 'artist_name'] = '清淨身心靈'\n",
    "songs.loc[songs['artist_name'] == '群星', 'artist_name'] = 'Various'\n",
    "# Collapse any 'Various Artists'-style name. na=False keeps missing names\n",
    "# untouched (a NaN-containing mask would break boolean indexing).\n",
    "songs.loc[songs['artist_name'].str.contains('Various', regex=False, na=False), 'artist_name'] = 'Various'\n",
    "\n",
    "# Clean up composer credits: unify unknown markers and duplicate spellings.\n",
    "songs.loc[songs['composer'] == 'Unknow Composer', 'composer'] = 'no_composer'\n",
    "songs.loc[songs['composer'] == '創時代音樂工作室', 'composer'] = '創時代音樂製作有限公司'\n",
    "songs.loc[songs['composer'] == '久石譲 (Joe Hisaishi)', 'composer'] = '久石譲'\n",
    "songs.loc[songs['composer'] == '久石讓', 'composer'] = '久石譲'\n",
    "songs.loc[songs['composer'] == '莫札特', 'composer'] = 'Mozart'\n",
    "songs.loc[songs['composer'] == '無', 'composer'] = 'no_composer'\n",
    "songs.loc[songs['composer'] == '佚名', 'composer'] = 'no_composer'\n",
    "songs.loc[songs['composer'] == '約翰威廉斯', 'composer'] = 'John Williams'\n",
    "songs.loc[songs['composer'] == 'Unknow', 'composer'] = 'no_composer'\n",
    "# Clean up lyricist credits: placeholder values and duplicate spellings.\n",
    "# (The original no-op '秋元康' -> '秋元康' line has been dropped.)\n",
    "songs.loc[songs['lyricist'] == ' ', 'lyricist'] = 'no_artist'\n",
    "songs.loc[songs['lyricist'] == '-', 'lyricist'] = 'no_artist'\n",
    "songs.loc[songs['lyricist'] == 'None', 'lyricist'] = 'no_artist'\n",
    "songs.loc[songs['lyricist'] == 'Unknown', 'lyricist'] = 'no_artist'\n",
    "songs.loc[songs['lyricist'] == '—', 'lyricist'] = 'no_artist'\n",
    "songs.loc[songs['lyricist'] == '―', 'lyricist'] = 'no_artist'\n",
    "songs.loc[songs['lyricist'] == '傳統歌謠', 'lyricist'] = '傳統'\n",
    "songs.loc[songs['lyricist'] == '傳統曲', 'lyricist'] = '傳統'\n",
    "songs.loc[songs['lyricist'] == '林夕 ', 'lyricist'] = '林夕'\n",
    "songs.loc[songs['lyricist'] == 'Lin Xi', 'lyricist'] = '林夕'\n",
    "songs.loc[songs['lyricist'] == '秋元 康', 'lyricist'] = '秋元康'\n",
    "songs.loc[songs['lyricist'] == '秋元　康', 'lyricist'] = '秋元康'\n",
    "songs.loc[songs['lyricist'] == '秋元 康（Akimoto yasushi）', 'lyricist'] = '秋元康'\n",
    "songs.loc[songs['lyricist'] == '秋元康(Akimoto yasushi)', 'lyricist'] = '秋元康'\n",
    "songs.loc[songs['lyricist'] == '無', 'lyricist'] = 'no_artist'\n",
    "songs.loc[songs['lyricist'] == '夏奇拉|Luis Fernando Ochoa', 'lyricist'] = '夏奇拉'\n",
    "songs.loc[songs['lyricist'] == '佚名', 'lyricist'] = 'no_artist'\n",
    "songs.loc[songs['lyricist'] == '畑 亜貴', 'lyricist'] = '畑亜貴'\n",
    "songs.loc[songs['lyricist'] == '畑　亜貴', 'lyricist'] = '畑亜貴'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def same(x, y):\n",
    "    \"\"\"Return True when one credit string contains the other.\n",
    "\n",
    "    Non-string inputs (e.g. NaN from missing metadata) never match.\n",
    "    Equivalent to the original `(x.find(y) + y.find(x)) > -2` but returns\n",
    "    a consistent bool instead of a bool-or-0 mix.\n",
    "    \"\"\"\n",
    "    if isinstance(x, str) and isinstance(y, str):\n",
    "        return (y in x) or (x in y)\n",
    "    return False\n",
    "\n",
    "# Flag songs where the performer also composed and/or wrote the lyrics.\n",
    "songs['artist_composer'] = songs.apply(lambda r: same(r['artist_name'], r['composer']), axis=1).astype(np.int8)\n",
    "songs['artist_lyricist'] = songs.apply(lambda r: same(r['artist_name'], r['lyricist']), axis=1).astype(np.int8)\n",
    "# 1 only when the artist matches both the composer and the lyricist.\n",
    "songs['artist_composer_lyricist'] = ((songs.artist_composer + songs.artist_lyricist) == 2).astype(np.int8)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Data merging...\n",
      "Done merging...\n"
     ]
    }
   ],
   "source": [
    "print('Data merging...')\n",
    "\n",
    "# Attach song metadata to each listening event.\n",
    "train = train.merge(songs, on='song_id', how='left')\n",
    "test = test.merge(songs, on='song_id', how='left')\n",
    "\n",
    "# Membership length in days (expiration - registration).\n",
    "members['membership_days'] = members['expiration_date'].subtract(members['registration_init_time']).dt.days.astype(int)\n",
    "\n",
    "members['registration_year'] = members['registration_init_time'].dt.year\n",
    "members['registration_month'] = members['registration_init_time'].dt.month\n",
    "members['registration_date'] = members['registration_init_time'].dt.day\n",
    "\n",
    "members['expiration_year'] = members['expiration_date'].dt.year\n",
    "members['expiration_month'] = members['expiration_date'].dt.month\n",
    "# NOTE: this overwrites the datetime column with just the day-of-month, so\n",
    "# membership_days above must stay computed before this line.\n",
    "members['expiration_date'] = members['expiration_date'].dt.day\n",
    "members = members.drop(['registration_init_time'], axis=1)\n",
    "\n",
    "def isrc_to_year(isrc):\n",
    "    \"\"\"Expand the 2-digit ISRC registration year (chars 5:7) to 4 digits.\n",
    "\n",
    "    Values above 17 are assumed to be 19xx (the dataset predates 2018),\n",
    "    otherwise 20xx. Non-string input (NaN) yields NaN.\n",
    "    \"\"\"\n",
    "    if isinstance(isrc, str):\n",
    "        year2 = int(isrc[5:7])\n",
    "        return (1900 if year2 > 17 else 2000) + year2\n",
    "    return np.nan\n",
    "\n",
    "songs_extra['song_year'] = songs_extra['isrc'].apply(isrc_to_year)\n",
    "songs_extra.drop(['isrc', 'name'], axis=1, inplace=True)\n",
    "\n",
    "# Attach member metadata, then the derived song_year.\n",
    "train = train.merge(members, on='msno', how='left')\n",
    "test = test.merge(members, on='msno', how='left')\n",
    "\n",
    "train = train.merge(songs_extra, on='song_id', how='left')\n",
    "# Impute missing song lengths (~200s), then compact the dtype. Assign back\n",
    "# instead of attribute-access + inplace fillna, which is a chained-assignment\n",
    "# pattern that stops propagating in newer pandas.\n",
    "train['song_length'] = train['song_length'].fillna(200000).astype(np.uint32)\n",
    "train['song_id'] = train['song_id'].astype('category')\n",
    "\n",
    "test = test.merge(songs_extra, on='song_id', how='left')\n",
    "test['song_length'] = test['song_length'].fillna(200000).astype(np.uint32)\n",
    "test['song_id'] = test['song_id'].astype('category')\n",
    "\n",
    "print('Done merging...')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# train = train.drop('gender',axis=1)\n",
    "# test = test.drop('gender',axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Adding new features\n",
      "Done adding features\n"
     ]
    }
   ],
   "source": [
    "print (\"Adding new features\")\n",
    "\n",
    "def genre_id_count(x):\n",
    "    if x == 'no_genre_id':\n",
    "        return 0\n",
    "    else: \n",
    "        return x.count('|') + 1\n",
    "\n",
    "train['genre_ids'].fillna('no_genre_id',inplace=True)\n",
    "test['genre_ids'].fillna('no_genre_id',inplace=True)\n",
    "train['genre_ids_count'] = train['genre_ids'].apply(genre_id_count).astype(np.int8)\n",
    "test['genre_ids_count'] = test['genre_ids'].apply(genre_id_count).astype(np.int8)\n",
    "\n",
    "def lyricist_count(x):\n",
    "    \"\"\"Number of credited lyricists; separators are |, /, \\\\ and ;.\"\"\"\n",
    "    if x == 'no_lyricist':\n",
    "        return 0\n",
    "    # One more lyricist than separators. (The original had an unreachable\n",
    "    # trailing return after this; it has been removed.)\n",
    "    return sum(map(x.count, ['|', '/', '\\\\', ';'])) + 1\n",
    "\n",
    "train['lyricist'].fillna('no_lyricist',inplace=True)\n",
    "test['lyricist'].fillna('no_lyricist',inplace=True)\n",
    "train['lyricists_count'] = train['lyricist'].apply(lyricist_count).astype(np.int8)\n",
    "test['lyricists_count'] = test['lyricist'].apply(lyricist_count).astype(np.int8)\n",
    "\n",
    "def composer_count(x):\n",
    "    if x == 'no_composer':\n",
    "        return 0\n",
    "    else:\n",
    "        return sum(map(x.count, ['|', '/', '\\\\', ';'])) + 1\n",
    "\n",
    "train['composer'].fillna('no_composer',inplace=True)\n",
    "test['composer'].fillna('no_composer',inplace=True)\n",
    "train['composer_count'] = train['composer'].apply(composer_count).astype(np.int8)\n",
    "test['composer_count'] = test['composer'].apply(composer_count).astype(np.int8)\n",
    "\n",
    "def is_featured(x):\n",
    "    if 'feat' in str(x) :\n",
    "        return 1\n",
    "    return 0\n",
    "\n",
    "train['artist_name'].fillna('no_artist',inplace=True)\n",
    "test['artist_name'].fillna('no_artist',inplace=True)\n",
    "train['is_featured'] = train['artist_name'].apply(is_featured).astype(np.int8)\n",
    "test['is_featured'] = test['artist_name'].apply(is_featured).astype(np.int8)\n",
    "\n",
    "def artist_count(x):\n",
    "    if x == 'no_artist':\n",
    "        return 0\n",
    "    else:\n",
    "        return x.count('and') + x.count(',') + x.count('feat') + x.count('&')\n",
    "\n",
    "train['artist_count'] = train['artist_name'].apply(artist_count).astype(np.int8)\n",
    "test['artist_count'] = test['artist_name'].apply(artist_count).astype(np.int8)\n",
    "\n",
    "# if artist is same as composer\n",
    "# train['artist_composer'] = (train['artist_name'] == train['composer']).astype(np.int8)\n",
    "# test['artist_composer'] = (test['artist_name'] == test['composer']).astype(np.int8)\n",
    "\n",
    "\n",
    "# # if artist, lyricist and composer are all three same\n",
    "# train['artist_composer_lyricist'] = ((train['artist_name'] == train['composer']) & (train['artist_name'] == train['lyricist']) & (train['composer'] == train['lyricist'])).astype(np.int8)\n",
    "# test['artist_composer_lyricist'] = ((test['artist_name'] == test['composer']) & (test['artist_name'] == test['lyricist']) & (test['composer'] == test['lyricist'])).astype(np.int8)\n",
    "\n",
    "# is song language 17 or 45. \n",
    "def song_lang_boolean(x):\n",
    "    if '17.0' in str(x) or '45.0' in str(x):\n",
    "        return 1\n",
    "    return 0\n",
    "\n",
    "train.language = train.language.astype('object')\n",
    "test.language = test.language.astype('object')\n",
    "train.language.fillna('-1',inplace=True)\n",
    "test.language.fillna('-1',inplace=True)\n",
    "train['song_lang_boolean'] = train['language'].apply(song_lang_boolean).astype(np.int8)\n",
    "test['song_lang_boolean'] = test['language'].apply(song_lang_boolean).astype(np.int8)\n",
    "\n",
    "\n",
    "_mean_song_length = np.mean(train['song_length'])\n",
    "def smaller_song(x):\n",
    "    if x < _mean_song_length:\n",
    "        return 1\n",
    "    return 0\n",
    "\n",
    "train['smaller_song'] = train['song_length'].apply(smaller_song).astype(np.int8)\n",
    "test['smaller_song'] = test['song_length'].apply(smaller_song).astype(np.int8)\n",
    "\n",
    "\n",
    "# Map song_id -> number of plays in each split. Series.iteritems() was\n",
    "# removed in pandas 2.0; value_counts().to_dict() is equivalent and simpler.\n",
    "_dict_count_song_played_train = train['song_id'].value_counts().to_dict()\n",
    "_dict_count_song_played_test = test['song_id'].value_counts().to_dict()\n",
    "def count_song_played(x):\n",
    "    try:\n",
    "        return _dict_count_song_played_train[x]\n",
    "    except KeyError:\n",
    "        try:\n",
    "            return _dict_count_song_played_test[x]\n",
    "        except KeyError:\n",
    "            return 0\n",
    "    \n",
    "\n",
    "train['count_song_played'] = train['song_id'].apply(count_song_played).astype(np.int64)\n",
    "test['count_song_played'] = test['song_id'].apply(count_song_played).astype(np.int64)\n",
    "\n",
    "# Map artist_name -> number of plays in each split. Series.iteritems() was\n",
    "# removed in pandas 2.0; value_counts().to_dict() is equivalent and simpler.\n",
    "_dict_count_artist_played_train = train['artist_name'].value_counts().to_dict()\n",
    "_dict_count_artist_played_test = test['artist_name'].value_counts().to_dict()\n",
    "def count_artist_played(x):\n",
    "    try:\n",
    "        return _dict_count_artist_played_train[x]\n",
    "    except KeyError:\n",
    "        try:\n",
    "            return _dict_count_artist_played_test[x]\n",
    "        except KeyError:\n",
    "            return 0\n",
    "\n",
    "train['count_artist_played'] = train['artist_name'].apply(count_artist_played).astype(np.int64)\n",
    "test['count_artist_played'] = test['artist_name'].apply(count_artist_played).astype(np.int64)\n",
    "\n",
    "print (\"Done adding features\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# 'Various' stands for many artists; assign a fixed high artist count.\n",
    "# .loc avoids chained assignment, which may not write through in newer pandas.\n",
    "train.loc[train['artist_name'] == 'Various', 'artist_count'] = 15\n",
    "test.loc[test['artist_name'] == 'Various', 'artist_count'] = 15"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train test and validation sets\n",
      "Processed data...\n"
     ]
    }
   ],
   "source": [
    "print (\"Train test and validation sets\")\n",
    "# Cast remaining object columns to category so LightGBM treats them as\n",
    "# categorical features (same cast applied to test to keep schemas aligned).\n",
    "for col in train.columns:\n",
    "    if train[col].dtype == object:\n",
    "        train[col] = train[col].astype('category')\n",
    "        test[col] = test[col].astype('category')\n",
    "\n",
    "\n",
    "X_train = train.drop(['target'], axis=1)\n",
    "y_train = train['target'].values\n",
    "\n",
    "\n",
    "# Keep the submission ids before dropping the id column from the features.\n",
    "X_test = test.drop(['id'], axis=1)\n",
    "ids = test['id'].values\n",
    "\n",
    "\n",
    "# del train, test; gc.collect();\n",
    "\n",
    "# NOTE(review): the \"validation\" watchlist is the training data itself, so\n",
    "# the AUC printed during training is a training-set score, not held-out.\n",
    "d_train_final = lgb.Dataset(X_train, y_train)\n",
    "watchlist_final = lgb.Dataset(X_train, y_train)\n",
    "print('Processed data...')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[50]\tvalid_0's auc: 0.756766\n"
     ]
    }
   ],
   "source": [
    "params = {\n",
    "        'objective': 'binary',\n",
    "        'boosting': 'gbdt',\n",
    "        'learning_rate': 0.2 ,\n",
    "        'verbose': 0,\n",
    "        'num_leaves': 108,\n",
    "        'bagging_fraction': 0.95,\n",
    "        'bagging_freq': 1,\n",
    "        'bagging_seed': 1,\n",
    "        'feature_fraction': 0.9,\n",
    "        'feature_fraction_seed': 1,\n",
    "        'max_bin': 256,\n",
    "        'max_depth': 10,\n",
    "        'num_rounds':1000,\n",
    "        'metric' : 'auc'\n",
    "    }\n",
    "\n",
    "model_f1 = lgb.train(params, train_set=d_train_final, \n",
    "                     valid_sets=watchlist_final, verbose_eval=50)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Making predictions\n",
      "Done making predictions\n"
     ]
    }
   ],
   "source": [
    "print('Making predictions')\n",
    "p_test_1 = model_f1.predict(X_test)\n",
    "\n",
    "print('Done making predictions')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Saving predictions Model model of gbdt\n",
      "Done!\n"
     ]
    }
   ],
   "source": [
    "print ('Saving predictions Model model of gbdt')\n",
    "\n",
    "subm = pd.DataFrame()\n",
    "subm['id'] = ids\n",
    "subm['target'] = p_test_1\n",
    "subm.to_csv(data_path + 'submission_lgbm_avg.csv.gz', compression = 'gzip', index=False, float_format = '%.5f')\n",
    "\n",
    "print('Done!')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 融合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[50]\tvalid_0's auc: 0.66628\n",
      "[100]\tvalid_0's auc: 0.672916\n",
      "[150]\tvalid_0's auc: 0.676279\n",
      "[200]\tvalid_0's auc: 0.678751\n",
      "[250]\tvalid_0's auc: 0.679939\n",
      "[300]\tvalid_0's auc: 0.680944\n",
      "[350]\tvalid_0's auc: 0.681793\n",
      "[400]\tvalid_0's auc: 0.682275\n",
      "[450]\tvalid_0's auc: 0.682902\n",
      "[500]\tvalid_0's auc: 0.683192\n",
      "[550]\tvalid_0's auc: 0.683368\n",
      "[600]\tvalid_0's auc: 0.68381\n",
      "[650]\tvalid_0's auc: 0.684272\n",
      "[700]\tvalid_0's auc: 0.684416\n",
      "[750]\tvalid_0's auc: 0.684437\n",
      "[800]\tvalid_0's auc: 0.684504\n",
      "[850]\tvalid_0's auc: 0.684463\n",
      "[900]\tvalid_0's auc: 0.684443\n",
      "[950]\tvalid_0's auc: 0.684622\n",
      "[1000]\tvalid_0's auc: 0.684624\n"
     ]
    }
   ],
   "source": [
    "d_train_final = lgb.Dataset(X_train.iloc[:6000000], y_train[:6000000])\n",
    "watchlist_final = lgb.Dataset(X_train.iloc[6000000:], y_train[6000000:])\n",
    "# gbdt\n",
    "params = {\n",
    "        'objective': 'binary',\n",
    "        'boosting': 'gbdt',\n",
    "        'learning_rate': 0.2 ,\n",
    "        'verbose': 0,\n",
    "        'num_leaves': 108,\n",
    "        'bagging_fraction': 0.95,\n",
    "        'bagging_freq': 1,\n",
    "        'bagging_seed': 1,\n",
    "        'feature_fraction': 0.9,\n",
    "        'feature_fraction_seed': 1,\n",
    "        'max_bin': 256,\n",
    "        'max_depth': 10,\n",
    "        'num_rounds':1000,\n",
    "        'metric' : 'auc'\n",
    "    }\n",
    "\n",
    "model_f1 = lgb.train(params, train_set=d_train_final, \n",
    "                     valid_sets=watchlist_final, verbose_eval=50)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "gbdt_train = model_f1.predict(X_train.iloc[6000000:])\n",
    "gbdt_test = model_f1.predict(X_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[50]\tvalid_0's auc: 0.664187\n",
      "[100]\tvalid_0's auc: 0.669147\n",
      "[150]\tvalid_0's auc: 0.672043\n",
      "[200]\tvalid_0's auc: 0.675124\n",
      "[250]\tvalid_0's auc: 0.676854\n",
      "[300]\tvalid_0's auc: 0.678421\n",
      "[350]\tvalid_0's auc: 0.679069\n",
      "[400]\tvalid_0's auc: 0.680244\n",
      "[450]\tvalid_0's auc: 0.681303\n",
      "[500]\tvalid_0's auc: 0.682062\n",
      "[550]\tvalid_0's auc: 0.682528\n",
      "[600]\tvalid_0's auc: 0.682691\n",
      "[650]\tvalid_0's auc: 0.683242\n",
      "[700]\tvalid_0's auc: 0.683493\n",
      "[750]\tvalid_0's auc: 0.683751\n",
      "[800]\tvalid_0's auc: 0.684061\n"
     ]
    }
   ],
   "source": [
    "# dart\n",
    "params = {\n",
    "        'objective': 'binary',\n",
    "        'boosting': 'dart',\n",
    "        'learning_rate': 0.2 ,\n",
    "        'verbose': 0,\n",
    "        'num_leaves': 108,\n",
    "        'bagging_fraction': 0.95,\n",
    "        'bagging_freq': 1,\n",
    "        'bagging_seed': 1,\n",
    "        'feature_fraction': 0.9,\n",
    "        'feature_fraction_seed': 1,\n",
    "        'max_bin': 256,\n",
    "        'max_depth': 10,\n",
    "        'num_rounds':800,\n",
    "        'metric' : 'auc'\n",
    "    }\n",
    "\n",
    "model_f1 = lgb.train(params, train_set=d_train_final, \n",
    "                     valid_sets=watchlist_final, verbose_eval=50)\n",
    "\n",
    "dart_train = model_f1.predict(X_train.iloc[6000000:])\n",
    "dart_test = model_f1.predict(X_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_lgbm = pd.DataFrame({'gbdt':gbdt_train,'dart':dart_train,'target':y_train[6000000:]})\n",
    "test_lgbm = pd.DataFrame({'gbdt':gbdt_test,'dart':dart_test})\n",
    "train_lgbm.to_csv('./target_data/train_gbdt.csv',index=False)\n",
    "test_lgbm.to_csv('./target_data/test_gbdt.csv',index=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### gbdt+lr"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "params = {\n",
    "        'objective': 'binary',\n",
    "        'boosting': 'gbdt',\n",
    "        'learning_rate': 0.2 ,\n",
    "        'verbose': 0,\n",
    "        'num_leaves': 256,\n",
    "        'bagging_fraction': 0.95,\n",
    "        'bagging_freq': 1,\n",
    "        'bagging_seed': 1,\n",
    "        'feature_fraction': 0.9,\n",
    "        'feature_fraction_seed': 1,\n",
    "        'max_bin': 256,\n",
    "        'max_depth': 8,\n",
    "        'num_rounds':50,\n",
    "        'metric' : 'auc'\n",
    "    }\n",
    "\n",
    "model_f1 = lgb.train(params, train_set=d_train_final, \n",
    "                     valid_sets=watchlist_final, verbose_eval=50)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_gbdt_leaf = model_f1.predict(X_train,pred_leaf=True)\n",
    "test_gbdt_leaf = model_f1.predict(X_test,pred_leaf=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(7377418, 50)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_gbdt_leaf.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_gbdt_leaf = pd.DataFrame(train_gbdt_leaf,columns=[\"class%d\"%(i+1) for i in range(0,50)])\n",
    "train_gbdt_leaf['target'] = y_train\n",
    "train_gbdt_leaf.to_csv('./target_data/train_gbdt_leaf.csv',index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_gbdt_leaf = pd.DataFrame(test_gbdt_leaf,columns=[\"class%d\"%(i+1) for i in range(0,50)])\n",
    "test_gbdt_leaf['id'] = ids"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_gbdt_leaf.to_csv('./target_data/test_gbdt_leaf.csv',index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "59"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
