{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import seaborn as sns\n",
    "import matplotlib.pyplot as plt\n",
    "import lightgbm as lgb\n",
    "import xgboost as xgb\n",
    "from sklearn.preprocessing import LabelEncoder,OneHotEncoder\n",
    "from sklearn.model_selection import KFold,StratifiedKFold\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.metrics import accuracy_score,roc_auc_score,f1_score,recall_score\n",
    "from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer,HashingVectorizer\n",
    "from sklearn.decomposition import TruncatedSVD,SparsePCA\n",
    "import gc\n",
    "import time\n",
    "import os\n",
    "import sys\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "\n",
    "# Load competition data. The csv files carry no header row, hence the\n",
    "# explicit column names; frames are sorted by uid so later positional\n",
    "# operations line up across tables. Paths are relative to the notebook.\n",
    "# NOTE(review): active is an unvalidated pickle produced by an earlier\n",
    "# pipeline step - presumably one row per uid with an appid list; confirm.\n",
    "train = pd.read_csv(\"../data/age_train.csv\",names=['uid','age_group']).sort_values(by=['uid'])\n",
    "test = pd.read_csv(\"../data/age_test.csv\",names=['uid']).sort_values(by=['uid'])\n",
    "info = pd.read_csv(\"../data/app_info.csv\",names=['appid','category'])\n",
    "active = pd.read_pickle(\"../pickle/user_app_active.pickle\")\n",
    "# usage = pd.read_pickle(\"../input2/user_app_usage.pickle\")#,names=['uid','appid','duration','times','use_date'],parse_dates=['use_date'])\n",
    "user_basic_info = pd.read_csv(\"../data/user_basic_info.csv\",names=['uid','gender','city','prodname','ramcapacity','ramleftration','romcapacity','romleftration','color','fontsize','ct','carrier','os']).sort_values(by=['uid'])\n",
    "behavior_info = pd.read_csv(\"../data/user_behavior_info.csv\",names=['uid','boottimes','a','b','c','d','e','f','g']).sort_values(by=['uid'])\n",
    "# (train.shape,test.shape),(info.shape,active.shape,usage.shape,user_basic_info.shape,behavior_info.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>age_group</th>\n",
       "      <th>uid</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>4.0</td>\n",
       "      <td>1000006</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>4.0</td>\n",
       "      <td>1000009</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>5.0</td>\n",
       "      <td>1000010</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>5.0</td>\n",
       "      <td>1000012</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>4.0</td>\n",
       "      <td>1000027</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   age_group      uid\n",
       "0        4.0  1000006\n",
       "1        4.0  1000009\n",
       "2        5.0  1000010\n",
       "3        5.0  1000012\n",
       "4        4.0  1000027"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Stack train and test into one frame for joint feature engineering.\n",
    "# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;\n",
    "# pd.concat followed by reset_index is the exact supported equivalent.\n",
    "all_data = pd.concat([train, test]).reset_index(drop=True)\n",
    "all_data.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_category(x):\n",
    "    \"\"\"Map an iterable of appids to their categories.\n",
    "\n",
    "    Returns (categories, n_missing): category values for appids present\n",
    "    in hash_dict, plus a count of appids with no known category.\n",
    "    \"\"\"\n",
    "    col = []\n",
    "    no_col = 0\n",
    "    for i in x:\n",
    "        # Explicit membership test instead of the original bare except,\n",
    "        # which also silently swallowed unrelated errors.\n",
    "        if i in hash_dict:\n",
    "            col.append(hash_dict[i])\n",
    "        else:\n",
    "            no_col += 1\n",
    "    return col, no_col\n",
    "\n",
    "# appid -> category lookup built from the two-column app_info frame.\n",
    "hash_dict = dict(info.values)\n",
    "active['category'] = active['appid'].map(get_category)\n",
    "active['category_nan'] = active['category'].map(lambda x: x[1])\n",
    "active['category'] = active['category'].map(lambda x: x[0])\n",
    "active['category_len'] = active['category'].map(len)\n",
    "active['category_nunique'] = active['category'].map(lambda x: len(set(x)))\n",
    "# NOTE: category_len can be 0 for users with no mapped apps; pandas then\n",
    "# yields NaN/inf for the ratio rather than raising.\n",
    "active['category_ratio'] = active['category_nunique'] / active['category_len']\n",
    "del active['category']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((5000000, 28), 5000000)"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Attach the per-user attribute tables to the combined train+test frame,\n",
    "# one left join per source so every uid keeps exactly one row.\n",
    "for extra in (user_basic_info, behavior_info, active):\n",
    "    all_data = all_data.merge(extra, how='left', on=['uid'])\n",
    "feature_name = [c for c in all_data.columns if c not in ['uid', 'age_group']]\n",
    "all_data.shape, len(all_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Frequency-style features: how common each categorical value is among users.\n",
    "count_cols = ['city', 'romleftration', 'prodname', 'color', 'ct', 'carrier']\n",
    "for col in count_cols:\n",
    "    all_data[col + '_count_user'] = all_data.groupby([col])['uid'].transform('count')\n",
    "\n",
    "# Same grouping with nunique over uid (note: color is intentionally absent\n",
    "# here, matching the original feature set).\n",
    "nunique_cols = ['city', 'romleftration', 'prodname', 'ct', 'carrier']\n",
    "for col in nunique_cols:\n",
    "    all_data[col + '_nunique_user'] = all_data.groupby([col])['uid'].transform('nunique')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>age_group</th>\n",
       "      <th>uid</th>\n",
       "      <th>gender</th>\n",
       "      <th>city</th>\n",
       "      <th>prodname</th>\n",
       "      <th>ramcapacity</th>\n",
       "      <th>ramleftration</th>\n",
       "      <th>romcapacity</th>\n",
       "      <th>romleftration</th>\n",
       "      <th>color</th>\n",
       "      <th>...</th>\n",
       "      <th>romleftration_count_user</th>\n",
       "      <th>prodname_count_user</th>\n",
       "      <th>color_count_user</th>\n",
       "      <th>ct_count_user</th>\n",
       "      <th>carrier_count_user</th>\n",
       "      <th>city_nunique_user</th>\n",
       "      <th>romleftration_nunique_user</th>\n",
       "      <th>prodname_nunique_user</th>\n",
       "      <th>ct_nunique_user</th>\n",
       "      <th>carrier_nunique_user</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>4.0</td>\n",
       "      <td>1000006</td>\n",
       "      <td>1</td>\n",
       "      <td>c00253</td>\n",
       "      <td>p0054</td>\n",
       "      <td>8.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>128.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>翡冷翠</td>\n",
       "      <td>...</td>\n",
       "      <td>NaN</td>\n",
       "      <td>71149</td>\n",
       "      <td>53644</td>\n",
       "      <td>2514879.0</td>\n",
       "      <td>2463517</td>\n",
       "      <td>77071.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>71149</td>\n",
       "      <td>2514879.0</td>\n",
       "      <td>2463517</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>4.0</td>\n",
       "      <td>1000009</td>\n",
       "      <td>0</td>\n",
       "      <td>c0043</td>\n",
       "      <td>p0018</td>\n",
       "      <td>8.0</td>\n",
       "      <td>0.22</td>\n",
       "      <td>256.0</td>\n",
       "      <td>0.49</td>\n",
       "      <td>渐变黑</td>\n",
       "      <td>...</td>\n",
       "      <td>54080.0</td>\n",
       "      <td>22495</td>\n",
       "      <td>13283</td>\n",
       "      <td>2514879.0</td>\n",
       "      <td>2463517</td>\n",
       "      <td>213066.0</td>\n",
       "      <td>54080.0</td>\n",
       "      <td>22495</td>\n",
       "      <td>2514879.0</td>\n",
       "      <td>2463517</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>5.0</td>\n",
       "      <td>1000010</td>\n",
       "      <td>0</td>\n",
       "      <td>c00284</td>\n",
       "      <td>p0054</td>\n",
       "      <td>8.0</td>\n",
       "      <td>0.38</td>\n",
       "      <td>128.0</td>\n",
       "      <td>0.04</td>\n",
       "      <td>翡冷翠</td>\n",
       "      <td>...</td>\n",
       "      <td>49470.0</td>\n",
       "      <td>71149</td>\n",
       "      <td>53644</td>\n",
       "      <td>2514879.0</td>\n",
       "      <td>2463517</td>\n",
       "      <td>4897.0</td>\n",
       "      <td>49470.0</td>\n",
       "      <td>71149</td>\n",
       "      <td>2514879.0</td>\n",
       "      <td>2463517</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>5.0</td>\n",
       "      <td>1000012</td>\n",
       "      <td>0</td>\n",
       "      <td>c0087</td>\n",
       "      <td>p0059</td>\n",
       "      <td>4.0</td>\n",
       "      <td>0.34</td>\n",
       "      <td>64.0</td>\n",
       "      <td>0.21</td>\n",
       "      <td>香槟金</td>\n",
       "      <td>...</td>\n",
       "      <td>48572.0</td>\n",
       "      <td>96397</td>\n",
       "      <td>306158</td>\n",
       "      <td>2077881.0</td>\n",
       "      <td>1126914</td>\n",
       "      <td>30551.0</td>\n",
       "      <td>48572.0</td>\n",
       "      <td>96397</td>\n",
       "      <td>2077881.0</td>\n",
       "      <td>1126914</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>4.0</td>\n",
       "      <td>1000027</td>\n",
       "      <td>0</td>\n",
       "      <td>c00206</td>\n",
       "      <td>p001</td>\n",
       "      <td>6.0</td>\n",
       "      <td>0.26</td>\n",
       "      <td>137.0</td>\n",
       "      <td>0.79</td>\n",
       "      <td>海鸥灰</td>\n",
       "      <td>...</td>\n",
       "      <td>53404.0</td>\n",
       "      <td>147835</td>\n",
       "      <td>58108</td>\n",
       "      <td>2077881.0</td>\n",
       "      <td>1126914</td>\n",
       "      <td>117881.0</td>\n",
       "      <td>53404.0</td>\n",
       "      <td>147835</td>\n",
       "      <td>2077881.0</td>\n",
       "      <td>1126914</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>5 rows × 39 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "   age_group      uid  gender    city prodname  ramcapacity  ramleftration  \\\n",
       "0        4.0  1000006       1  c00253    p0054          8.0            NaN   \n",
       "1        4.0  1000009       0   c0043    p0018          8.0           0.22   \n",
       "2        5.0  1000010       0  c00284    p0054          8.0           0.38   \n",
       "3        5.0  1000012       0   c0087    p0059          4.0           0.34   \n",
       "4        4.0  1000027       0  c00206     p001          6.0           0.26   \n",
       "\n",
       "   romcapacity  romleftration color  ...  romleftration_count_user  \\\n",
       "0        128.0            NaN   翡冷翠  ...                       NaN   \n",
       "1        256.0           0.49   渐变黑  ...                   54080.0   \n",
       "2        128.0           0.04   翡冷翠  ...                   49470.0   \n",
       "3         64.0           0.21   香槟金  ...                   48572.0   \n",
       "4        137.0           0.79   海鸥灰  ...                   53404.0   \n",
       "\n",
       "  prodname_count_user color_count_user  ct_count_user  carrier_count_user  \\\n",
       "0               71149            53644      2514879.0             2463517   \n",
       "1               22495            13283      2514879.0             2463517   \n",
       "2               71149            53644      2514879.0             2463517   \n",
       "3               96397           306158      2077881.0             1126914   \n",
       "4              147835            58108      2077881.0             1126914   \n",
       "\n",
       "   city_nunique_user  romleftration_nunique_user  prodname_nunique_user  \\\n",
       "0            77071.0                         NaN                  71149   \n",
       "1           213066.0                     54080.0                  22495   \n",
       "2             4897.0                     49470.0                  71149   \n",
       "3            30551.0                     48572.0                  96397   \n",
       "4           117881.0                     53404.0                 147835   \n",
       "\n",
       "   ct_nunique_user  carrier_nunique_user  \n",
       "0        2514879.0               2463517  \n",
       "1        2514879.0               2463517  \n",
       "2        2514879.0               2463517  \n",
       "3        2077881.0               1126914  \n",
       "4        2077881.0               1126914  \n",
       "\n",
       "[5 rows x 39 columns]"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Quick sanity check of the merged frame and the new count features.\n",
    "all_data.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Partition columns into categorical (object-dtype) and numeric features.\n",
    "feature_name = [i for i in all_data.columns if i not in ['uid','age_group']]\n",
    "# np.object was deprecated in NumPy 1.20 and removed in 1.24; the builtin\n",
    "# object is the exact equivalent for this dtype comparison.\n",
    "cat_col = [col for col in all_data.columns if all_data[col].dtype == object]\n",
    "num_col = [i for i in feature_name if i not in cat_col]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "label_name = ['age_group']\n",
    "# NOTE(review): mid-notebook imports; consider moving to the top import cell.\n",
    "from tqdm import tqdm\n",
    "from scipy import sparse\n",
    "# appid is treated as a text field and vectorized below; the remaining\n",
    "# object columns get label encoding instead.\n",
    "vector_feature = ['appid']\n",
    "onehot_feature =  [i for i in cat_col if i not in vector_feature]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# all_data was built as train rows followed by test rows, so positional\n",
    "# index ranges split it back apart. tr accumulates the sparse train matrix.\n",
    "tr = None\n",
    "train_ix = list(range(train.shape[0]))\n",
    "test_ix = list(range(train.shape[0],all_data.shape[0]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print('onehot...')\n",
    "# Label-encode every categorical (non-vector) column in place.\n",
    "# Changes versus the original version:\n",
    "#  - the OneHotEncoder instance was created but never used, so it is removed;\n",
    "#  - fillna('0') after astype('str') could never take effect, because\n",
    "#    astype already turns NaN into the string nan, so it is dropped;\n",
    "#  - LabelEncoder expects a 1-D array, so the (-1, 1) reshape is removed\n",
    "#    (recent scikit-learn warns or raises on column vectors).\n",
    "for feature in tqdm(onehot_feature):\n",
    "    lbl = LabelEncoder()\n",
    "    all_data[feature] = lbl.fit_transform(all_data[feature].astype(str).values)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "from scipy import sparse\n",
    "\n",
    "# Precomputed vectorizations of the active-app lists (count and tf-idf\n",
    "# variants) - presumably produced by a separate script; TODO confirm that\n",
    "# their row order matches all_data's uid order.\n",
    "c1 = sparse.load_npz(\"../vector/Sparse_Matrix/active_count.npz\")\n",
    "c2 = sparse.load_npz(\"../vector/Sparse_Matrix/active_tfidf.npz\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dimensionality reduction of the sparse app matrices. Note the same SVD\n",
    "# object is refit for c2, so svd afterwards holds only the c2 fit; c1_svd\n",
    "# and c2_svd are not used further in the cells visible in this notebook.\n",
    "svd = TruncatedSVD(n_components=100,n_iter=10,random_state=2019)\n",
    "c1_svd = svd.fit_transform(c1)\n",
    "c2_svd = svd.fit_transform(c2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "  0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\u001b[A\n",
      "\n",
      "100%|██████████| 1/1 [06:23<00:00, 383.46s/it]\u001b[A\u001b[A\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(4000000, 19888) (1000000, 19888)\n",
      "(4000000, 19919) (1000000, 19919)\n"
     ]
    }
   ],
   "source": [
    "# Build the final sparse train/test matrices: tf-idf features of the app\n",
    "# list, then raw count features, then the dense numeric columns, all\n",
    "# horizontally stacked in CSR format.\n",
    "# NOTE(review): fillna(\"##\") after astype('str') is a no-op (NaN already\n",
    "# became the string nan), and fit() sees the un-filled series - harmless\n",
    "# here but worth cleaning up.\n",
    "print('cv...')\n",
    "cv = TfidfVectorizer(analyzer='word',token_pattern=u\"(?u)\\\\b\\\\w+\\\\b\",min_df=2)\n",
    "for feature in tqdm(vector_feature):\n",
    "    # Fit on the full (train+test) column so the vocabulary is shared.\n",
    "    cv.fit(all_data[feature].astype('str'))\n",
    "    train_a = cv.transform(all_data[feature].astype('str').fillna(\"##\").loc[train_ix])\n",
    "    test_a = cv.transform(all_data[feature].astype('str').fillna(\"##\").loc[test_ix])\n",
    "    if tr is None:\n",
    "        tr = train_a\n",
    "        te = test_a\n",
    "    else:\n",
    "        tr = sparse.hstack((tr, train_a), 'csr')\n",
    "        te = sparse.hstack((te, test_a), 'csr')    \n",
    "print(tr.shape,te.shape)\n",
    "    \n",
    "# Second pass with plain term counts appended alongside the tf-idf block.\n",
    "cv = CountVectorizer(analyzer='word',token_pattern=u\"(?u)\\\\b\\\\w+\\\\b\",min_df=2)\n",
    "for feature in tqdm(vector_feature):\n",
    "    cv.fit(all_data[feature].astype('str'))\n",
    "    train_a = cv.transform(all_data[feature].astype('str').fillna(\"##\").loc[train_ix])\n",
    "    test_a = cv.transform(all_data[feature].astype('str').fillna(\"##\").loc[test_ix])\n",
    "    tr = sparse.hstack((tr, train_a), 'csr')\n",
    "    te = sparse.hstack((te, test_a), 'csr')\n",
    "\n",
    "print(tr.shape,te.shape)\n",
    "\n",
    "# Finally append the numeric columns to both matrices.\n",
    "feature_name = [i for i in all_data.columns if i not in ['uid','age_group']]\n",
    "tr = sparse.hstack((tr,all_data.loc[train_ix][num_col]),'csr')\n",
    "te = sparse.hstack((te,all_data.loc[test_ix][num_col]),'csr')\n",
    "print(tr.shape,te.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
    "import catboost as cbt\n",
    "def acc_score(labels, preds):\n",
    "    \"\"\"LightGBM-style eval helper: accuracy from class probabilities.\n",
    "\n",
    "    labels: true class ids; preds: (n_samples, n_classes) probabilities.\n",
    "    Returns (metric_name, value, is_higher_better).\n",
    "    \"\"\"\n",
    "    preds = np.argmax(preds, axis=1)\n",
    "    # y_true/y_pred were swapped in the original call; accuracy is symmetric\n",
    "    # so the value is identical, but the corrected form is not misleading.\n",
    "    score = accuracy_score(y_true=labels, y_pred=preds)\n",
    "    return 'acc_score', score, True\n",
    "\n",
    "tr_index = ~all_data[label_name].isnull()\n",
    "X_train = tr  # sparse train feature matrix built above\n",
    "y = train['age_group'].values - 1  # shift labels to 0-based for LightGBM\n",
    "X_test = te  # sparse test feature matrix\n",
    "final_pred = []\n",
    "cv_score = []\n",
    "cv_model = []"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "jupyter": {
     "outputs_hidden": true
    },
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "Training until validation scores don't improve for 100 rounds.\n",
      "[10]\tvalid_0's multi_error: 0.5097\n",
      "[20]\tvalid_0's multi_error: 0.495596\n",
      "[30]\tvalid_0's multi_error: 0.484979\n",
      "[40]\tvalid_0's multi_error: 0.475964\n",
      "[50]\tvalid_0's multi_error: 0.46824\n",
      "[60]\tvalid_0's multi_error: 0.461814\n",
      "[70]\tvalid_0's multi_error: 0.456118\n",
      "[80]\tvalid_0's multi_error: 0.450941\n",
      "[90]\tvalid_0's multi_error: 0.446739\n",
      "[100]\tvalid_0's multi_error: 0.443093\n",
      "[110]\tvalid_0's multi_error: 0.439866\n",
      "[120]\tvalid_0's multi_error: 0.43687\n",
      "[130]\tvalid_0's multi_error: 0.434149\n",
      "[140]\tvalid_0's multi_error: 0.432026\n",
      "[150]\tvalid_0's multi_error: 0.430082\n",
      "[160]\tvalid_0's multi_error: 0.428454\n",
      "[170]\tvalid_0's multi_error: 0.426683\n",
      "[180]\tvalid_0's multi_error: 0.425089\n",
      "[190]\tvalid_0's multi_error: 0.423586\n",
      "[200]\tvalid_0's multi_error: 0.422318\n",
      "[210]\tvalid_0's multi_error: 0.421269\n",
      "[220]\tvalid_0's multi_error: 0.41996\n",
      "[230]\tvalid_0's multi_error: 0.418861\n",
      "[240]\tvalid_0's multi_error: 0.417895\n",
      "[250]\tvalid_0's multi_error: 0.417161\n",
      "[260]\tvalid_0's multi_error: 0.416261\n",
      "[270]\tvalid_0's multi_error: 0.415382\n",
      "[280]\tvalid_0's multi_error: 0.414625\n",
      "[290]\tvalid_0's multi_error: 0.413942\n",
      "[300]\tvalid_0's multi_error: 0.413233\n",
      "[310]\tvalid_0's multi_error: 0.412574\n",
      "[320]\tvalid_0's multi_error: 0.41209\n",
      "[330]\tvalid_0's multi_error: 0.411534\n",
      "[340]\tvalid_0's multi_error: 0.411112\n",
      "[350]\tvalid_0's multi_error: 0.4108\n",
      "[360]\tvalid_0's multi_error: 0.410457\n",
      "[370]\tvalid_0's multi_error: 0.409979\n"
     ]
    }
   ],
   "source": [
    "# 5-fold stratified CV: train one LightGBM multiclass model per fold with\n",
    "# early stopping, record the model, its validation accuracy, and its\n",
    "# hard-label predictions on the test set (one column per fold).\n",
    "skf = StratifiedKFold(n_splits=5, random_state=2019, shuffle=True)\n",
    "for index, (train_index, test_index) in enumerate(skf.split(X_train, y)):\n",
    "    print(index)\n",
    "    lgb_model = lgb.LGBMClassifier(\n",
    "        boosting_type=\"gbdt\", num_leaves=128, reg_alpha=0.1, reg_lambda=1,\n",
    "        max_depth=-1, n_estimators=3000, objective='multiclass',num_class=6,\n",
    "        subsample=0.5, colsample_bytree=0.5, subsample_freq=1,min_child_samples=20,\n",
    "        learning_rate=0.1, random_state=2019 + index, n_jobs=50, metric=\"multi_error\", importance_type='gain'\n",
    "    )\n",
    "#     train_x, test_x, train_y, test_y = X_train.iloc[train_index], X_train.iloc[test_index], y.iloc[train_index], y.iloc[test_index]\n",
    "    # Sparse-matrix row slicing; X_train/X_test are CSR, y is a numpy array.\n",
    "    train_x, test_x, train_y, test_y = X_train[train_index], X_train[test_index], y[train_index], y[test_index]\n",
    "    eval_set = [(test_x, test_y)]\n",
    "    lgb_model.fit(train_x, train_y, eval_set=eval_set,verbose=10,early_stopping_rounds=100)\n",
    "    # cbt_model = cbt.CatBoostClassifier(n_estimators=3000,learning_rate=0.1,max_depth=7,colsample_bytree=0.5,objective='multi_class',verbose=10,early_stopping_rounds=100,task_type='GPU',eval_metric='Accuracy')\n",
    "    # cbt_model.fit(train_x, train_y,eval_set=[test_x,test_y])\n",
    "    cv_model.append([lgb_model])\n",
    "    y_test = lgb_model.predict(X_test)\n",
    "    y_val = lgb_model.predict_proba(test_x)\n",
    "    cv_score.append(acc_score(test_y,y_val)[1])\n",
    "    if index == 0:\n",
    "        final_pred = np.array(y_test).reshape(-1, 1)\n",
    "    else:\n",
    "        # Accumulate per-fold label predictions as columns.\n",
    "        final_pred = np.hstack((final_pred, np.array(y_test).reshape(-1, 1)))\n",
    "\n",
    "print(\"LGBM : \",np.mean(cv_score))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "[[0.00299943 0.00118632 0.18409089 0.69738263 0.10134583 0.01299489]\n",
      " [0.01881538 0.03866722 0.5839238  0.30704702 0.04991326 0.0016333 ]\n",
      " [0.03562736 0.00789534 0.14133247 0.44927837 0.27335493 0.09251153]\n",
      " [0.02075523 0.03929126 0.14327357 0.17981842 0.23081689 0.38604464]\n",
      " [0.12674667 0.81609541 0.03854113 0.00866026 0.00854215 0.00141439]]\n",
      "1\n",
      "[[0.00407134 0.00123642 0.02171078 0.27080535 0.59786133 0.10431478]\n",
      " [0.00192695 0.00849684 0.07856286 0.22905042 0.64224228 0.03972065]\n",
      " [0.0650815  0.04353347 0.11124417 0.07979386 0.22312841 0.47721859]\n",
      " [0.10625459 0.29177171 0.1913272  0.19076278 0.18418211 0.0357016 ]\n",
      " [0.422402   0.24891585 0.13363914 0.08023887 0.06219008 0.05261405]]\n",
      "2\n",
      "[[5.86002423e-03 2.98108478e-02 9.53930247e-02 1.33689212e-01\n",
      "  4.33755096e-01 3.01491795e-01]\n",
      " [1.61726244e-01 8.21122060e-01 1.18322664e-02 2.52182633e-03\n",
      "  2.37350385e-03 4.24099114e-04]\n",
      " [1.50777118e-01 1.43257880e-01 3.42157920e-02 4.02181710e-01\n",
      "  1.69174384e-01 1.00393116e-01]\n",
      " [4.76134161e-01 4.21202768e-01 6.89653427e-02 1.65722225e-02\n",
      "  1.24049811e-02 4.72052437e-03]\n",
      " [1.72783071e-02 2.80142720e-02 1.93881210e-01 1.49255943e-01\n",
      "  3.76505645e-01 2.35064623e-01]]\n",
      "3\n",
      "[[0.11024663 0.02312595 0.11481489 0.246648   0.22873036 0.27643418]\n",
      " [0.03095403 0.025153   0.12632208 0.21398376 0.4136763  0.18991082]\n",
      " [0.11902463 0.26537262 0.17601302 0.15529703 0.23305098 0.05124172]\n",
      " [0.00644633 0.02442702 0.05363194 0.13376462 0.66200212 0.11972797]\n",
      " [0.0259515  0.14126611 0.43949526 0.25939508 0.1247867  0.00910534]]\n",
      "4\n",
      "[[1.94850668e-02 6.48375234e-03 5.87648556e-02 2.73812262e-01\n",
      "  5.76459474e-01 6.49945891e-02]\n",
      " [6.41311871e-01 8.35642501e-02 6.72315383e-02 1.42457473e-01\n",
      "  5.56393289e-02 9.79553829e-03]\n",
      " [4.96822239e-03 9.35527709e-01 4.49544074e-02 1.01115841e-02\n",
      "  4.08425814e-03 3.53818646e-04]\n",
      " [1.91848596e-01 1.32073879e-01 7.65545131e-02 8.39315041e-02\n",
      "  1.81029843e-01 3.34561665e-01]\n",
      " [1.53947844e-02 2.06651678e-02 7.09001231e-02 6.62882886e-02\n",
      "  3.66937623e-01 4.59814013e-01]]\n"
     ]
    }
   ],
   "source": [
    "# skf = StratifiedKFold(n_splits=5, random_state=2019, shuffle=True)\n",
    "# Rebuild out-of-fold probabilities for the train set and an averaged\n",
    "# probability prediction for the test set from the saved fold models.\n",
    "# Re-splitting with the same skf (fixed random_state) yields identical\n",
    "# folds, so each model sees its own validation rows again.\n",
    "cv_pred_lgb = np.zeros((X_train.shape[0],6))\n",
    "test_pred_lgb = np.zeros((X_test.shape[0],6))\n",
    "\n",
    "for index, (train_index, test_index) in enumerate(skf.split(X_train, y)):\n",
    "    print(index)\n",
    "    train_x, test_x, train_y, test_y = X_train[train_index], X_train[test_index], y[train_index], y[test_index]\n",
    "    y_val = cv_model[index][0].predict_proba(test_x)\n",
    "    cv_pred_lgb[test_index] = y_val\n",
    "    print(y_val[:5])\n",
    "    # Equal-weight average of the five fold models on the test matrix.\n",
    "    test_pred_lgb += cv_model[index][0].predict_proba(X_test) / 5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 1h 37min 51s, sys: 57 s, total: 1h 38min 48s\n",
      "Wall time: 1min 58s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# Timing check: cost of predict_proba for a single fold model on one\n",
    "# validation fold (test_x is left over from the previous loop).\n",
    "k = cv_model[0][0].predict_proba(test_x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Wrap the OOF and test probability arrays as DataFrames keyed by uid,\n",
    "# presumably for later ensembling - they are not used again in the cells\n",
    "# visible here. Assumes cv_pred_lgb rows follow train's (uid-sorted) order;\n",
    "# TODO confirm against the sparse-matrix construction above.\n",
    "oof = pd.DataFrame(cv_pred_lgb)\n",
    "oof['uid'] = train['uid'].values\n",
    "\n",
    "pred = pd.DataFrame(test_pred_lgb)\n",
    "pred['uid'] = test['uid'].values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
