{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "from collections import Counter, namedtuple\n",
    "from functools import reduce\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from scipy.sparse import coo_matrix, vstack\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.feature_extraction.text import TfidfTransformer\n",
    "from sklearn.model_selection import cross_validate\n",
    "from tqdm import tqdm\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _word_ngrams(tokens, ngram_range, stop_words=None):\n",
    "    \"\"\"Turn tokens into a sequence of n-grams after stop words filtering\n",
    "    copy from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py#L148\n",
    "    \"\"\"\n",
    "    # handle stop words\n",
    "    if stop_words is not None:\n",
    "        tokens = [w for w in tokens if w not in stop_words]\n",
    "\n",
    "    # handle token n-grams\n",
    "    min_n, max_n = ngram_range\n",
    "    if max_n != 1:\n",
    "        original_tokens = tokens\n",
    "        if min_n == 1:\n",
    "            # no need to do any slicing for unigrams\n",
    "            # just iterate through the original tokens\n",
    "            tokens = list(original_tokens)\n",
    "            min_n += 1\n",
    "        else:\n",
    "            tokens = []\n",
    "\n",
    "        n_original_tokens = len(original_tokens)\n",
    "\n",
    "        # bind method outside of loop to reduce overhead\n",
    "        tokens_append = tokens.append\n",
    "        space_join = \" \".join\n",
    "\n",
    "        for n in range(min_n,\n",
    "                        min(max_n + 1, n_original_tokens + 1)):\n",
    "            for i in range(n_original_tokens - n + 1):\n",
    "                tokens_append(space_join(original_tokens[i: i + n]))\n",
    "\n",
    "    return tokens\n",
    "\n",
    "def line_generator(filename):\n",
    "    if filename is not None:\n",
    "        f = open(filename, encoding='utf-8')\n",
    "    else:\n",
    "        f = sys.stdin\n",
    "    for line in f:\n",
    "        yield line\n",
    "    f.close()\n",
    "\n",
    "\n",
    "def row_generator(g):\n",
    "    fields = next(g).strip().split(',')\n",
    "    Row = namedtuple('Row', fields)\n",
    "    for line in g:\n",
    "        yield Row(*line.strip().split(','))\n",
    "\n",
    "\n",
    "def group_generator(g, group_keys):\n",
    "    buffer = []\n",
    "    last_gid = None\n",
    "    if not isinstance(group_keys, list):\n",
    "        group_keys = [group_keys]\n",
    "    for row in g:\n",
    "        cur_gid = tuple(getattr(row, k) for k in group_keys)\n",
    "        if last_gid is None:\n",
    "            last_gid = cur_gid\n",
    "        if cur_gid != last_gid:\n",
    "            last_gid = cur_gid\n",
    "            yield buffer\n",
    "            buffer = []\n",
    "        buffer.append(row)\n",
    "    yield buffer\n",
    "\n",
    "def ngram_generator(g, ngram_range):\n",
    "    \"\"\"Turn each (file_id, tid) group into a ThreadSample of n-gram counts.\n",
    "\n",
    "    The rows' `api` fields form the token sequence; its n-grams in\n",
    "    ngram_range are counted per thread. Rows without a `label` attribute\n",
    "    get label -1 (unlabeled / test data).\n",
    "    \"\"\"\n",
    "    ThreadSample = namedtuple('ThreadSample', ['file_id', 'ngrams', 'label'])\n",
    "    for group in g:\n",
    "        first = group[0]\n",
    "        api_seq = [row.api for row in group]\n",
    "        counts = Counter(_word_ngrams(api_seq, ngram_range))\n",
    "        yield ThreadSample(first.file_id, counts, getattr(first, 'label', -1))\n",
    "\n",
    "def file_sample_generator(g):\n",
    "    FileSample = namedtuple('FileSample', ['file_id', 'ngrams', 'label'])\n",
    "    for group in g:\n",
    "        file_id = group[0].file_id\n",
    "        label = getattr(group[0], 'label', -1)\n",
    "        ngrams = reduce(Counter.__add__, (t.ngrams for t in group))\n",
    "        yield FileSample(file_id, ngrams, label)\n",
    "\n",
    "class ValueEncoder(object):\n",
    "    def __init__(self):\n",
    "        self.val_dict = {}\n",
    "\n",
    "    def encode(self, val):\n",
    "        if val not in self.val_dict:\n",
    "            self.val_dict[val] = len(self.val_dict)\n",
    "        return self.val_dict[val]\n",
    "\n",
    "def extract_bow_features_from_file(path, value_encoder, ngram_range=(1, 3), total=None):\n",
    "    \"\"\"Build a sparse bag-of-ngrams matrix from a CSV of per-thread API calls.\n",
    "\n",
    "    path: CSV whose header includes at least file_id, tid, api (label optional),\n",
    "        with rows clustered by file_id then tid.\n",
    "    value_encoder: ValueEncoder mapping ngram string -> column index; share one\n",
    "        instance across train/test calls so columns line up.\n",
    "    ngram_range: (min_n, max_n) for n-gram extraction.\n",
    "        BUG FIX: this argument used to be ignored -- (1, 3) was hard-coded in\n",
    "        the pipeline below -- and the old default (1,) could not even unpack.\n",
    "        The new default (1, 3) matches the previously hard-coded behavior.\n",
    "    total: expected number of files; only sizes the tqdm progress bar.\n",
    "\n",
    "    Returns (bow_feats, labels): a scipy COO matrix whose row index is\n",
    "    int(file_id), and the per-file label list (label -1 when absent).\n",
    "    \"\"\"\n",
    "    g = line_generator(path)\n",
    "    g = row_generator(g)\n",
    "    g = group_generator(g, ['file_id', 'tid'])\n",
    "    g = ngram_generator(g, ngram_range=ngram_range)\n",
    "    g = group_generator(g, 'file_id')\n",
    "    g = file_sample_generator(g)\n",
    "    rows, cols, values = [], [], []\n",
    "    labels = []\n",
    "    for file_sample in tqdm(g, total=total, ncols=80):\n",
    "        file_id = int(file_sample.file_id)\n",
    "        labels.append(int(file_sample.label))\n",
    "        for ngram, cnt in file_sample.ngrams.items():\n",
    "            rows.append(file_id)\n",
    "            cols.append(value_encoder.encode(ngram))\n",
    "            values.append(cnt)\n",
    "    bow_feats = coo_matrix((values, (rows, cols)))\n",
    "    return bow_feats, labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|                                       | 12/116624 [00:00<25:55, 74.98it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting (1,3)-grams from train file ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|█████████████████████████████████| 116624/116624 [1:16:09<00:00, 25.52it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting (1,3)-grams from test file ...\n",
      "(116624, 345903) (116624,)\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): the variable is called train_file but points at 'test.csv' --\n",
    "# confirm this is the intended training input.\n",
    "train_file = 'test.csv'\n",
    "ve = ValueEncoder()\n",
    "print('Extracting (1,3)-grams from train file ...')\n",
    "# total=116624 only sizes the tqdm progress bar; it does not limit the read.\n",
    "tr_bow_feats, tr_labels = extract_bow_features_from_file(train_file, ve,\n",
    "                                                             ngram_range=(1, 3), total=116624)\n",
    "# NOTE(review): the test-file extraction below is commented out, so this\n",
    "# print is misleading -- nothing is extracted from a test file here.\n",
    "print('Extracting (1,3)-grams from test file ...')\n",
    "import numpy as np\n",
    "train_y=np.array(tr_labels)\n",
    "# train_X keeps a reference to the RAW count matrix: tr_bow_feats is rebound\n",
    "# to the TF-IDF-transformed matrix below, so train_X stays un-transformed.\n",
    "# The XGBoost and RF-baseline cells later train on these raw counts.\n",
    "train_X=tr_bow_feats\n",
    "#te_bow_feats, _ = extract_bow_features_from_file(test_file, ve,\n",
    "                                                     #ngram_range=(1, 3), total=53093)\n",
    "print('Calculating TFIDF Value ...')\n",
    "#num_feats = te_bow_feats.shape[1]\n",
    "#tr_bow_feats.resize((tr_bow_feats.shape[0], num_feats))\n",
    "\n",
    "tfidf_tsfm = TfidfTransformer()\n",
    "tfidf_tsfm.fit(tr_bow_feats)\n",
    "tr_bow_feats = tfidf_tsfm.transform(tr_bow_feats)\n",
    "#te_bow_feats = tfidf_tsfm.transform(te_bow_feats)\n",
    "rfc = RandomForestClassifier(n_estimators=50, n_jobs=-1)\n",
    "\n",
    "print('Cross validation')\n",
    "# Cross-validated log loss of a 50-tree random forest on the TF-IDF features.\n",
    "cv_res = cross_validate(rfc, tr_bow_feats, tr_labels, scoring='neg_log_loss', return_train_score=True)\n",
    "print(cv_res)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def runXGB(train_X,train_y,test_X,test_y=None,feature_names=None,seed_val=0,num_rounds=1000):\n",
    "    \"\"\"Train an XGBoost multi-class model and predict class probabilities on test_X.\n",
    "\n",
    "    When test_y is given, training watches (train, test) mlogloss and stops\n",
    "    early after 20 rounds without improvement; otherwise it runs the full\n",
    "    num_rounds. feature_names is accepted for interface compatibility but is\n",
    "    currently unused. Returns (pred_test_y, model).\n",
    "    \"\"\"\n",
    "    # Hyper-parameter setup (comments translated from Chinese)\n",
    "    param = {}\n",
    "    param['objective'] = 'multi:softprob'  # multi-class, emit per-class probabilities\n",
    "    param['eta'] = 0.1  # learning rate\n",
    "    param['max_depth'] = 6  # deeper trees overfit more easily\n",
    "    param['silent'] = 1  # 1 = suppress xgboost's informational log messages\n",
    "    param['num_class'] = 6  # six target classes (original comment wrongly said three)\n",
    "    param['eval_metric'] = \"mlogloss\"  # multi-class log loss\n",
    "    param['min_child_weight'] = 1  # min hessian sum per leaf; smaller -> easier overfitting\n",
    "    param['subsample'] = 0.7  # row subsampling per tree\n",
    "    param['colsample_bytree'] = 0.7  # column subsampling per tree\n",
    "    param['seed'] = seed_val  # RNG seed for reproducibility\n",
    "\n",
    "    plst = list(param.items())\n",
    "    xgtrain = xgb.DMatrix(train_X, label=train_y)\n",
    "\n",
    "    if test_y is not None:\n",
    "        xgtest = xgb.DMatrix(test_X, label=test_y)\n",
    "        watchlist = [(xgtrain, 'train'), (xgtest, 'test')]\n",
    "        # early_stopping_rounds: stop once test mlogloss fails to improve for 20 rounds\n",
    "        model = xgb.train(plst, xgtrain, num_rounds, watchlist, early_stopping_rounds=20)\n",
    "    else:\n",
    "        xgtest = xgb.DMatrix(test_X)\n",
    "        model = xgb.train(plst, xgtrain, num_rounds)\n",
    "    pred_test_y = model.predict(xgtest)\n",
    "    return pred_test_y, model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/admin/anaconda3/lib/python3.6/site-packages/sklearn/cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.\n",
      "  \"This module will be removed in 0.20.\", DeprecationWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0]\ttrain-mlogloss:1.50833\ttest-mlogloss:1.50841\n",
      "Multiple eval metrics have been passed: 'test-mlogloss' will be used for early stopping.\n",
      "\n",
      "Will train until test-mlogloss hasn't improved in 20 rounds.\n",
      "[1]\ttrain-mlogloss:1.29936\ttest-mlogloss:1.29937\n",
      "[2]\ttrain-mlogloss:1.13418\ttest-mlogloss:1.13421\n",
      "[3]\ttrain-mlogloss:0.999324\ttest-mlogloss:0.999398\n",
      "[4]\ttrain-mlogloss:0.885761\ttest-mlogloss:0.886069\n",
      "[5]\ttrain-mlogloss:0.788353\ttest-mlogloss:0.788831\n",
      "[6]\ttrain-mlogloss:0.704186\ttest-mlogloss:0.704819\n",
      "[7]\ttrain-mlogloss:0.631084\ttest-mlogloss:0.63187\n",
      "[8]\ttrain-mlogloss:0.566549\ttest-mlogloss:0.567496\n",
      "[9]\ttrain-mlogloss:0.510023\ttest-mlogloss:0.511081\n",
      "[10]\ttrain-mlogloss:0.460195\ttest-mlogloss:0.461377\n",
      "[11]\ttrain-mlogloss:0.415059\ttest-mlogloss:0.416472\n",
      "[12]\ttrain-mlogloss:0.375168\ttest-mlogloss:0.376751\n",
      "[13]\ttrain-mlogloss:0.339361\ttest-mlogloss:0.341116\n",
      "[14]\ttrain-mlogloss:0.307289\ttest-mlogloss:0.309229\n",
      "[15]\ttrain-mlogloss:0.278888\ttest-mlogloss:0.28099\n",
      "[16]\ttrain-mlogloss:0.253223\ttest-mlogloss:0.25548\n",
      "[17]\ttrain-mlogloss:0.22989\ttest-mlogloss:0.23232\n",
      "[18]\ttrain-mlogloss:0.209231\ttest-mlogloss:0.211792\n",
      "[19]\ttrain-mlogloss:0.190286\ttest-mlogloss:0.193108\n",
      "[20]\ttrain-mlogloss:0.173177\ttest-mlogloss:0.176191\n",
      "[21]\ttrain-mlogloss:0.158042\ttest-mlogloss:0.161257\n",
      "[22]\ttrain-mlogloss:0.1442\ttest-mlogloss:0.147571\n",
      "[23]\ttrain-mlogloss:0.131558\ttest-mlogloss:0.135145\n",
      "[24]\ttrain-mlogloss:0.120315\ttest-mlogloss:0.12407\n",
      "[25]\ttrain-mlogloss:0.110046\ttest-mlogloss:0.114024\n",
      "[26]\ttrain-mlogloss:0.100722\ttest-mlogloss:0.10492\n",
      "[27]\ttrain-mlogloss:0.092318\ttest-mlogloss:0.096685\n",
      "[28]\ttrain-mlogloss:0.08457\ttest-mlogloss:0.089205\n",
      "[29]\ttrain-mlogloss:0.07759\ttest-mlogloss:0.082367\n",
      "[30]\ttrain-mlogloss:0.071286\ttest-mlogloss:0.076213\n",
      "[31]\ttrain-mlogloss:0.065643\ttest-mlogloss:0.070743\n",
      "[32]\ttrain-mlogloss:0.060401\ttest-mlogloss:0.065703\n",
      "[33]\ttrain-mlogloss:0.05568\ttest-mlogloss:0.061245\n",
      "[34]\ttrain-mlogloss:0.051388\ttest-mlogloss:0.057143\n",
      "[35]\ttrain-mlogloss:0.047568\ttest-mlogloss:0.053462\n",
      "[36]\ttrain-mlogloss:0.044057\ttest-mlogloss:0.050108\n",
      "[37]\ttrain-mlogloss:0.040903\ttest-mlogloss:0.047101\n",
      "[38]\ttrain-mlogloss:0.038017\ttest-mlogloss:0.044361\n",
      "[39]\ttrain-mlogloss:0.035355\ttest-mlogloss:0.041848\n",
      "[40]\ttrain-mlogloss:0.032942\ttest-mlogloss:0.039616\n",
      "[41]\ttrain-mlogloss:0.030704\ttest-mlogloss:0.037537\n",
      "[42]\ttrain-mlogloss:0.02866\ttest-mlogloss:0.035618\n",
      "[43]\ttrain-mlogloss:0.026807\ttest-mlogloss:0.03393\n",
      "[44]\ttrain-mlogloss:0.025135\ttest-mlogloss:0.032328\n",
      "[45]\ttrain-mlogloss:0.023627\ttest-mlogloss:0.030966\n",
      "[46]\ttrain-mlogloss:0.022218\ttest-mlogloss:0.029696\n",
      "[47]\ttrain-mlogloss:0.020947\ttest-mlogloss:0.028575\n",
      "[48]\ttrain-mlogloss:0.019765\ttest-mlogloss:0.027548\n",
      "[49]\ttrain-mlogloss:0.018715\ttest-mlogloss:0.026637\n",
      "[50]\ttrain-mlogloss:0.017736\ttest-mlogloss:0.025799\n",
      "[51]\ttrain-mlogloss:0.016842\ttest-mlogloss:0.025011\n",
      "[52]\ttrain-mlogloss:0.015995\ttest-mlogloss:0.02434\n",
      "[53]\ttrain-mlogloss:0.015238\ttest-mlogloss:0.023703\n",
      "[54]\ttrain-mlogloss:0.014496\ttest-mlogloss:0.023094\n",
      "[55]\ttrain-mlogloss:0.013853\ttest-mlogloss:0.022536\n",
      "[56]\ttrain-mlogloss:0.013219\ttest-mlogloss:0.021991\n",
      "[57]\ttrain-mlogloss:0.012652\ttest-mlogloss:0.021516\n",
      "[58]\ttrain-mlogloss:0.012152\ttest-mlogloss:0.021099\n",
      "[59]\ttrain-mlogloss:0.011656\ttest-mlogloss:0.020766\n",
      "[60]\ttrain-mlogloss:0.011209\ttest-mlogloss:0.020397\n",
      "[61]\ttrain-mlogloss:0.010812\ttest-mlogloss:0.020107\n",
      "[62]\ttrain-mlogloss:0.010409\ttest-mlogloss:0.019795\n",
      "[63]\ttrain-mlogloss:0.010019\ttest-mlogloss:0.019504\n",
      "[64]\ttrain-mlogloss:0.009684\ttest-mlogloss:0.019246\n",
      "[65]\ttrain-mlogloss:0.009365\ttest-mlogloss:0.019022\n",
      "[66]\ttrain-mlogloss:0.009064\ttest-mlogloss:0.018842\n",
      "[67]\ttrain-mlogloss:0.008746\ttest-mlogloss:0.018643\n",
      "[68]\ttrain-mlogloss:0.008495\ttest-mlogloss:0.018508\n",
      "[69]\ttrain-mlogloss:0.008267\ttest-mlogloss:0.018368\n",
      "[70]\ttrain-mlogloss:0.008066\ttest-mlogloss:0.018242\n",
      "[71]\ttrain-mlogloss:0.007847\ttest-mlogloss:0.018093\n",
      "[72]\ttrain-mlogloss:0.007639\ttest-mlogloss:0.017966\n",
      "[73]\ttrain-mlogloss:0.007436\ttest-mlogloss:0.017866\n",
      "[74]\ttrain-mlogloss:0.007227\ttest-mlogloss:0.017773\n",
      "[75]\ttrain-mlogloss:0.007085\ttest-mlogloss:0.017679\n",
      "[76]\ttrain-mlogloss:0.006884\ttest-mlogloss:0.017587\n",
      "[77]\ttrain-mlogloss:0.006736\ttest-mlogloss:0.017508\n",
      "[78]\ttrain-mlogloss:0.006597\ttest-mlogloss:0.017442\n",
      "[79]\ttrain-mlogloss:0.006448\ttest-mlogloss:0.017385\n",
      "[80]\ttrain-mlogloss:0.006316\ttest-mlogloss:0.017312\n",
      "[81]\ttrain-mlogloss:0.006182\ttest-mlogloss:0.017261\n",
      "[82]\ttrain-mlogloss:0.006063\ttest-mlogloss:0.017233\n",
      "[83]\ttrain-mlogloss:0.005938\ttest-mlogloss:0.017188\n",
      "[84]\ttrain-mlogloss:0.005827\ttest-mlogloss:0.017114\n",
      "[85]\ttrain-mlogloss:0.005675\ttest-mlogloss:0.01707\n",
      "[86]\ttrain-mlogloss:0.005551\ttest-mlogloss:0.016989\n",
      "[87]\ttrain-mlogloss:0.005425\ttest-mlogloss:0.016921\n",
      "[88]\ttrain-mlogloss:0.005321\ttest-mlogloss:0.016883\n",
      "[89]\ttrain-mlogloss:0.005237\ttest-mlogloss:0.016879\n",
      "[90]\ttrain-mlogloss:0.005157\ttest-mlogloss:0.016845\n",
      "[91]\ttrain-mlogloss:0.005068\ttest-mlogloss:0.016813\n",
      "[92]\ttrain-mlogloss:0.004989\ttest-mlogloss:0.016755\n",
      "[93]\ttrain-mlogloss:0.004894\ttest-mlogloss:0.016742\n",
      "[94]\ttrain-mlogloss:0.004811\ttest-mlogloss:0.016703\n",
      "[95]\ttrain-mlogloss:0.004733\ttest-mlogloss:0.016675\n",
      "[96]\ttrain-mlogloss:0.004659\ttest-mlogloss:0.016656\n",
      "[97]\ttrain-mlogloss:0.004592\ttest-mlogloss:0.016641\n",
      "[98]\ttrain-mlogloss:0.004518\ttest-mlogloss:0.016628\n",
      "[99]\ttrain-mlogloss:0.004455\ttest-mlogloss:0.016637\n",
      "[100]\ttrain-mlogloss:0.004397\ttest-mlogloss:0.01662\n",
      "[101]\ttrain-mlogloss:0.004306\ttest-mlogloss:0.01659\n",
      "[102]\ttrain-mlogloss:0.004221\ttest-mlogloss:0.01656\n",
      "[103]\ttrain-mlogloss:0.004139\ttest-mlogloss:0.016546\n",
      "[104]\ttrain-mlogloss:0.004077\ttest-mlogloss:0.016548\n",
      "[105]\ttrain-mlogloss:0.004011\ttest-mlogloss:0.016517\n",
      "[106]\ttrain-mlogloss:0.003957\ttest-mlogloss:0.016501\n",
      "[107]\ttrain-mlogloss:0.003902\ttest-mlogloss:0.016487\n",
      "[108]\ttrain-mlogloss:0.003855\ttest-mlogloss:0.016486\n",
      "[109]\ttrain-mlogloss:0.003807\ttest-mlogloss:0.016466\n",
      "[110]\ttrain-mlogloss:0.003757\ttest-mlogloss:0.016458\n",
      "[111]\ttrain-mlogloss:0.003697\ttest-mlogloss:0.016452\n",
      "[112]\ttrain-mlogloss:0.003651\ttest-mlogloss:0.016443\n",
      "[113]\ttrain-mlogloss:0.003603\ttest-mlogloss:0.016414\n",
      "[114]\ttrain-mlogloss:0.003555\ttest-mlogloss:0.016387\n",
      "[115]\ttrain-mlogloss:0.003511\ttest-mlogloss:0.016416\n",
      "[116]\ttrain-mlogloss:0.003468\ttest-mlogloss:0.016399\n",
      "[117]\ttrain-mlogloss:0.003428\ttest-mlogloss:0.016395\n",
      "[118]\ttrain-mlogloss:0.00338\ttest-mlogloss:0.016381\n",
      "[119]\ttrain-mlogloss:0.003337\ttest-mlogloss:0.016373\n",
      "[120]\ttrain-mlogloss:0.003301\ttest-mlogloss:0.016382\n",
      "[121]\ttrain-mlogloss:0.003253\ttest-mlogloss:0.016391\n",
      "[122]\ttrain-mlogloss:0.003207\ttest-mlogloss:0.016365\n",
      "[123]\ttrain-mlogloss:0.003175\ttest-mlogloss:0.016371\n",
      "[124]\ttrain-mlogloss:0.003143\ttest-mlogloss:0.016388\n",
      "[125]\ttrain-mlogloss:0.003108\ttest-mlogloss:0.016381\n",
      "[126]\ttrain-mlogloss:0.003077\ttest-mlogloss:0.016374\n",
      "[127]\ttrain-mlogloss:0.00305\ttest-mlogloss:0.016375\n",
      "[128]\ttrain-mlogloss:0.003018\ttest-mlogloss:0.016346\n",
      "[129]\ttrain-mlogloss:0.002984\ttest-mlogloss:0.016339\n",
      "[130]\ttrain-mlogloss:0.002958\ttest-mlogloss:0.016317\n",
      "[131]\ttrain-mlogloss:0.002923\ttest-mlogloss:0.016302\n",
      "[132]\ttrain-mlogloss:0.002893\ttest-mlogloss:0.016304\n",
      "[133]\ttrain-mlogloss:0.002863\ttest-mlogloss:0.016294\n",
      "[134]\ttrain-mlogloss:0.002825\ttest-mlogloss:0.016301\n",
      "[135]\ttrain-mlogloss:0.002799\ttest-mlogloss:0.016312\n",
      "[136]\ttrain-mlogloss:0.002762\ttest-mlogloss:0.016329\n",
      "[137]\ttrain-mlogloss:0.002742\ttest-mlogloss:0.016316\n",
      "[138]\ttrain-mlogloss:0.002719\ttest-mlogloss:0.016324\n",
      "[139]\ttrain-mlogloss:0.002692\ttest-mlogloss:0.016312\n",
      "[140]\ttrain-mlogloss:0.002667\ttest-mlogloss:0.016305\n",
      "[141]\ttrain-mlogloss:0.002642\ttest-mlogloss:0.016292\n",
      "[142]\ttrain-mlogloss:0.002616\ttest-mlogloss:0.016283\n",
      "[143]\ttrain-mlogloss:0.002592\ttest-mlogloss:0.016278\n",
      "[144]\ttrain-mlogloss:0.002569\ttest-mlogloss:0.016292\n",
      "[145]\ttrain-mlogloss:0.002552\ttest-mlogloss:0.016304\n",
      "[146]\ttrain-mlogloss:0.002535\ttest-mlogloss:0.016293\n",
      "[147]\ttrain-mlogloss:0.002507\ttest-mlogloss:0.016296\n",
      "[148]\ttrain-mlogloss:0.002487\ttest-mlogloss:0.016306\n",
      "[149]\ttrain-mlogloss:0.002465\ttest-mlogloss:0.016326\n",
      "[150]\ttrain-mlogloss:0.002448\ttest-mlogloss:0.016337\n",
      "[151]\ttrain-mlogloss:0.002428\ttest-mlogloss:0.016343\n",
      "[152]\ttrain-mlogloss:0.002403\ttest-mlogloss:0.016322\n",
      "[153]\ttrain-mlogloss:0.002385\ttest-mlogloss:0.01631\n",
      "[154]\ttrain-mlogloss:0.002367\ttest-mlogloss:0.01632\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[155]\ttrain-mlogloss:0.002352\ttest-mlogloss:0.016313\n",
      "[156]\ttrain-mlogloss:0.002333\ttest-mlogloss:0.01631\n",
      "[157]\ttrain-mlogloss:0.00232\ttest-mlogloss:0.01634\n",
      "[158]\ttrain-mlogloss:0.002305\ttest-mlogloss:0.016358\n",
      "[159]\ttrain-mlogloss:0.002289\ttest-mlogloss:0.016372\n",
      "[160]\ttrain-mlogloss:0.002273\ttest-mlogloss:0.016381\n",
      "[161]\ttrain-mlogloss:0.002257\ttest-mlogloss:0.016381\n",
      "[162]\ttrain-mlogloss:0.002238\ttest-mlogloss:0.016399\n",
      "[163]\ttrain-mlogloss:0.002225\ttest-mlogloss:0.016408\n",
      "Stopping. Best iteration:\n",
      "[143]\ttrain-mlogloss:0.002592\ttest-mlogloss:0.016278\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Single model example 1: XGBoost\n",
    "import xgboost as xgb\n",
    "from sklearn import model_selection,preprocessing,ensemble\n",
    "# BUG FIX: sklearn.cross_validation was deprecated in 0.18 and removed in\n",
    "# 0.20 (see the DeprecationWarning in this cell's output); train_test_split\n",
    "# now lives in sklearn.model_selection.\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import log_loss\n",
    "cv_scores = []  # NOTE(review): never read later in this notebook -- kept for compatibility\n",
    "# Hold out 20% as the early-stopping validation set; fixed seed for reproducibility\n",
    "x_train, x_valid, y_train, y_valid = train_test_split(train_X, train_y, test_size=0.2, random_state=0)\n",
    "pred_test_y,model=runXGB(x_train,y_train,x_valid,y_valid)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Cross validation\n",
      "{'fit_time': array([7.6434083 , 7.86441493, 7.27798533]), 'score_time': array([0.95841122, 0.97256565, 1.01796508]), 'test_score': array([-0.06938313, -0.06782988, -0.07709167]), 'train_score': array([-0.00638233, -0.00646153, -0.00619394])}\n"
     ]
    }
   ],
   "source": [
    "#single model example2:randomforest for baseline \n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "rf = RandomForestClassifier(n_jobs=-1,random_state=0)\n",
    "# NOTE(review): this fit is not used by cross_validate below (it clones the\n",
    "# estimator and refits per fold); kept in case a later cell relies on the\n",
    "# fitted rf object.\n",
    "rf = rf.fit(x_train, y_train)\n",
    "print('Cross validation')\n",
    "# Scores on train_X/train_y -- the full matrix bound BEFORE the TF-IDF\n",
    "# transform in the earlier cell, i.e. raw n-gram counts, not TF-IDF values.\n",
    "cv_res = cross_validate(rf, train_X,train_y, scoring='neg_log_loss', return_train_score=True)\n",
    "print(cv_res)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
