{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false,
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "finish\n",
      "finish\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "import os\n",
    "import numpy as np\n",
    "import datetime\n",
    "import time\n",
    "# 1505145600 -> 2017-09-12 (UTC); used throughout as the reference 'now' timestamp\n",
    "\n",
    "times_zl = 1505145600\n",
    "\n",
    "def get_all_train_data():\n",
    "    \"\"\"Load the training action log and return it sorted by actionTime (ascending).\"\"\"\n",
    "    actions = pd.read_csv('../data/train/action_train.csv')\n",
    "    return actions.sort_values(by='actionTime', ascending=True)\n",
    "def get_all_test_data():\n",
    "    \"\"\"Load the test action log and return it sorted by actionTime (ascending).\"\"\"\n",
    "    actions = pd.read_csv('../data/test/action_test.csv')\n",
    "    return actions.sort_values(by='actionTime', ascending=True)\n",
    "\n",
    "def get_orderHistory(train):\n",
    "    \"\"\"Load order history for the given split ('train' selects train, anything else test).\n",
    "\n",
    "    Rows are sorted by (orderTime, orderType) ascending, then de-duplicated on\n",
    "    (userid, orderTime) keeping the last row (i.e. the highest orderType for ties).\n",
    "    \"\"\"\n",
    "    path = '../data/train/orderHistory_train.csv' if train == 'train' else '../data/test/orderHistory_test.csv'\n",
    "    orders = pd.read_csv(path)\n",
    "    orders = orders.sort_values(by=['orderTime', 'orderType'], ascending=[1, 1])\n",
    "    return orders.drop_duplicates(['userid', 'orderTime'], keep='last')\n",
    "\n",
    "# Province tiers used by province_map below (as grouped in the code):\n",
    "# tier 1: Shanghai, Beijing, Guangdong, Zhejiang, Jiangsu, Tianjin, Liaoning\n",
    "# tier 2: Fujian, Shandong, Sichuan, Chongqing, Hubei, Hunan, Inner Mongolia, Shanxi\n",
    "# tier 3: Henan, Shaanxi, Yunnan, Heilongjiang, Anhui, Guangxi, Jilin, Jiangxi, Hebei\n",
    "# tier 4: Gansu, Xinjiang, Qinghai, Tibet, Guizhou, Ningxia, Hainan\n",
    "def gender_map(x):\n",
    "    if x=='男':\n",
    "        return 1\n",
    "    elif x=='女':\n",
    "        return 2\n",
    "    else:\n",
    "        return 0 \n",
    "def age_map(x):\n",
    "    if x=='60后':\n",
    "        return 1\n",
    "    elif x=='70后':\n",
    "        return 2\n",
    "    elif x=='80后':\n",
    "        return 3\n",
    "    elif x=='90后':\n",
    "        return 4\n",
    "    elif x=='00后':\n",
    "        return 5\n",
    "    else:\n",
    "        return 0\n",
    "def province_map(x):\n",
    "    if x=='上海' or x== '北京' or x== '广东' or x== '浙江' or x== '江苏' or x== '天津' or x== '辽宁':\n",
    "        return 1\n",
    "    elif x=='福建' or x== '山东'  or x== '四川' or x== '重庆' or x== '湖北' or x== '湖南'  or x== '内蒙古' or x== '山西':\n",
    "        return 2\n",
    "    elif x=='河南' or x== '陕西' or x== '云南' or x== '黑龙江' or x== '安徽' or x== '广西' or x== '吉林' or x== '江西' or x== '河北':\n",
    "        return 3\n",
    "    elif x=='甘肃' or x== '新疆' or x== '青海' or x== '西藏' or x== '贵州' or x== '宁夏' or x== '海南':\n",
    "        return 4\n",
    "    else:\n",
    "        return 0\n",
    "def get_userprofile_map(train):\n",
    "    \"\"\"Load the user profile table and numerically encode gender/age/province in place.\"\"\"\n",
    "    df = get_userProfile(train)\n",
    "    df['gender'] = df['gender'].map(gender_map)\n",
    "    df['age'] = df['age'].map(age_map)\n",
    "    df['province'] = df['province'].map(province_map)\n",
    "    return df\n",
    "def get_userProfile(train):\n",
    "    \"\"\"Read the raw user profile CSV for the given split ('train' or test).\"\"\"\n",
    "    if(train=='train'):\n",
    "        df = pd.read_csv('../data/train/userProfile_train.csv')\n",
    "    else:\n",
    "        df = pd.read_csv('../data/test/userProfile_test.csv')\n",
    "    return df\n",
    "# Positional aggregation helpers for GroupBy.agg. last_k/first_k select a single\n",
    "# element of the (time-sorted) series via a 1-element slice and return its mean\n",
    "# (NaN when the group is too short). diff_* operate on consecutive differences.\n",
    "# NOTE(review): diff_first_2/diff_first_3 are re-defined further down in this\n",
    "# cell; those later definitions are the ones that win at runtime.\n",
    "def last_1(x):\n",
    "    return x[-1:].mean()\n",
    "def last_2(x):\n",
    "    return x[-2:-1].mean()\n",
    "def last_3(x):\n",
    "    return x[-3:-2].mean()\n",
    "def first_1(x):\n",
    "    return x[:1].mean()\n",
    "def first_2(x):\n",
    "    return x[1:2].mean()\n",
    "def first_3(x):\n",
    "    return x[2:3].mean()\n",
    "def diff_mean(x):\n",
    "    x = x.diff()\n",
    "#     x = x/(3600*24)\n",
    "    return x.mean()\n",
    "def diff_min(x):\n",
    "    x = x.diff()\n",
    "#     x = x/(3600*24)\n",
    "    return x.min()\n",
    "def diff_max(x):\n",
    "    x = x.diff()\n",
    "#     x = x/(3600*24)\n",
    "    return x.max()\n",
    "def diff_std(x):\n",
    "    x = x.diff()\n",
    "#     x = x/(3600*24)\n",
    "    return x.std()\n",
    "def diff_last_1(x):\n",
    "    x = x.diff()\n",
    "#     x = x/(3600*24)\n",
    "    return x[-1:].mean()\n",
    "def diff_last_2(x):\n",
    "    x = x.diff()\n",
    "#     x = x/(3600*24)\n",
    "    return x[-2:-1].mean()\n",
    "def diff_last_3(x):\n",
    "    x = x.diff()\n",
    "#     x = x/(3600*24)\n",
    "    return x[-3:-2].mean()\n",
    "\n",
    "def diff_first_4(x):\n",
    "    x = x.diff()\n",
    "    return x[3:4].mean()\n",
    "\n",
    "def diff_first_2(x):\n",
    "    x = x.diff()\n",
    "    return x[1:2].mean()\n",
    "\n",
    "def diff_first_3(x):\n",
    "    x = x.diff()\n",
    "    return x[2:3].mean()\n",
    "\n",
    "# Mean/std over only the last three consecutive differences of the group.\n",
    "def  diff_last_3_mean(x):\n",
    "    x = x.diff()\n",
    "#     x = x/(3600*24)\n",
    "    x = x[-3:]\n",
    "    return x.mean()\n",
    "def diff_last_3_std(x):\n",
    "    x = x.diff()\n",
    "#     x = x/(3600*24)\n",
    "    x = x[-3:]\n",
    "    return x.std()\n",
    "\n",
    "\n",
    "# Time to the reference 'now' and type of each user's first three and last three actions\n",
    "def get_feat_1(train):\n",
    "    \"\"\"First/last-3 actionType values and action times per user (CSV-cached).\n",
    "\n",
    "    actionTime is converted to seconds before the fixed reference timestamp\n",
    "    1505145600 (2017-09-12) before aggregation.\n",
    "    NOTE(review): .agg with a {name: func} dict on a SeriesGroupBy relies on\n",
    "    legacy pandas renaming behavior (removed in pandas >= 1.0) -- confirm the\n",
    "    pinned pandas version supports it. Same applies throughout this cell.\n",
    "    \"\"\"\n",
    "    dump_path = '../cache/get_feat_1_%s_2.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        df['actionTime'] = 1505145600- df['actionTime']\n",
    "#         df['actionTime'] = df['actionTime']/(3600*24) \n",
    "#         actions = df[['userid','actionType','get_feat_1_actions']]\n",
    "        \n",
    "        actions_1 = df[['userid','actionType']].groupby(['userid'])['actionType']\\\n",
    "                    .agg({\n",
    "                        'feat_1_last_1_type':last_1,\n",
    "                        'feat_1_last_2_type':last_2,\n",
    "                        'feat_1_last_3_type':last_3,\n",
    "                        'feat_1_first_1_type':first_1,\n",
    "                        'feat_1_first_2_type':first_2,\n",
    "                        'feat_1_first_3_type':first_3\n",
    "                        \n",
    "                    })\n",
    "        actions_1 = actions_1.reset_index()\n",
    "#         print(actions_1.columns)\n",
    "        actions_2 = df[['userid','actionTime']].groupby(['userid'])['actionTime']\\\n",
    "                    .agg({\n",
    "                        'feat_1_last_1_time':last_1,\n",
    "                        'feat_1_last_2_time':last_2,\n",
    "                        'feat_1_last_3_time':last_3,\n",
    "                        'feat_1_first_1_time':first_1,\n",
    "                        'feat_1_first_2_time':first_2,\n",
    "                        'feat_1_first_3_time':first_3,\n",
    "                        \n",
    "                        \n",
    "                    })\n",
    "        actions_2 = actions_2.reset_index()\n",
    "#         print(actions_2.columns)\n",
    "        actions = pd.merge(actions_1,actions_2,on='userid',how='left')\n",
    "        actions.to_csv(dump_path,index=False)\n",
    "        return actions\n",
    "# Per-user count of each actionType and its share of all actions\n",
    "def get_feat_2(train):\n",
    "    \"\"\"Per-user count of each actionType (one-hot sums) plus each type's share (CSV-cached).\"\"\"\n",
    "    dump_path='../cache/get_feat_2_%s.csv'%train\n",
    "    if  os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        actions_1 = pd.get_dummies(df['actionType'],prefix='actions_2_type')\n",
    "        actions = pd.concat([df[['userid']],actions_1],axis=1)\n",
    "        actions = actions.groupby(['userid'],as_index=False).sum()\n",
    "        # NOTE(review): range(1,10) assumes actionType values 1..9 all occur in\n",
    "        # the data; a missing value would raise KeyError -- confirm.\n",
    "        actions['actions_2_type']=0\n",
    "        for i in range(1,10,1):\n",
    "            actions['actions_2_type']=actions['actions_2_type']+actions['actions_2_type_'+str(i)]\n",
    "        for i in range(1,10,1):\n",
    "            actions['actions_2_type_rate_'+str(i)]=actions['actions_2_type_'+str(i)]/actions['actions_2_type']\n",
    "#             del actions['actions_2_type_'+str(i)]\n",
    "        actions.to_csv(dump_path,index=False)\n",
    "        return actions\n",
    "# NOTE(review): the definitions below shadow the earlier diff_first_2 and\n",
    "# diff_first_3 in this cell (these later versions are the ones get_feat_3 and\n",
    "# get_feat_4 actually use). Despite the 'diff_' prefix, diff_first_1 and\n",
    "# diff_first_2 here do NOT take differences, and each slices one position later\n",
    "# than its name suggests -- confirm this is intentional before changing.\n",
    "def diff_first_1(x):\n",
    "    return x[1:2].mean()\n",
    "\n",
    "def diff_first_2(x):\n",
    "    return x[2:3].mean()\n",
    "\n",
    "def diff_first_3(x):\n",
    "    x =x.diff()\n",
    "    return x[3:4].mean()\n",
    "# Mean/std (and related stats) of gaps between consecutive action times\n",
    "def get_feat_3(train):\n",
    "    \"\"\"Per-user statistics of gaps between consecutive actionTimes (CSV-cached).\"\"\"\n",
    "    dump_path ='../cache/get_feat_3_%s_1.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        # Uses the later (shadowing) diff_first_* definitions from this cell.\n",
    "        actions = df[['userid','actionTime']].groupby(['userid'])['actionTime'].agg({\n",
    "                                                                      'feat_3_mean':diff_mean,\n",
    "                                                                      'feat_3_std':diff_std,\n",
    "#                                                                       'feat_3_max':diff_max,\n",
    "                                                                      'feat_3_min':diff_min,\n",
    "                                                                      'feat_3_last_1':diff_last_1,\n",
    "                                                                      'feat_3_last_2':diff_last_2,\n",
    "                                                                      'feat_3_last_3':diff_last_3,\n",
    "            \n",
    "                                                                      'feat_3_first_1': diff_first_1,\n",
    "                                                                      'feat_3_first_2': diff_first_2,\n",
    "                                                                      'feat_3_first_3': diff_first_3,\n",
    "            \n",
    "                                                                     \n",
    "                                                                    })\n",
    "        actions = actions.reset_index()\n",
    "        actions.to_csv(dump_path,index=False)\n",
    "        return actions\n",
    "# Per actionType: statistics of gaps between consecutive action times, plus the times themselves\n",
    "def get_feat_4(train):\n",
    "    \"\"\"Per (userid, actionType in {1,5,6,7}) timing features, pivoted wide (CSV-cached).\n",
    "\n",
    "    For each group: mean/std/min of consecutive actionTime gaps, the last and an\n",
    "    early gap, plus positional actionTime picks (last 1-3, first 1-3).\n",
    "    BUG FIX: the original agg dict repeated the key 'feat_4_first_1_time' three\n",
    "    times, so the intended first_2/first_3 columns were silently dropped; the\n",
    "    duplicates are now the first_2/first_3 picks (matching last_2/last_3).\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_4_%s_zl_1_1.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "\n",
    "        df=df[(df['actionType']==1) | (df['actionType']==5) |(df['actionType']==6)|(df['actionType']==7)]\n",
    "        # NOTE: relies on legacy pandas SeriesGroupBy.agg({name: func}) renaming\n",
    "        # (removed in pandas >= 1.0).\n",
    "        df = df[['userid','actionType','actionTime']].groupby(['userid','actionType'])['actionTime']\\\n",
    "                                                            .agg({\n",
    "                                                                  'feat_4_mean':diff_mean,\n",
    "                                                                  'feat_4_std':diff_std,\n",
    "                                                                  'feat_4_min':diff_min,\n",
    "                                                                  'feat_4_last_1':diff_last_1,\n",
    "                                                                  'feat_4_first_1': diff_first_1,\n",
    "                                                                  'feat_4_last_1_time':last_1,\n",
    "                                                                  'feat_4_last_2_time':last_2,\n",
    "                                                                  'feat_4_last_3_time':last_3,\n",
    "                                                                  'feat_4_first_1_time':first_1,\n",
    "                                                                  'feat_4_first_2_time':first_2,\n",
    "                                                                  'feat_4_first_3_time':first_3,\n",
    "                                                            })\n",
    "        # Pivot actionType into the columns and flatten the MultiIndex.\n",
    "        df = df.unstack()\n",
    "        columns=[]\n",
    "        for i in df.columns.levels[0]:\n",
    "            for j in  df.columns.levels[1]:\n",
    "                columns.append(str(i)+'_'+str(j))\n",
    "        df.columns = columns\n",
    "        df = df.reset_index() \n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df\n",
    "def get_feat_5(train):\n",
    "    \"\"\"Mean/std of the last three actionTime gaps per (user, actionType in 1/5/6/7), pivoted wide (CSV-cached).\"\"\"\n",
    "    dump_path='../cache/get_feat_5_%s_zl.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        df=df[(df['actionType']==1) | (df['actionType']==5) |(df['actionType']==6)|(df['actionType']==7)]\n",
    "#         df = df[ (df['actionType']==5) ]\n",
    "        df = df[['userid','actionType','actionTime']].groupby(['userid','actionType'])['actionTime']\\\n",
    "            .agg({\n",
    "               'feat_5_mean':diff_last_3_mean,\n",
    "               'feat_5_std':diff_last_3_std\n",
    "#                'feat_5_std':diff_std,\n",
    "#                'feat_5_last':diff_last_1\n",
    "            })\n",
    "        # Pivot actionType into the columns and flatten the MultiIndex.\n",
    "        df = df.unstack()\n",
    "        columns=[]\n",
    "        for i in df.columns.levels[0]:\n",
    "            for j in  df.columns.levels[1]:\n",
    "                columns.append(str(i)+'_'+str(j))\n",
    "        df.columns = columns\n",
    "        df = df.reset_index()\n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df\n",
    "    \n",
    "def get_feat_8(train):\n",
    "    \"\"\"Seconds from each user's last action of type 1/5/6/7 to the reference\n",
    "    timestamp 1505145600, pivoted by actionType (CSV-cached).\"\"\"\n",
    "    dump_path = '../cache/get_feat_8_%s_5.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        df=df[(df['actionType']==1) | (df['actionType']==5) |(df['actionType']==6)|(df['actionType']==7)]\n",
    "        df['actionTime'] = 1505145600- df['actionTime']\n",
    "#         df['actionTime'] = df['actionTime']/(3600*24)\n",
    "\n",
    "        df = df[['userid','actionType','actionTime']].groupby(['userid','actionType'])['actionTime']\\\n",
    "                    .agg({\n",
    "                        'feat_8_last_1_time':last_1,\n",
    "#                         'feat_8_last_2_time':last_2,\n",
    "#                         'feat_8_last_3_time':last_3,\n",
    "#                         'feat_8_first_1_time':first_1,\n",
    "#                         'feat_8_first_2_time':first_2,\n",
    "#                         'feat_8_first_3_time':first_3\n",
    "                    })\n",
    "            \n",
    "        df = df.unstack()\n",
    "        columns=[]\n",
    "        for i in df.columns.levels[0]:\n",
    "            for j in  df.columns.levels[1]:\n",
    "                columns.append(str(i)+'_'+str(j))\n",
    "        df.columns = columns\n",
    "        df = df.reset_index()\n",
    "#         print(actions_2.columns)\n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df\n",
    "    \n",
    "    \n",
    "    \n",
    "def get_feat_9(train):\n",
    "    \"\"\"Gap statistics of a user's actions that happened AFTER their most recent order (CSV-cached).\n",
    "\n",
    "    BUG FIX: the original referenced undefined names feat_3_mean/feat_3_min/\n",
    "    feat_3_max/feat_3_std, raising NameError on the first (uncached) run; they\n",
    "    are replaced with the diff_* aggregators defined earlier in this cell, as\n",
    "    used by get_feat_3.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_9_%s.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()  \n",
    "\n",
    "        # Most recent order time per user -> 'start_time'.\n",
    "        df_1 = get_orderHistory(train)\n",
    "        df_1 = df_1[['userid','orderTime']].drop_duplicates(['userid'],keep='last')\n",
    "        df_1.columns=['userid','start_time']\n",
    "\n",
    "        actions = pd.merge(df,df_1,on=['userid'],how='left')\n",
    "        # Keep only actions strictly after the last order.\n",
    "        actions = actions[actions['actionTime']>actions['start_time']]\n",
    "        actions = actions[['userid','actionTime']].groupby(['userid'])['actionTime']\\\n",
    "                    .agg({\n",
    "                       'feat_9_mean':diff_mean,\n",
    "                       'feat_9_min':diff_min,\n",
    "                       'feat_9_max':diff_max,\n",
    "                       'feat_9_std':diff_std,\n",
    "                    })\n",
    "        actions = actions.reset_index()\n",
    "        actions.to_csv(dump_path,index=False)\n",
    "        return actions   \n",
    "    \n",
    "def get_feat_6(train):\n",
    "    \"\"\"Per-user counts of orderType 0/1 orders and the share of type-1 orders (CSV-cached).\n",
    "\n",
    "    NOTE(review): the left merge keeps only users with at least one orderType-0\n",
    "    order; users whose orders are all type 1 are dropped -- confirm intended.\n",
    "    The output column 'rate_oreder_1' has a typo but is a persisted column name,\n",
    "    so renaming it would break downstream consumers and the CSV cache.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_6_%s_1.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        df = get_orderHistory(train)\n",
    "        df_1 = df[df['orderType']==0][['userid','orderTime']].groupby(['userid'],as_index=False).count()\n",
    "        df_1.columns = ['userid','order_0_nums']\n",
    "        \n",
    "        df_2 = df[df['orderType']==1][['userid','orderTime']].groupby(['userid'],as_index=False).count()\n",
    "        df_2.columns = ['userid','order_1_nums'] \n",
    "        \n",
    "        actions = pd.merge(df_1,df_2,on='userid',how='left')\n",
    "        actions['rate_oreder_1'] = actions['order_1_nums']/(actions['order_0_nums'] +actions['order_1_nums'])\n",
    "        actions = actions.fillna(0)\n",
    "        actions.to_csv(dump_path,index=False)\n",
    "        return actions\n",
    "#\n",
    "def get_feat_7(train):\n",
    "    \"\"\"Seconds from the user's most recent orderType-0 and orderType-1 orders to\n",
    "    the reference timestamp 1505145600; outer-joined per user (CSV-cached).\"\"\"\n",
    "    dump_path='../cache/get_feat_7_%s.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        df = get_orderHistory(train)   \n",
    "        df_1 = df[df['orderType']==0][['userid','orderTime']].drop_duplicates(['userid'],keep='last')\n",
    "        df_1['orderTime'] = (1505145600-df_1['orderTime'])\n",
    "        df_1.columns =['userid','time_interval_7_0']\n",
    "        \n",
    "        df_2 = df[df['orderType']==1][['userid','orderTime']].drop_duplicates(['userid'],keep='last')\n",
    "        df_2['orderTime'] = (1505145600-df_2['orderTime'])\n",
    "        df_2.columns =['userid','time_interval_7_1']\n",
    "        \n",
    "\n",
    "        actions = pd.merge(df_1,df_2,on='userid',how='outer')\n",
    "#         actions = pd.merge(actions,df_3,on='userid',how='outer')\n",
    "#         actions = pd.merge(actions,df_4,on='userid',how='outer')\n",
    "        actions.to_csv(dump_path,index=False)\n",
    "        return actions\n",
    "    \n",
    "\n",
    "    \n",
    "    \n",
    "def get_feat_20(train):\n",
    "    \"\"\"Gap stats of a user's actions occurring after each actionType's last\n",
    "    occurrence, grouped by that type and pivoted wide (CSV-cached).\n",
    "\n",
    "    NOTE(review): the cache filename deliberately contains full-width dashes\n",
    "    ('zl——1'); keep the literal byte-identical or the cache will be missed.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_20_%s_zl——1.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()   \n",
    "#         df=df[(df['actionType']==1) | (df['actionType']==5) |(df['actionType']==6)|(df['actionType']==7)]    \n",
    "        # Last occurrence of each (user, actionType); merged on userid only, so\n",
    "        # every action row is paired with each of the user's per-type last times.\n",
    "        df_1 = df.drop_duplicates(['userid','actionType'],keep='last')[['userid','actionType','actionTime']]\n",
    "        df_1.columns = ['userid','actionType_last','actionTime_last']\n",
    "        df = pd.merge(df,df_1,on=['userid'],how='left')\n",
    "        df = df[df['actionTime']>df['actionTime_last']]\n",
    "        \n",
    "        df= df[['userid','actionType_last','actionTime']].groupby(['userid','actionType_last'])['actionTime']\\\n",
    "                                        .agg({\n",
    "                                             'feat_20_mean':diff_mean,\n",
    "                                              'feat_20_std':diff_std,\n",
    "#                                               'feat_20_max':diff_max,\n",
    "                                              'feat_20_min':diff_min\n",
    "                                        })\n",
    "        \n",
    "        \n",
    "        \n",
    "#         df = df.reset_index()\n",
    "        df = df.unstack()\n",
    "        columns=[]\n",
    "        for i in df.columns.levels[0]:\n",
    "            for j in  df.columns.levels[1]:\n",
    "                columns.append(str(i)+'_'+str(j))\n",
    "        df.columns = columns\n",
    "        df = df.reset_index()\n",
    "#         print(df)\n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df\n",
    "def get_feat_21(train):\n",
    "    \"\"\"Ratio of each user's orderType-1 order count to their count of every\n",
    "    actionType (CSV-cached). Only users with >= 1 type-1 order are kept (the\n",
    "    type-1 counts are the merge base).\"\"\"\n",
    "    dump_path='../cache/get_feat_21_%s_1.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        df = get_orderHistory(train)\n",
    "        df = df[df['orderType']==1][['userid','orderType']].groupby(['userid'],as_index=False).count()\n",
    "        df.columns = ['userid','nums_label_1']\n",
    "        \n",
    "        if(train=='train'):\n",
    "            actions = get_all_train_data()\n",
    "        else:\n",
    "            actions = get_all_test_data()\n",
    "        actions_1 = pd.get_dummies(actions['actionType'],prefix='actions_21_type')\n",
    "        actions = pd.concat([actions[['userid']],actions_1],axis=1)\n",
    "        actions = actions.groupby(['userid'],as_index=False).sum()\n",
    "        actions = pd.merge(df,actions,on='userid',how='left')\n",
    "        # NOTE(review): range(1,10) assumes actionType values 1..9 all occur.\n",
    "        for i in range(1,10,1):\n",
    "            actions['actions_21_type_rate_'+str(i)]=actions['nums_label_1']/actions['actions_21_type_'+str(i)]\n",
    "            del actions['actions_21_type_'+str(i)]\n",
    "        del actions['nums_label_1']\n",
    "        actions.to_csv(dump_path,index=False)\n",
    "        return actions\n",
    "\n",
    "\n",
    "    \n",
    "def get_feat_22_1567(train):\n",
    "    \"\"\"Average number of type-1/5/6/7 actions per active day, per user (CSV-cached).\"\"\"\n",
    "    dump_path='../cache/get_feat_22_%s_2_1.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        df=df[(df['actionType']==1) | (df['actionType']==5) |(df['actionType']==6)|(df['actionType']==7)] \n",
    "        df['date'] = df['actionTime'].map(lambda x:datetime.datetime.utcfromtimestamp(x).strftime('%Y-%m-%d'))\n",
    "        actions_1 = df[['date','userid']].groupby('userid',as_index=False).count()\n",
    "        actions_1.columns =['userid','nums_all']\n",
    "        \n",
    "#         actions_2 = df[['date','userid']].groupby('userid',as_index=False).count()\n",
    "        actions_2 = df[['date','userid']].drop_duplicates(['date','userid'],keep='last')\n",
    "        actions_2 = actions_2[['date','userid']].groupby('userid',as_index=False).count()\n",
    "        actions_2.columns =['userid','day_nums']\n",
    "        \n",
    "        actions = pd.merge(actions_1,actions_2,on='userid',how='left')\n",
    "        actions['rate_22_1']= actions['nums_all']/actions['day_nums']\n",
    "        del actions['nums_all']\n",
    "        actions.to_csv(dump_path,index=False)\n",
    "        return actions\n",
    "    \n",
    "def get_feat_22(train):\n",
    "    \"\"\"Average number of actions (all types) per active day, per user (CSV-cached).\"\"\"\n",
    "    dump_path='../cache/get_feat_22_%s_2.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        df['date'] = df['actionTime'].map(lambda x:datetime.datetime.utcfromtimestamp(x).strftime('%Y-%m-%d'))\n",
    "        actions_1 = df[['date','userid']].groupby('userid',as_index=False).count()\n",
    "        actions_1.columns =['userid','nums_all']\n",
    "        \n",
    "#         actions_2 = df[['date','userid']].groupby('userid',as_index=False).count()\n",
    "        actions_2 = df[['date','userid']].drop_duplicates(['date','userid'],keep='last')\n",
    "        actions_2 = actions_2[['date','userid']].groupby('userid',as_index=False).count()\n",
    "        actions_2.columns =['userid','day_nums']\n",
    "        \n",
    "        actions = pd.merge(actions_1,actions_2,on='userid',how='left')\n",
    "        actions['rate_22_1']= actions['nums_all']/actions['day_nums']\n",
    "        del actions['nums_all']\n",
    "        actions.to_csv(dump_path,index=False)\n",
    "        return actions\n",
    "    \n",
    "def get_feat_23(train):\n",
    "    \"\"\"Per (user, actionType in 1/5/6/7): total count divided by distinct active\n",
    "    days, pivoted wide (CSV-cached).\"\"\"\n",
    "    dump_path='../cache/get_feat_23_%s_2.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        df=df[(df['actionType']==1) | (df['actionType']==5) |(df['actionType']==6)|(df['actionType']==7)]   \n",
    "        df['date'] = df['actionTime'].map(lambda x:datetime.datetime.utcfromtimestamp(x).strftime('%Y-%m-%d'))\n",
    "        # Total action counts per (user, type).\n",
    "        actions_1 = df[['date','userid','actionType']].groupby(['userid','actionType']).count()\n",
    "        actions_1 = actions_1.unstack()\n",
    "        columns=[]\n",
    "        for i in actions_1.columns.levels[0]:\n",
    "            for j in  actions_1.columns.levels[1]:\n",
    "                columns.append(str(i)+'_'+str(j)+\"_nums\")\n",
    "        actions_1.columns = columns\n",
    "        actions_1 =actions_1.reset_index()\n",
    "#         actions_2 = df[['date','userid']].groupby('userid',as_index=False).count()\n",
    "        # Distinct active days per (user, type).\n",
    "        actions_2 = df[['date','userid','actionType']].drop_duplicates(['date','userid','actionType'],keep='last')\n",
    "        actions_2 = actions_2[['date','userid','actionType']].groupby(['userid','actionType']).count()\n",
    "        actions_2 = actions_2.unstack()\n",
    "        columns=[]\n",
    "        \n",
    "        cols_x =actions_2.columns.levels[0]\n",
    "        cols_y =actions_2.columns.levels[1]\n",
    "        for i in actions_2.columns.levels[0]:\n",
    "            for j in  actions_2.columns.levels[1]:\n",
    "                columns.append(str(i)+'_'+str(j)+\"_days\")\n",
    "        actions_2.columns = columns\n",
    "        actions_2 = actions_2.reset_index()\n",
    "        actions = pd.merge(actions_1,actions_2,on=['userid'],how='left')\n",
    "        for i in cols_x:\n",
    "            for j in  cols_y:\n",
    "                actions[str(i)+'_'+str(j)+'rate_22_1']= actions[str(i)+'_'+str(j)+\"_nums\"]/actions[str(i)+'_'+str(j)+\"_days\"]\n",
    "                del actions[str(i)+'_'+str(j)+\"_nums\"]\n",
    "        actions.to_csv(dump_path,index=False)\n",
    "        return actions\n",
    "    \n",
    "\n",
    "def get_feat_26(train):\n",
    "    \"\"\"Per (user, actionType in 1/5/6/7): actions per active day (count / distinct\n",
    "    days), pivoted wide (CSV-cached). Same idea as get_feat_23 but computed via a\n",
    "    long-format merge then unstacked.\"\"\"\n",
    "    dump_path='../cache/get_feat_26_%s_2.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "#         df=df.head(100)\n",
    "        df=df[(df['actionType']==1) | (df['actionType']==5) |(df['actionType']==6)|(df['actionType']==7)] \n",
    "        df['date'] = df['actionTime'].map(lambda x:datetime.datetime.utcfromtimestamp(x).strftime('%Y-%m-%d'))\n",
    "        actions_1 = df[['date','actionType','userid']].groupby(['userid','actionType'],as_index=False).count()\n",
    "        actions_1.columns =['userid','actionType','nums_all_26']\n",
    "        \n",
    "#         actions_2 = df[['date','userid']].groupby('userid',as_index=False).count()\n",
    "        actions_2 = df[['date','actionType','userid']].drop_duplicates(['date','actionType','userid'],keep='last')\n",
    "        actions_2 = actions_2[['date','actionType','userid']].groupby(['userid','actionType'],as_index=False).count()\n",
    "        actions_2.columns =['userid','actionType','day_nums_26']\n",
    "        \n",
    "        actions = pd.merge(actions_1,actions_2,on=['userid','actionType'],how='left')\n",
    "        actions['rate_26_1']= actions['nums_all_26']/actions['day_nums_26']\n",
    "        del actions['nums_all_26']\n",
    "        actions = actions.groupby(['userid','actionType']).mean()\n",
    "        actions = actions.unstack()\n",
    "        columns=[]\n",
    "        for i in actions.columns.levels[0]:\n",
    "            for j in  actions.columns.levels[1]:\n",
    "                columns.append(str(i)+'_'+str(j)+\"_nums\")\n",
    "        actions.columns = columns\n",
    "        actions =actions.reset_index()\n",
    "#         print(actions.head(20))\n",
    "#         print(actions.columns)\n",
    "        \n",
    "        actions.to_csv(dump_path,index=False)\n",
    "        return actions\n",
    "def get_feat_27(train):\n",
    "    \"\"\"Per user: time difference between the last action of each type 1/6/7 and\n",
    "    the user's last type-5 action, pivoted by type (CSV-cached).\n",
    "\n",
    "    NOTE(review): the >= filter is commented out, so differences can be negative\n",
    "    when the type-5 action came later -- confirm intended. Column suffix '_nums'\n",
    "    is a misnomer (values are time deltas) but is a persisted name.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_27_%s_10.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "#         df=df.head(100)\n",
    "        df_1 = df[df['actionType']==5].drop_duplicates(['userid'],keep='last')[['userid','actionTime']]\n",
    "        df_1.columns = ['userid','actionTime_5_last']\n",
    "        df=df[(df['actionType']==1) |(df['actionType']==6)|(df['actionType']==7)] \n",
    "#         df =  df[df['actionType']!=5]\n",
    "        df= df.drop_duplicates(['userid','actionType'],keep='last')\n",
    "       \n",
    "        df = pd.merge(df,df_1,on='userid',how='left') \n",
    "#         print(df.columns)\n",
    "#         df = df[df['actionTime']>=df['actionTime_5_last']]\n",
    "        df['get_feat_27_chazhi'] =df['actionTime'] - df['actionTime_5_last']\n",
    "        \n",
    "        df = df[['userid','actionType','get_feat_27_chazhi']].groupby(['userid','actionType']).sum()\n",
    "        \n",
    "        df = df.unstack()\n",
    "        columns =[]\n",
    "        for i in df.columns.levels[0]:\n",
    "            for j in  df.columns.levels[1]:\n",
    "                columns.append(str(i)+'_'+str(j)+\"_nums\")\n",
    "        \n",
    "        df.columns =columns\n",
    "        df =df.reset_index()\n",
    "#         print(actions.head(20))\n",
    "#         print(actions.columns)\n",
    "        \n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df\n",
    "    \n",
    "def get_feat_28(train):\n",
    "    \"\"\"Per (user, actionType 1/6/7): summed delay from preceding type-5 actions,\n",
    "    counting only follow-ups within 5 minutes (300 s), pivoted wide (CSV-cached).\n",
    "\n",
    "    NOTE(review): every type-1/6/7 action is paired with EVERY type-5 action of\n",
    "    the same user (merge on userid only) before filtering; column name\n",
    "    'actionTime_5_28_last' and suffix '_nums' are persisted misnomers.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_28_%s_8.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "#         df=df.head(100)\n",
    "        df_1 = df[df['actionType']==5][['userid','actionTime']]\n",
    "        df_1.columns = ['userid','actionTime_5_28_last']\n",
    "        df=df[(df['actionType']==1) |(df['actionType']==6)|(df['actionType']==7)] \n",
    "#         df =  df[df['actionType']!=5]\n",
    "#         df= df.drop_duplicates(['userid','actionType'],keep='last')\n",
    "        df = pd.merge(df,df_1,on='userid',how='left') \n",
    "#         print(df.columns)\n",
    "        df = df[df['actionTime']>=df['actionTime_5_28_last']]\n",
    "        df['get_feat_28_chazhi'] =df['actionTime'] - df['actionTime_5_28_last']\n",
    "        df = df[df['get_feat_28_chazhi']<60*5]\n",
    "        df = df[['userid','actionType','get_feat_28_chazhi']].groupby(['userid','actionType']).sum()\n",
    "        \n",
    "        df = df.unstack()\n",
    "        columns =[]\n",
    "        for i in df.columns.levels[0]:\n",
    "            for j in  df.columns.levels[1]:\n",
    "                columns.append(str(i)+'_'+str(j)+\"_nums\")\n",
    "        \n",
    "        df.columns =columns\n",
    "        df =df.reset_index()\n",
    "#         print(actions.head(20))\n",
    "#         print(actions.columns)\n",
    "        \n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df\n",
    "# NOTE(review): get_feat_29 below is dead, commented-out code (a near copy of\n",
    "# get_feat_28 with its own dump path); delete once confirmed unused.\n",
    "# def get_feat_29(train):\n",
    "#     dump_path='../cache/get_feat_29_%s_9.csv'%train\n",
    "#     if os.path.exists(dump_path):\n",
    "#         actions = pd.read_csv(dump_path)\n",
    "#         return actions\n",
    "#     else:\n",
    "#         if(train=='train'):\n",
    "#             df = get_all_train_data()\n",
    "#         else:\n",
    "#             df = get_all_test_data()\n",
    "# #         df=df.head(100)\n",
    "#         df_1 = df[df['actionType']==5][['userid','actionTime']]\n",
    "#         df_1.columns = ['userid','actionTime_5_28_last']\n",
    "#         df=df[(df['actionType']==1) |(df['actionType']==6)|(df['actionType']==7)] \n",
    "# #         df =  df[df['actionType']!=5]\n",
    "# #         df= df.drop_duplicates(['userid','actionType'],keep='last')\n",
    "#         df = pd.merge(df,df_1,on='userid',how='left') \n",
    "# #         print(df.columns)\n",
    "#         df = df[df['actionTime']>=df['actionTime_5_28_last']]\n",
    "#         df['get_feat_28_chazhi'] =df['actionTime'] - df['actionTime_5_28_last']\n",
    "#         df = df[df['get_feat_28_chazhi']<60*5]\n",
    "#         df = df[['userid','actionType','get_feat_28_chazhi']].groupby(['userid','actionType']).sum()\n",
    "        \n",
    "#         df = df.unstack()\n",
    "#         columns =[]\n",
    "#         for i in df.columns.levels[0]:\n",
    "#             for j in  df.columns.levels[1]:\n",
    "#                 columns.append(str(i)+'_'+str(j)+\"_nums\")\n",
    "        \n",
    "#         df.columns =columns\n",
    "#         df =df.reset_index()\n",
    "# #         print(actions.head(20))\n",
    "# #         print(actions.columns)\n",
    "        \n",
    "#         df.to_csv(dump_path,index=False)\n",
    "#         return df\n",
    "def get_feat_30(train):\n",
    "    \"\"\"Per user and action type in {1, 5, 6, 7}: seconds between the user's\n",
    "    overall last action and the last occurrence of that type. Cached as CSV.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_30_%s_9.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "#         df=df.head(100)\n",
    "        # Timestamp of each user's very last action (log is sorted by time).\n",
    "        df_1 = df.drop_duplicates(['userid'],keep='last')[['userid','actionTime']]\n",
    "        df_1.columns = ['userid','actionTime_30_last']\n",
    "#         df_2 = df[(df['actionType']==2)|(df['actionType']==3)|(df['actionType']==4)] \n",
    "    \n",
    "        # Last occurrence of each of the types 1/5/6/7 per user.\n",
    "        df=df[(df['actionType']==1)|(df['actionType']==5)|(df['actionType']==6)|(df['actionType']==7)] \n",
    "        df = df.drop_duplicates(['userid','actionType'],keep='last')\n",
    "#         df= df.drop_duplicates(['userid','actionType'],keep='last')\n",
    "        df = pd.merge(df,df_1,on='userid',how='left') \n",
    "#         print(df.columns)\n",
    "        \n",
    "        # Recency gap: overall last action minus this type's last occurrence.\n",
    "        df['get_feat_30_chazhi'] =(df['actionTime_30_last'] - df['actionTime'])\n",
    "        df = df[['userid','actionType','get_feat_30_chazhi']].groupby(['userid','actionType']).sum()\n",
    "        \n",
    "        # Pivot to one column per action type and flatten the MultiIndex.\n",
    "        df = df.unstack()\n",
    "        columns =[]\n",
    "        for i in df.columns.levels[0]:\n",
    "            for j in  df.columns.levels[1]:\n",
    "                columns.append(str(i)+'_'+str(j)+\"_nums\")\n",
    "        \n",
    "        # NOTE(review): '_nums' is misleading — these are second gaps, not counts.\n",
    "        df.columns =columns\n",
    "        df =df.reset_index()\n",
    "#         print(actions.head(20))\n",
    "#         print(actions.columns)\n",
    "        \n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df\n",
    "    \n",
    "\n",
    "# Sentinel output: signals that the cell finished defining all feature helpers.\n",
    "print(\"finish\")    \n",
    "\n",
    "print(\"finish\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "finish\n"
     ]
    }
   ],
   "source": [
    "def city_map(x):\n",
    "    if x =='新加坡' or x == '东京':\n",
    "        return 1\n",
    "    elif x=='纽约' or x=='台北' or x=='吉隆坡' or x=='悉尼' or x=='香港' or x=='大阪':\n",
    "        return 2\n",
    "    elif x=='墨尔本' or x=='曼谷' or x=='伦敦' or x=='洛杉矶' or x=='巴厘岛' or x=='普吉岛' or x=='首尔' or x=='旧金山' or x=='清迈' or x=='京都' or x=='巴黎':\n",
    "        return 3\n",
    "    else:\n",
    "        return 0\n",
    "\n",
    "def country_map(x):\n",
    "    if x =='日本' or x == '美国' or x == '澳大利亚':\n",
    "        return 1\n",
    "    elif  x =='新加坡' or x == '泰国' or x == '马来西亚' or  x =='中国台湾' or x == '中国香港' :\n",
    "        return 2\n",
    "    elif  x =='法国' or x == '英国' or x == '韩国' or  x =='印度尼西亚' or x == '加拿大' or x =='意大利' or x == '西班牙' or x == '新西兰' or  x =='越南' or x == '阿联酋':\n",
    "        return 3\n",
    "    else:\n",
    "        return 0\n",
    "def continent_map(x):\n",
    "    if x == '亚洲':\n",
    "        return 1\n",
    "    elif x == '北美洲':\n",
    "        return 2\n",
    "    elif x == '大洋洲':\n",
    "        return 3\n",
    "    else:\n",
    "        return 0\n",
    "\n",
    "    \n",
    "def get_feat_001(train):\n",
    "    \"\"\"Per-user counts of coarse destination tiers in the order history.\n",
    "\n",
    "    city/country/continent are bucketed by the *_map helpers, one-hot encoded\n",
    "    and summed per user. Result is cached as CSV under ../cache/.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_001_%s_9.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        df = get_orderHistory(train)\n",
    "        # Bucket raw names into small integer tiers.\n",
    "        df['city'] = df['city'].map(city_map)\n",
    "        df['country'] = df['country'].map(country_map)\n",
    "        df['continent'] = df['continent'].map(continent_map)\n",
    "        df_1 = pd.get_dummies(df['city'],prefix='city')\n",
    "        df_2 = pd.get_dummies(df['country'],prefix='country')\n",
    "        df_3 =  pd.get_dummies(df['continent'],prefix='continent')\n",
    "        \n",
    "        df = pd.concat([df[['userid']],df_1,df_2,df_3],axis=1)\n",
    "        \n",
    "        # Summing one-hot rows -> per-user counts of orders in each tier.\n",
    "        df = df.groupby(['userid'],as_index=False).sum()\n",
    "        \n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df\n",
    "def get_feat_002(train):\n",
    "    \"\"\"Gap (seconds) between each user's last action and their last historical order.\n",
    "\n",
    "    Negative values mean the last recorded order happened after the last\n",
    "    action; users with no order history get NaN. Returns\n",
    "    ['userid', 'get_feat_002'] and caches it under ../cache/.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_002_%s.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        df_1 = get_orderHistory(train)\n",
    "        # Last action timestamp per user (input is sorted by actionTime asc).\n",
    "        df = df[['userid','actionTime']].drop_duplicates(['userid'],keep='last')\n",
    "        \n",
    "        # Last order timestamp per user.\n",
    "        df_1 = df_1[['userid','orderTime']].drop_duplicates(['userid'],keep='last')\n",
    "        df = pd.merge(df,df_1,on='userid',how='left')\n",
    "        df['get_feat_002'] = df['actionTime'] - df['orderTime']\n",
    "        df[['userid','get_feat_002']].to_csv(dump_path,index=False)\n",
    "        return df[['userid','get_feat_002']]\n",
    "# NOTE(review): dead, commented-out stub (superseded by get_feat_003 below) —\n",
    "# safe to delete.\n",
    "# def get_feat_003(train):\n",
    "#     dump_path='../cache/get_feat_002_%s_9.csv'%train\n",
    "#     if os.path.exists(dump_path):\n",
    "#         actions = pd.read_csv(dump_path)\n",
    "#         return actions\n",
    "#     else:\n",
    "def get_feat_003(train):\n",
    "    \"\"\"Number of actions a user performed on their last active day.\n",
    "\n",
    "    Returns ['userid', 'same_day'] and caches the result under ../cache/.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_003_%s——2.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "            \n",
    "        df['time'] = df['actionTime'].map( lambda x: datetime.datetime.utcfromtimestamp(x))\n",
    "        # Continuous day index: ISO week * 7 + ISO weekday, shifted per year.\n",
    "        # NOTE(review): the 356/year offset looks too small (week*7+weekday can\n",
    "        # exceed it), so late-2016 and early-2017 indices may collide — possibly\n",
    "        # a 365 typo; the same constant appears in get_feat_004/006/007/008.\n",
    "        df['day'] = df['time'].map( lambda x: x.isocalendar()[1]*7+x.isocalendar()[2])\n",
    "        df['day'] = df['day']+ (df['time'].dt.year-2016)*356\n",
    "        \n",
    "        \n",
    "#         df['month'] = df['time'].dt.month\n",
    "        \n",
    "        # Each user's last active day.\n",
    "        df_1 = df[['userid','day']].drop_duplicates(['userid'],keep='last')\n",
    "        \n",
    "        # Action count per user-day.\n",
    "        df  =  df[['userid','day','actionTime']].groupby(['userid','day'],as_index=False).count()\n",
    "        \n",
    "        df.columns = ['userid','day','same_day']\n",
    "        \n",
    "        df = pd.merge(df_1,df,on = ['userid','day'],how='left')\n",
    "#         del df['month']\n",
    "        del df['day']\n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df\n",
    "\n",
    "def get_feat_004(train):\n",
    "    \"\"\"Per-user aggregates of the day-index series via the external helpers\n",
    "    diff_last_1 / diff_last_2 (defined elsewhere in this notebook).\n",
    "    Cached as CSV under ../cache/.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_004_%s——2.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "            \n",
    "        df['time'] = df['actionTime'].map( lambda x: datetime.datetime.utcfromtimestamp(x))\n",
    "        # Continuous day index (ISO week*7 + weekday, shifted 356 per year —\n",
    "        # NOTE(review): 356 is possibly a 365 typo; confirm).\n",
    "        df['day'] = df['time'].map( lambda x: x.isocalendar()[1]*7+x.isocalendar()[2])\n",
    "        df['day'] = df['day']+ (df['time'].dt.year-2016)*356\n",
    "\n",
    "        \n",
    "        # NOTE(review): df_1 is computed but never used below.\n",
    "        df_1 = df[['userid','day']].drop_duplicates(['userid'],keep='last')\n",
    "        \n",
    "        df  =  df[['userid','day']].groupby(['userid'])['day'].agg({\n",
    "                                                        'get_feat_004_last_1':diff_last_1,\n",
    "                                                        'get_feat_004_last_2':diff_last_2\n",
    "                                                        \n",
    "                                                })\n",
    "#         df['get_feat_004_last_1'] = (df['get_feat_004_last_1']+30)%30\n",
    "#         df['get_feat_004_last_2'] = (df['get_feat_004_last_2']+30)%30\n",
    "#         print(df)\n",
    "#         df = df.unstack()\n",
    "#         columns =[]\n",
    "#         for i in df.columns.levels[0]:\n",
    "#             for j in  df.columns.levels[1]:\n",
    "#                 columns.append(str(i)+'_'+str(j)+\"_nums\")\n",
    "        \n",
    "#         df.columns =columns\n",
    "        df =df.reset_index()\n",
    "#         del df['month']\n",
    "       \n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df\n",
    "def get_feat_006(train):\n",
    "    \"\"\"Mean per-day time span of a user's type-5 actions: within each active\n",
    "    day, last minus first type-5 timestamp (via the external last_sub_fisrt\n",
    "    helper), then averaged across days per user. Cached under ../cache/.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_006_%s——3.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "            \n",
    "        df['date'] = df['actionTime'].map(lambda x:datetime.datetime.utcfromtimestamp(x).strftime('%Y-%m-%d'))\n",
    "        df['time'] = df['actionTime'].map( lambda x: datetime.datetime.utcfromtimestamp(x))\n",
    "        # Continuous day index (ISO week*7 + weekday, shifted 356 per year —\n",
    "        # NOTE(review): 356 is possibly a 365 typo; confirm).\n",
    "        df['day'] = df['time'].map( lambda x: x.isocalendar()[1]*7+x.isocalendar()[2])\n",
    "        df['day'] = df['day']+ (df['time'].dt.year-2016)*356\n",
    "        # print(df)\n",
    "        df = df[df['actionType']==5]\n",
    "        # Span between first and last type-5 action within each user-day.\n",
    "        df  =  df[['userid','day','actionTime']].groupby(['userid','day'],as_index=False)['actionTime'].agg({\n",
    "                                                                'get_feat_006':last_sub_fisrt,\n",
    "\n",
    "\n",
    "                                                        })\n",
    "        # Average the daily spans per user.\n",
    "        df  =  df[['userid','get_feat_006']].groupby(['userid'],as_index=False).mean()\n",
    "\n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df  \n",
    "def last_sub_fisrt(x):\n",
    "    return x[-1:].mean()-x[:1].mean()\n",
    "def get_nums(x):\n",
    "    return x.shape[0]\n",
    "def get_feat_007(train):\n",
    "    \"\"\"Per-user activity statistics on 'countdown' timestamps.\n",
    "\n",
    "    actionTime is replaced by 1505145600 - actionTime (seconds before\n",
    "    2017-09-12, the times_zl reference date). Stats — first/last span, count,\n",
    "    diff mean/std via the external diff_mean / diff_std helpers — are computed\n",
    "    twice: on all rows and on rows deduplicated per (user, day, actionType),\n",
    "    plus the ratios between the two variants.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_007_%s——1.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "            \n",
    "        df['date'] = df['actionTime'].map(lambda x:datetime.datetime.utcfromtimestamp(x).strftime('%Y-%m-%d'))\n",
    "        df['time'] = df['actionTime'].map( lambda x: datetime.datetime.utcfromtimestamp(x))\n",
    "        # Continuous day index (ISO week*7 + weekday, shifted 356 per year —\n",
    "        # NOTE(review): 356 is possibly a 365 typo; confirm).\n",
    "        df['day'] = df['time'].map( lambda x: x.isocalendar()[1]*7+x.isocalendar()[2])\n",
    "        df['day'] = df['day']+ (df['time'].dt.year-2016)*356\n",
    "        # print(df)\n",
    "        # Countdown transform: seconds remaining before the reference date.\n",
    "        df['actionTime'] = 1505145600- df['actionTime']\n",
    "        # NOTE(review): merging the distinct (userid, day) pairs back onto df\n",
    "        # restores every row, so actions_1 aggregates ALL actions — confirm this\n",
    "        # was intended rather than last-row-per-day.\n",
    "        df_1 = df.drop_duplicates(['userid','day'],keep='last')[['userid','day']]\n",
    "        df_1 = pd.merge(df_1,df,on=['userid','day'],how='left')\n",
    "        actions_1  =  df_1[['userid','actionTime']].groupby(['userid'],as_index=False)['actionTime'].agg({\n",
    "                                                                'get_feat_007_first_last':last_sub_fisrt,\n",
    "                                                                'get_feat_007_nums':get_nums,\n",
    "                                                                'get_feat_007_diff_mean':diff_mean,\n",
    "                                                                'get_feat_007_diff_std':diff_std,\n",
    "                                                        })\n",
    "#         df_1 = df.drop_duplicates(['user','day','actionType'],keep='last')\n",
    "        # Same stats after collapsing to one row per (user, day, actionType).\n",
    "        df_1 = df_1.drop_duplicates(['userid','day','actionType'],keep='last')\n",
    "#         df_1 = pd.merge(df_1,df,on=['user','day','actionType'],how='left')\n",
    "        actions_2  =  df_1[['userid','actionTime']].groupby(['userid'],as_index=False)['actionTime'].agg({\n",
    "                                                                'get_feat_007_first_last_not_reap':last_sub_fisrt,\n",
    "                                                                'get_feat_007_nums_not_reap':get_nums,\n",
    "                                                                'get_feat_007_diff_mean_not_reap':diff_mean,\n",
    "                                                                'get_feat_007_diff_std_not_reap':diff_std,\n",
    "                                                        })\n",
    "        df  =  pd.merge(actions_1,actions_2,on='userid',how='left')\n",
    "        # Ratios: deduplicated variant relative to the raw variant.\n",
    "        df['rate_get_feat_007_first_last'] = df['get_feat_007_first_last_not_reap']/df['get_feat_007_first_last']\n",
    "        df['rate_get_feat_007_nums'] = df['get_feat_007_nums_not_reap']/df['get_feat_007_nums']\n",
    "        df['rate_get_get_feat_007_diff_mean'] = df['get_feat_007_diff_mean_not_reap']/df['get_feat_007_diff_mean']\n",
    "        df['rate_get_feat_007_diff_std'] = df['get_feat_007_diff_std_not_reap']/df['get_feat_007_diff_std']\n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df\n",
    "def get_feat_008(train):\n",
    "    \"\"\"Per-user share of each action type, counting a type at most once per\n",
    "    active day.\n",
    "\n",
    "    Output columns keep the historical 'get_feat_007_type' prefix (copy-paste\n",
    "    from get_feat_007) so cached CSVs and downstream merges stay compatible.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_008_%s——2.csv'%train\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "            \n",
    "        df['date'] = df['actionTime'].map(lambda x:datetime.datetime.utcfromtimestamp(x).strftime('%Y-%m-%d'))\n",
    "        df['time'] = df['actionTime'].map( lambda x: datetime.datetime.utcfromtimestamp(x))\n",
    "        # Continuous day index (ISO week*7 + weekday, shifted 356 per year).\n",
    "        df['day'] = df['time'].map( lambda x: x.isocalendar()[1]*7+x.isocalendar()[2])\n",
    "        df['day'] = df['day']+ (df['time'].dt.year-2016)*356\n",
    "        \n",
    "        # Count each action type at most once per user-day.\n",
    "        df = df.drop_duplicates(['userid','day','actionType'],keep='last')\n",
    "        \n",
    "        actions_1 = pd.get_dummies(df['actionType'],prefix='get_feat_007_type')\n",
    "        actions = pd.concat([df[['userid']],actions_1],axis=1)\n",
    "        actions = actions.groupby(['userid'],as_index=False).sum()\n",
    "        # get_dummies only emits columns for types actually present; create any\n",
    "        # missing type columns as zeros so the loops below cannot KeyError.\n",
    "        for i in range(1,10,1):\n",
    "            if 'get_feat_007_type_'+str(i) not in actions.columns:\n",
    "                actions['get_feat_007_type_'+str(i)] = 0\n",
    "        actions['get_feat_007_type']=0\n",
    "        for i in range(1,10,1):\n",
    "            actions['get_feat_007_type']=actions['get_feat_007_type']+actions['get_feat_007_type_'+str(i)]\n",
    "        for i in range(1,10,1):\n",
    "            actions['get_feat_007_type_rate_'+str(i)]=actions['get_feat_007_type_'+str(i)]/actions['get_feat_007_type']\n",
    "            del actions['get_feat_007_type_'+str(i)]\n",
    "        actions.to_csv(dump_path,index=False)\n",
    "        return actions\n",
    "def get_feat_009(train):\n",
    "    \"\"\"Stats of time gaps between a user's actions AFTER their last historical\n",
    "    order.\n",
    "\n",
    "    Users without order history get orderTime_end == 0, i.e. all of their\n",
    "    actions are kept. Uses the external diff_mean/diff_std/diff_max/diff_min\n",
    "    helpers and returns userid plus those four columns.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_009_%s.csv'%(train)\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        df_order = get_orderHistory(train)\n",
    "        # Last order timestamp per user.\n",
    "        df_order_1 = df_order[['userid','orderTime']]\n",
    "        df_order_1.columns =['userid','orderTime_end']\n",
    "        df_order_1 = df_order_1.drop_duplicates(['userid'],keep='last')\n",
    "        \n",
    "        df = pd.merge(df,df_order_1,on='userid',how='left')\n",
    "        df['orderTime_end'] = df['orderTime_end'].fillna(0)\n",
    "\n",
    "        # Keep only actions strictly after the last order.\n",
    "        df = df[(df['actionTime']>df['orderTime_end'])]\n",
    "        \n",
    "        actions = df[['userid','actionTime']].groupby(['userid'])['actionTime'].agg({\n",
    "                                                                      'get_feat_009_mean':diff_mean,\n",
    "                                                                      'get_feat_009_std':diff_std,\n",
    "                                                                      'get_feat_009_max':diff_max,\n",
    "                                                                      'get_feat_009_min':diff_min,\n",
    "                                                                    })\n",
    "        actions = actions.reset_index()\n",
    "        # Write the cache so the os.path.exists() branch above can actually hit\n",
    "        # (the write was previously commented out, leaving a read path with no\n",
    "        # writer and the risk of serving a stale file from an older run).\n",
    "        actions.to_csv(dump_path,index=False)\n",
    "        return actions\n",
    "\n",
    "def get_feat_010(train):\n",
    "    \"\"\"Province-level order statistics attached to each user.\n",
    "\n",
    "    Joins order history with user profiles (get_userProfile, defined elsewhere\n",
    "    in this notebook); per province computes the order count, the sum of\n",
    "    orderType and their ratio, then returns those columns per user (the\n",
    "    province column itself is dropped). Cached under ../cache/.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_010_%s_1.csv'%(train)\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        df_prof = get_userProfile(train)\n",
    "        df_order = get_orderHistory(train)\n",
    "        \n",
    "        df =pd.merge(df_order,df_prof,on='userid',how='left')\n",
    "        actions = df[['userid','province']].drop_duplicates(['userid'],keep='last')\n",
    "        # df['date'] = df['orderTime'].map(lambda x:datetime.datetime.utcfromtimestamp(x).strftime('%Y-%m-%d'))\n",
    "#         df_1 = df[df['orderType']==1]\n",
    "        \n",
    "        # count() -> total orders per province (orderType is just the counted column).\n",
    "        actions_0 = df[['province','orderType']].groupby('province',as_index=False).count()\n",
    "        actions_0.columns = ['province','orderType_0_nums']\n",
    "        \n",
    "        # sum() of orderType -> number of type-1 orders per province, assuming\n",
    "        # orderType is 0/1 as used elsewhere in this notebook — confirm.\n",
    "        actions_1 = df[['province','orderType']].groupby('province',as_index=False).sum()\n",
    "        actions_1.columns = ['province','orderType_1_nums']\n",
    "        \n",
    "        actions = pd.merge(actions,actions_0,on='province',how='left')\n",
    "        \n",
    "        actions = pd.merge(actions,actions_1,on='province',how='left')\n",
    "        \n",
    "        actions['rate_orderType'] = actions['orderType_1_nums']/ actions['orderType_0_nums']\n",
    "        del actions['province']\n",
    "        actions.to_csv(dump_path,index=False)\n",
    "        return actions\n",
    "def get_feat_011(train):\n",
    "    \"\"\"Per user: aggregates of the positive gaps between every ordered pair of\n",
    "    their actions, via the external first_1/first_2/first_3 helpers applied to\n",
    "    the gap series sorted ascending.\n",
    "\n",
    "    NOTE(review): the self-merge materializes every ordered action pair per\n",
    "    user (O(n^2) rows) — memory-heavy for very active users.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_11_%s.csv'%(train)\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        # Self-merge: every pair of actions of the same user; keep later-earlier.\n",
    "        df = pd.merge(df,df,on='userid',how='left')\n",
    "        df = df[df['actionTime_x']>df['actionTime_y']]\n",
    "        df['diff_time'] =df['actionTime_x'] - df['actionTime_y']\n",
    "        df = df.sort_values(by='diff_time',ascending=1)\n",
    "        df =df[['userid','diff_time']].groupby(['userid'])['diff_time'].agg({\n",
    "                                    'get_feat_011_first_1_time':first_1,\n",
    "                        'get_feat_011_first_2_time':first_2,\n",
    "                        'get_feat_011_first_3_time':first_3\n",
    "        })\n",
    "        df = df.reset_index()\n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df\n",
    "\n",
    "\n",
    "def get_feat_011_1(train):\n",
    "    \"\"\"Per user: the (actionType_x, actionType_y) pairs aggregated by the\n",
    "    external first_1/first_2/first_3 helpers after sorting all ordered action\n",
    "    pairs by their time gap ascending. The dict-of-functions agg over two\n",
    "    columns yields MultiIndex columns, flattened below into six plain columns.\n",
    "\n",
    "    NOTE(review): same O(n^2) self-merge hazard as get_feat_011.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_011_zl11_%s_1.csv'%(train)\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        \n",
    "        # Every ordered pair of a user's actions, later (_x) after earlier (_y).\n",
    "        df = pd.merge(df,df,on='userid',how='left')\n",
    "        df = df[df['actionTime_x']>df['actionTime_y']]\n",
    "        df['diff_time'] =df['actionTime_x'] - df['actionTime_y']\n",
    "        df = df.sort_values(by='diff_time',ascending=1)\n",
    "        df =df[['userid','actionType_x','actionType_y']].groupby(['userid'],as_index=False)['actionType_x','actionType_y'].agg({\n",
    "                                'get_feat_011_1_first_type':first_1,\n",
    "                    'get_feat_011_1_first_2_type':first_2,\n",
    "                    'get_feat_011_1_first_3_type':first_3\n",
    "        })\n",
    "        \n",
    "        df_1 = df['userid']\n",
    "        \n",
    "        # Flatten the (agg-name, column) MultiIndex into plain column names.\n",
    "        df_2 = pd.DataFrame({'get_feat_011_1_first_type_1':df['get_feat_011_1_first_type'].actionType_x,'get_feat_011_1_first_type_2':df['get_feat_011_1_first_type'].actionType_y})\n",
    "        df_3 = pd.DataFrame({'get_feat_011_2_first_type_1':df['get_feat_011_1_first_2_type'].actionType_x,'get_feat_011_2_first_type_2':df['get_feat_011_1_first_2_type'].actionType_y})\n",
    "\n",
    "        df_4 = pd.DataFrame({'get_feat_011_3_first_type_1':df['get_feat_011_1_first_3_type'].actionType_x,'get_feat_011_3_first_type_2':df['get_feat_011_1_first_3_type'].actionType_y})\n",
    "        df = pd.concat([df_1,df_2,df_3,df_4],axis=1)\n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df\n",
    "def get_feat_012(train,type_s,type_e):\n",
    "    \"\"\"Per user: aggregates (external first_1/2/3 helpers) of the gaps from\n",
    "    each type_s action to every LATER type_e action, sorted ascending.\n",
    "\n",
    "    NOTE(review): the output column names reuse the 'get_feat_011_first_*'\n",
    "    prefix (copy-paste from get_feat_011); kept because cached CSVs and\n",
    "    downstream code reference them.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_12_%s_%s_%s_2.csv'%(train,str(type_s),str(type_e))\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        # Pair every type_s action with every later type_e action of the user.\n",
    "        df_1 = df[df['actionType']==type_s]\n",
    "        df_2 = df[df['actionType']==type_e]\n",
    "        df = pd.merge(df_1,df_2,on='userid',how='left')\n",
    "        df = df[df['actionTime_x']<df['actionTime_y']]\n",
    "        df['diff_time_'+str(type_s)+\"_\"+str(type_e)] =df['actionTime_y'] - df['actionTime_x']\n",
    "        df = df.sort_values(by='diff_time_'+str(type_s)+\"_\"+str(type_e),ascending=1)\n",
    "        df =df[['userid','diff_time_'+str(type_s)+\"_\"+str(type_e)]].groupby(['userid'])['diff_time_'+str(type_s)+\"_\"+str(type_e)].agg({\n",
    "                                    'get_feat_011_first_1_time'+'diff_time_'+str(type_s)+\"_\"+str(type_e):first_1,\n",
    "                        'get_feat_011_first_2_time'+'diff_time_'+str(type_s)+\"_\"+str(type_e):first_2,\n",
    "                        'get_feat_011_first_3_time'+'diff_time_'+str(type_s)+\"_\"+str(type_e):first_3\n",
    "        })\n",
    "        df = df.reset_index()\n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df\n",
    "\n",
    "\n",
    "def get_feat_013(train,type_s,type_e):\n",
    "    \"\"\"For each type_s action: the gap to its EARLIEST later type_e action;\n",
    "    then per user the first/last 1-3 of those gaps (external first_*/last_*\n",
    "    helpers). Unlike get_feat_012, each type_s occurrence contributes at most\n",
    "    one gap. Cached under ../cache/.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_13_%s_%s_%s_2.csv'%(train,str(type_s),str(type_e))\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        \n",
    "        # Pair every type_s action with every later type_e action of the user...\n",
    "        df_1 = df[df['actionType']==type_s]\n",
    "        df_2 = df[df['actionType']==type_e]\n",
    "        df = pd.merge(df_1,df_2,on='userid',how='left')\n",
    "        \n",
    "        df = df[df['actionTime_x']<df['actionTime_y']]\n",
    "       \n",
    "        df['diff_time_'+str(type_s)+\"_\"+str(type_e)] =df['actionTime_y'] - df['actionTime_x']\n",
    "        df = df.sort_values(by=['userid','actionTime_x','actionTime_y'],ascending=[1,1,1]) \n",
    "        # ...then keep only the earliest follow-up per type_s occurrence.\n",
    "        df = df.drop_duplicates(['userid','actionTime_x'],keep='first')\n",
    "        df =df[['userid','diff_time_'+str(type_s)+\"_\"+str(type_e)]].groupby(['userid'])['diff_time_'+str(type_s)+\"_\"+str(type_e)].agg({\n",
    "                                    'get_feat_013_first_1_time'+'diff_time_'+str(type_s)+\"_\"+str(type_e):first_1,\n",
    "                        'get_feat_013_first_2_time'+'diff_time_'+str(type_s)+\"_\"+str(type_e):first_2,\n",
    "                        'get_feat_013_first_3_time'+'diff_time_'+str(type_s)+\"_\"+str(type_e):first_3,\n",
    "                        'get_feat_013_last_1_time'+'diff_time_'+str(type_s)+\"_\"+str(type_e):last_1,\n",
    "                        'get_feat_013_last_2_time'+'diff_time_'+str(type_s)+\"_\"+str(type_e):last_2,\n",
    "                        'get_feat_013_last_3_time'+'diff_time_'+str(type_s)+\"_\"+str(type_e):last_3,\n",
    "        })\n",
    "        df = df.reset_index()\n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df\n",
    "def get_feat_015(train,type_s):\n",
    "    \"\"\"For each type_s action: the TYPE of the first later action of a\n",
    "    different type; then per user the first/last 1-3 of those follow-up types\n",
    "    (external first_*/last_* helpers).\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_15_%s_%s_5.csv'%(train,str(type_s))\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        \n",
    "        df_1 = df[df['actionType']==type_s]\n",
    "        \n",
    "        df = df[df.actionType!=type_s]\n",
    "        \n",
    "        # Pair every type_s action (_x) with every other-type action (_y)...\n",
    "        df = pd.merge(df_1,df,on='userid',how='left')\n",
    "        \n",
    "        df = df[df['actionTime_x']<df['actionTime_y']]\n",
    "\n",
    "        df = df.sort_values(by=['userid','actionTime_x','actionTime_y'],ascending=[1,1,1]) \n",
    "        # ...then keep only the earliest follow-up per type_s occurrence.\n",
    "        df = df.drop_duplicates(['userid','actionTime_x'],keep='first')\n",
    "        df =df[['userid','actionType_y']].groupby(['userid'])['actionType_y'].agg({\n",
    "                         'get_feat_015_first_1_time'+'diff_time_'+str(type_s):first_1,\n",
    "                        'get_feat_015_first_2_time'+'diff_time_'+str(type_s):first_2,\n",
    "                        'get_feat_015_first_3_time'+'diff_time_'+str(type_s):first_3,\n",
    "                        'get_feat_015_last_1_time'+'diff_time_'+str(type_s):last_1,\n",
    "                        'get_feat_015_last_2_time'+'diff_time_'+str(type_s):last_2,\n",
    "                        'get_feat_015_last_3_time'+'diff_time_'+str(type_s):last_3,\n",
    "                        \n",
    "        })\n",
    "        df = df.reset_index()\n",
    "        # Write the cache so the read branch above can actually hit (the write\n",
    "        # was previously commented out while the read path remained, risking a\n",
    "        # stale file from an older run being served).\n",
    "        df.to_csv(dump_path,index=False)\n",
    "        return df        \n",
    "    \n",
    "def different_count(x):\n",
    "    \"\"\"Run-compress a Series: whenever the value changes, record the value\n",
    "    of the run that just ended, then return the first three and the last\n",
    "    three recorded values (empty slices yield nan via numpy's empty-slice\n",
    "    mean, which also emits a RuntimeWarning).\n",
    "\n",
    "    NOTE(review): the final element is appended only when it differs from\n",
    "    its predecessor, so a trailing run of length >= 2 contributes nothing\n",
    "    -- confirm this is intended.\n",
    "    NOTE(review): for len(x) == 1 the tail check compares the element with\n",
    "    itself (iloc[-1]), leaving y empty and all six outputs nan.\n",
    "    \"\"\"\n",
    "#     print(x)\n",
    "    y=[]\n",
    "    for i in range(1,x.shape[0]):\n",
    "        if(x.iloc[i]!=x.iloc[i-1]):\n",
    "            y.append(x.iloc[i-1])\n",
    "    if(x.iloc[x.shape[0]-1]!=x.iloc[x.shape[0]-2]):\n",
    "        y.append(x.iloc[len(x)-1])\n",
    "    y = np.asarray(y)\n",
    "    return y[0:1].mean(),y[1:2].mean(),y[2:3].mean(),y[-3:-2].mean(),y[-2:-1].mean(),y[-1:].mean()\n",
    "\n",
    "\n",
    "    \n",
    "def get_feat_014(train):\n",
    "    \"\"\"Per-user features from the actionType sequence: the first three and\n",
    "    last three run-change values produced by different_count, unpacked\n",
    "    into six columns.  Result is cached under ../cache.\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_14_%s.csv'%(train)\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        \n",
    "        # NOTE(review): dict-form .agg with a tuple-returning function was\n",
    "        # deprecated in pandas 0.20 and is rejected by modern pandas.\n",
    "        df = df.groupby(['userid'],as_index=False)['actionType'].agg({\n",
    "            'get_feat_014':different_count,\n",
    "        })\n",
    "        print(df.head())\n",
    "        df_2 = df[['userid']]\n",
    "        # expand the 6-tuple column into six feature columns; column order\n",
    "        # matches different_count's return order (first_1..3, last_3..1)\n",
    "        df_1 = pd.DataFrame(list(df['get_feat_014']),columns=['get_feat_014_first_1','get_feat_014_first_2','get_feat_014_first_3','get_feat_014_last_3','get_feat_014_last_2','get_feat_014_last_1'])\n",
    "        print(df_1.head())\n",
    "#         df_1 = df_1.astype('Int32')\n",
    "#         print(df_1.head())\n",
    "        # relies on df_2 and df_1 sharing the same RangeIndex for row alignment\n",
    "        df = pd.concat([df_2,df_1],axis=1)\n",
    "        print(df.head())\n",
    "        \n",
    "        df.to_csv(dump_path,index=False)\n",
    "\n",
    "        return df\n",
    "    \n",
    "def get_feat_016(train):\n",
    "    \"\"\"Per-user statistics of actionTime gaps restricted to the 30 days\n",
    "    before each user's last recorded action.  Result cached under ../cache.\n",
    "\n",
    "    Relies on diff_mean/diff_std/... aggregators defined elsewhere in the\n",
    "    notebook, and on the loaders returning rows sorted by actionTime\n",
    "    ascending (so keep='last' picks the latest action per user).\n",
    "    \"\"\"\n",
    "    dump_path='../cache/get_feat_016_%s.csv'%(train)\n",
    "    if os.path.exists(dump_path):\n",
    "        actions = pd.read_csv(dump_path)\n",
    "        return actions\n",
    "    else:\n",
    "        if(train=='train'):\n",
    "            df = get_all_train_data()\n",
    "        else:\n",
    "            df = get_all_test_data()\n",
    "        # last action per user -> window start = last action - 30 days\n",
    "        df_1 = df.drop_duplicates(['userid'],keep='last')[['userid','actionTime']]\n",
    "        df_1.columns = ['userid','start_time']\n",
    "        df = pd.merge(df,df_1,on='userid',how='left')\n",
    "        df['start_time'] = df['start_time'] -3600*24*30\n",
    "        df = df[df['actionTime']>df['start_time']]\n",
    "        # NOTE(review): 'feat_3_std' looks like a copy-paste leftover -- it is\n",
    "        # inconsistent with the other 'feat_16_*' names and is persisted to\n",
    "        # the cache file under that name.  Dict-form .agg is also rejected by\n",
    "        # modern pandas.\n",
    "        actions = df[['userid','actionTime']].groupby(['userid'])['actionTime'].agg({\n",
    "                                                                      'feat_16_mean':diff_mean,\n",
    "                                                                      'feat_3_std':diff_std,\n",
    "#                                                                       'feat_3_max':diff_max,\n",
    "                                                                      'feat_16_min':diff_min,\n",
    "                                                                      'feat_16_last_1':diff_last_1,\n",
    "                                                                      'feat_16_last_2':diff_last_2,\n",
    "                                                                      'feat_16_last_3':diff_last_3,\n",
    "            \n",
    "                                                                      'feat_16_first_1': diff_first_1,\n",
    "                                                                      'feat_16_first_2': diff_first_2,\n",
    "                                                                      'feat_16_first_3': diff_first_3,\n",
    "            \n",
    "                                                                     \n",
    "                                                                    })\n",
    "        actions = actions.reset_index()\n",
    "        actions.to_csv(dump_path,index=False)\n",
    "\n",
    "        return actions\n",
    "        \n",
    "# marker so the cell's completion is visible in the output\n",
    "print(\"finish\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "finish\n"
     ]
    }
   ],
   "source": [
    "def get_feat(train):\n",
    "    \"\"\"Assemble the per-user feature matrix for the given split.\n",
    "\n",
    "    Outer-merges the outputs of the individual get_feat_* builders on\n",
    "    'userid', so a user present in any builder keeps a row (missing\n",
    "    features become NaN).  The commented-out merges are disabled\n",
    "    experiments; the trailing #numbers appear to be row counts noted\n",
    "    during experimentation.\n",
    "\n",
    "    train: 'train' or 'test' -- forwarded to every feature builder.\n",
    "    Returns a DataFrame keyed by 'userid'.\n",
    "    \"\"\"\n",
    "    df = get_feat_1(train)  #1\n",
    "    df = pd.merge(df,get_feat_2(train),on='userid',how='outer')\n",
    "    df = pd.merge(df,get_feat_3(train),on='userid',how='outer') # modified\n",
    "#     print(df.shape)\n",
    "    df = pd.merge(df,get_feat_4(train),on='userid',how='outer')# partially useful\n",
    "#     print(df.shape)\n",
    "\n",
    "   \n",
    "    df = pd.merge(df,get_feat_6(train),on='userid',how='outer') # partially useful\n",
    "    df = pd.merge(df,get_feat_7(train),on='userid',how='outer')#1\n",
    "    \n",
    "    \n",
    "\n",
    "    df = pd.merge(df,get_feat_20(train),on='userid',how='outer')\n",
    "\n",
    "    df = pd.merge(df,get_feat_22(train),on='userid',how='outer')\n",
    "    df = pd.merge(df,get_feat_22_1567(train),on='userid',how='outer')\n",
    "    df = pd.merge(df,get_feat_26(train),on='userid',how='outer')\n",
    "    df = pd.merge(df,get_feat_27(train),on='userid',how='outer')   #0.1%\n",
    "    df = pd.merge(df,get_feat_28(train),on='userid',how='outer')\n",
    "\n",
    "    df = pd.merge(df,get_feat_30(train),on='userid',how='outer')\n",
    "    df = pd.merge(df,get_feat_001(train),on='userid',how='outer')  \n",
    "\n",
    "    \n",
    "    df = pd.merge(df,get_feat_011(train),on='userid',how='outer')\n",
    "\n",
    "    \n",
    "#     df = pd.merge(df,get_feat_012(train,2,2),on='userid',how='outer')#15562\n",
    "#     df = pd.merge(df,get_feat_012(train,1,3),on='userid',how='outer')#11900\n",
    "#     df = pd.merge(df,get_feat_012(train,1,4),on='userid',how='outer')#8591\n",
    "    \n",
    "    \n",
    "    \n",
    "    df = pd.merge(df,get_feat_012(train,5,6),on='userid',how='outer') #35984\n",
    "    df = pd.merge(df,get_feat_012(train,6,7),on='userid',how='outer')#15354\n",
    "    df = pd.merge(df,get_feat_012(train,7,8),on='userid',how='outer')#4649\n",
    "    df = pd.merge(df,get_feat_012(train,8,9),on='userid',how='outer')#8544\n",
    "    df = pd.merge(df,get_feat_012(train,5,7),on='userid',how='outer')#15562\n",
    "    df = pd.merge(df,get_feat_012(train,5,8),on='userid',how='outer')#11900\n",
    "    df = pd.merge(df,get_feat_012(train,5,9),on='userid',how='outer')#8591\n",
    "    \n",
    "#     df =  pd.merge(df,get_feat_012(train,5,5),on='userid',how='outer') #35260\n",
    "#     df =  pd.merge(df,get_feat_012(train,6,6),on='userid',how='outer') #29359\n",
    "\n",
    "#     df = pd.merge(df,get_feat_012(train,1,5),on='userid',how='outer')#36132\n",
    "#     df = pd.merge(df,get_feat_012(train,1,6),on='userid',how='outer')#34371\n",
    "#     df = pd.merge(df,get_feat_012(train,1,7),on='userid',how='outer')#15053\n",
    "#     df = pd.merge(df,get_feat_012(train,1,8),on='userid',how='outer')#11163\n",
    "#     df = pd.merge(df,get_feat_012(train,1,9),on='userid',how='outer')#8437\n",
    "    \n",
    "\n",
    "#     df = pd.merge(df,get_feat_012(train,5,1),on='userid',how='outer')#30482\n",
    "#     df = pd.merge(df,get_feat_012(train,6,1),on='userid',how='outer')#25178\n",
    "#     df = pd.merge(df,get_feat_012(train,7,1),on='userid',how='outer')#6995\n",
    "#     df = pd.merge(df,get_feat_012(train,8,1),on='userid',how='outer')#11356\n",
    "#     df = pd.merge(df,get_feat_012(train,9,1),on='userid',how='outer')#8773\n",
    "    \n",
    "#     df = pd.merge(df,get_feat_013(train,1,2),on='userid',how='outer')#15562\n",
    "#     df = pd.merge(df,get_feat_013(train,1,3),on='userid',how='outer')#11900\n",
    "#     df = pd.merge(df,get_feat_013(train,1,4),on='userid',how='outer')#8591\n",
    "    \n",
    "    \n",
    "    df = pd.merge(df,get_feat_013(train,5,6),on='userid',how='outer') #35984\n",
    "    df = pd.merge(df,get_feat_013(train,6,7),on='userid',how='outer')#15354\n",
    "    df = pd.merge(df,get_feat_013(train,7,8),on='userid',how='outer')#4649\n",
    "    df = pd.merge(df,get_feat_013(train,8,9),on='userid',how='outer')#8544\n",
    "    df = pd.merge(df,get_feat_013(train,5,7),on='userid',how='outer')#15562\n",
    "    df = pd.merge(df,get_feat_013(train,5,8),on='userid',how='outer')#11900\n",
    "    df = pd.merge(df,get_feat_013(train,5,9),on='userid',how='outer')#8591\n",
    "    \n",
    "#     df = pd.merge(df,get_feat_013(train,1,5),on='userid',how='outer')#36132\n",
    "#     df = pd.merge(df,get_feat_013(train,1,6),on='userid',how='outer')#34371\n",
    "#     df = pd.merge(df,get_feat_013(train,1,7),on='userid',how='outer')#15053\n",
    "#     df = pd.merge(df,get_feat_013(train,1,8),on='userid',how='outer')#11163\n",
    "#     df = pd.merge(df,get_feat_013(train,1,9),on='userid',how='outer')#8437\n",
    "    \n",
    "\n",
    "#     df = pd.merge(df,get_feat_013(train,5,1),on='userid',how='outer')#30482\n",
    "#     df = pd.merge(df,get_feat_013(train,6,1),on='userid',how='outer')#25178\n",
    "#     df = pd.merge(df,get_feat_013(train,7,1),on='userid',how='outer')#6995\n",
    "#     df = pd.merge(df,get_feat_013(train,8,1),on='userid',how='outer')#11356\n",
    "#     df = pd.merge(df,get_feat_013(train,9,1),on='userid',how='outer')#8773\n",
    "\n",
    "\n",
    "\n",
    "#     df = pd.merge(df,get_feat_013(train,1,2),on='userid',how='outer')#15562\n",
    "#     df = pd.merge(df,get_feat_013(train,1,3),on='userid',how='outer')#11900\n",
    "#     df = pd.merge(df,get_feat_013(train,1,4),on='userid',how='outer')#8591\n",
    "    \n",
    "#     df = pd.merge(df,get_feat_015(train,1),on='userid',how='outer')\n",
    "#     df = pd.merge(df,get_feat_015(train,5),on='userid',how='outer') #35984\n",
    "#     df = pd.merge(df,get_feat_015(train,6),on='userid',how='outer')#15354\n",
    "#     df = pd.merge(df,get_feat_015(train,7),on='userid',how='outer')#4649\n",
    "#     df = pd.merge(df,get_feat_015(train,8),on='userid',how='outer')#8544\n",
    "#     df = pd.merge(df,get_feat_015(train,5,7),on='userid',how='outer')#15562\n",
    "#     df = pd.merge(df,get_feat_015(train,5,8),on='userid',how='outer')#11900\n",
    "#     df = pd.merge(df,get_feat_015(train,5,9),on='userid',how='outer')#8591\n",
    "    \n",
    "#     df = pd.merge(df,get_feat_013(train,1,5),on='userid',how='outer')#36132\n",
    "#     df = pd.merge(df,get_feat_013(train,1,6),on='userid',how='outer')#34371\n",
    "#     df = pd.merge(df,get_feat_013(train,1,7),on='userid',how='outer')#15053\n",
    "#     df = pd.merge(df,get_feat_013(train,1,8),on='userid',how='outer')#11163\n",
    "#     df = pd.merge(df,get_feat_013(train,1,9),on='userid',how='outer')#8437\n",
    "    \n",
    "\n",
    "#     df = pd.merge(df,get_feat_013(train,5,1),on='userid',how='outer')#30482\n",
    "#     df = pd.merge(df,get_feat_013(train,6,1),on='userid',how='outer')#25178\n",
    "#     df = pd.merge(df,get_feat_013(train,7,1),on='userid',how='outer')#6995\n",
    "#     df = pd.merge(df,get_feat_013(train,8,1),on='userid',how='outer')#11356\n",
    "#     df = pd.merge(df,get_feat_013(train,9,1),on='userid',how='outer')#8773\n",
    "\n",
    "\n",
    "#     df = pd.merge(df,get_feat_016(train),on='userid',how='outer')\n",
    "       \n",
    "\n",
    "    \n",
    " \n",
    "    \n",
    "    \n",
    "    # user-profile demographics (gender/age/province maps) joined last\n",
    "    df = pd.merge(df,get_userprofile_map(train),on='userid',how='outer')\n",
    "    return df\n",
    "def get_train_df():\n",
    "    \"\"\"Load the train labels and left-join the engineered features on 'userid'.\n",
    "\n",
    "    Prints the label shape before and the merged shape after as a sanity check.\n",
    "    \"\"\"\n",
    "    labels = pd.read_csv('../data/train/orderFuture_train.csv')\n",
    "    print(labels.shape)\n",
    "    merged = pd.merge(labels,get_feat('train'),on='userid',how='left')\n",
    "    print(merged.shape)\n",
    "    return merged\n",
    "def get_test_df():\n",
    "    \"\"\"Load the test ids and left-join the engineered features on 'userid'.\n",
    "\n",
    "    Prints the id-frame shape before and the merged shape after as a sanity check.\n",
    "    \"\"\"\n",
    "    ids = pd.read_csv('../data/test/orderFuture_test.csv')\n",
    "    print(ids.shape)\n",
    "    merged = pd.merge(ids,get_feat('test'),on='userid',how='left')\n",
    "    print(merged.shape)\n",
    "    return merged\n",
    "# marker so the cell's completion is visible in the output\n",
    "print(\"finish\")   "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false,
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40307, 2)\n",
      "(40307, 213)\n",
      "(10076, 2)\n",
      "(10076, 213)\n"
     ]
    }
   ],
   "source": [
    "# materialise the train/test feature matrices (shapes are printed by the loaders)\n",
    "train = get_train_df()\n",
    "test = get_test_df()\n",
    "# train = drop_nan(train)\n",
    "# test = test[train.columns]\n",
    "# print(train.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0]\ttrain-auc:0.777225\teval-auc:0.771994\n",
      "Multiple eval metrics have been passed: 'eval-auc' will be used for early stopping.\n",
      "\n",
      "Will train until eval-auc hasn't improved in 20 rounds.\n",
      "[1]\ttrain-auc:0.834034\teval-auc:0.820159\n",
      "[2]\ttrain-auc:0.83967\teval-auc:0.825565\n",
      "[3]\ttrain-auc:0.848989\teval-auc:0.832756\n",
      "[4]\ttrain-auc:0.855027\teval-auc:0.837524\n",
      "[5]\ttrain-auc:0.861655\teval-auc:0.844941\n",
      "[6]\ttrain-auc:0.860848\teval-auc:0.8443\n",
      "[7]\ttrain-auc:0.873555\teval-auc:0.86019\n",
      "[8]\ttrain-auc:0.873554\teval-auc:0.859947\n",
      "[9]\ttrain-auc:0.873096\teval-auc:0.859623\n",
      "[10]\ttrain-auc:0.873407\teval-auc:0.860462\n",
      "[11]\ttrain-auc:0.885994\teval-auc:0.870548\n",
      "[12]\ttrain-auc:0.885911\teval-auc:0.870291\n",
      "[13]\ttrain-auc:0.885113\teval-auc:0.870184\n",
      "[14]\ttrain-auc:0.886838\teval-auc:0.871625\n",
      "[15]\ttrain-auc:0.893292\teval-auc:0.878629\n",
      "[16]\ttrain-auc:0.893558\teval-auc:0.878883\n",
      "[17]\ttrain-auc:0.896246\teval-auc:0.881254\n",
      "[18]\ttrain-auc:0.896829\teval-auc:0.881791\n",
      "[19]\ttrain-auc:0.898603\teval-auc:0.883278\n",
      "[20]\ttrain-auc:0.898887\teval-auc:0.884087\n",
      "[21]\ttrain-auc:0.898935\teval-auc:0.883974\n",
      "[22]\ttrain-auc:0.900422\teval-auc:0.885763\n",
      "[23]\ttrain-auc:0.900581\teval-auc:0.885859\n",
      "[24]\ttrain-auc:0.901003\teval-auc:0.886416\n",
      "[25]\ttrain-auc:0.905271\teval-auc:0.889052\n",
      "[26]\ttrain-auc:0.908054\teval-auc:0.892371\n",
      "[27]\ttrain-auc:0.909128\teval-auc:0.894236\n",
      "[28]\ttrain-auc:0.911002\teval-auc:0.895921\n",
      "[29]\ttrain-auc:0.91186\teval-auc:0.896722\n",
      "[30]\ttrain-auc:0.912745\teval-auc:0.898347\n",
      "[31]\ttrain-auc:0.913134\teval-auc:0.899731\n",
      "[32]\ttrain-auc:0.916687\teval-auc:0.902905\n",
      "[33]\ttrain-auc:0.91958\teval-auc:0.905344\n",
      "[34]\ttrain-auc:0.921136\teval-auc:0.90717\n",
      "[35]\ttrain-auc:0.92307\teval-auc:0.908792\n",
      "[36]\ttrain-auc:0.924936\teval-auc:0.910529\n",
      "[37]\ttrain-auc:0.927921\teval-auc:0.913321\n",
      "[38]\ttrain-auc:0.928816\teval-auc:0.914113\n",
      "[39]\ttrain-auc:0.93072\teval-auc:0.916066\n",
      "[40]\ttrain-auc:0.931439\teval-auc:0.91668\n",
      "[41]\ttrain-auc:0.932286\teval-auc:0.917401\n",
      "[42]\ttrain-auc:0.932562\teval-auc:0.91782\n",
      "[43]\ttrain-auc:0.934599\teval-auc:0.920288\n",
      "[44]\ttrain-auc:0.936253\teval-auc:0.921771\n",
      "[45]\ttrain-auc:0.937016\teval-auc:0.922363\n",
      "[46]\ttrain-auc:0.938176\teval-auc:0.924014\n",
      "[47]\ttrain-auc:0.940032\teval-auc:0.926384\n",
      "[48]\ttrain-auc:0.940929\teval-auc:0.927286\n",
      "[49]\ttrain-auc:0.941495\teval-auc:0.92787\n",
      "[50]\ttrain-auc:0.942058\teval-auc:0.928577\n",
      "[51]\ttrain-auc:0.943105\teval-auc:0.929809\n",
      "[52]\ttrain-auc:0.944285\teval-auc:0.931307\n",
      "[53]\ttrain-auc:0.944961\teval-auc:0.931889\n",
      "[54]\ttrain-auc:0.946074\teval-auc:0.933458\n",
      "[55]\ttrain-auc:0.946499\teval-auc:0.934088\n",
      "[56]\ttrain-auc:0.946696\teval-auc:0.934278\n",
      "[57]\ttrain-auc:0.947629\teval-auc:0.935364\n",
      "[58]\ttrain-auc:0.94795\teval-auc:0.935777\n",
      "[59]\ttrain-auc:0.948541\teval-auc:0.936526\n",
      "[60]\ttrain-auc:0.948616\teval-auc:0.936682\n",
      "[61]\ttrain-auc:0.949045\teval-auc:0.937212\n",
      "[62]\ttrain-auc:0.94974\teval-auc:0.938065\n",
      "[63]\ttrain-auc:0.950684\teval-auc:0.939528\n",
      "[64]\ttrain-auc:0.95087\teval-auc:0.939917\n",
      "[65]\ttrain-auc:0.951052\teval-auc:0.940126\n",
      "[66]\ttrain-auc:0.951395\teval-auc:0.940481\n",
      "[67]\ttrain-auc:0.952202\teval-auc:0.941603\n",
      "[68]\ttrain-auc:0.95236\teval-auc:0.941807\n",
      "[69]\ttrain-auc:0.952711\teval-auc:0.942004\n",
      "[70]\ttrain-auc:0.95301\teval-auc:0.942361\n",
      "[71]\ttrain-auc:0.953428\teval-auc:0.942746\n",
      "[72]\ttrain-auc:0.953773\teval-auc:0.943134\n",
      "[73]\ttrain-auc:0.95396\teval-auc:0.943106\n",
      "[74]\ttrain-auc:0.954341\teval-auc:0.943541\n",
      "[75]\ttrain-auc:0.954631\teval-auc:0.943831\n",
      "[76]\ttrain-auc:0.954822\teval-auc:0.944089\n",
      "[77]\ttrain-auc:0.955263\teval-auc:0.944779\n",
      "[78]\ttrain-auc:0.955364\teval-auc:0.944883\n",
      "[79]\ttrain-auc:0.955795\teval-auc:0.945445\n",
      "[80]\ttrain-auc:0.956083\teval-auc:0.945901\n",
      "[81]\ttrain-auc:0.956229\teval-auc:0.946017\n",
      "[82]\ttrain-auc:0.956603\teval-auc:0.946521\n",
      "[83]\ttrain-auc:0.957013\teval-auc:0.946869\n",
      "[84]\ttrain-auc:0.957113\teval-auc:0.947094\n",
      "[85]\ttrain-auc:0.957259\teval-auc:0.947305\n",
      "[86]\ttrain-auc:0.9575\teval-auc:0.94751\n",
      "[87]\ttrain-auc:0.957801\teval-auc:0.947742\n",
      "[88]\ttrain-auc:0.958189\teval-auc:0.94818\n",
      "[89]\ttrain-auc:0.958481\teval-auc:0.9483\n",
      "[90]\ttrain-auc:0.958613\teval-auc:0.948485\n",
      "[91]\ttrain-auc:0.958685\teval-auc:0.948529\n",
      "[92]\ttrain-auc:0.958917\teval-auc:0.94875\n",
      "[93]\ttrain-auc:0.95907\teval-auc:0.948779\n",
      "[94]\ttrain-auc:0.959145\teval-auc:0.94883\n",
      "[95]\ttrain-auc:0.959417\teval-auc:0.949019\n",
      "[96]\ttrain-auc:0.959379\teval-auc:0.949003\n",
      "[97]\ttrain-auc:0.959595\teval-auc:0.949267\n",
      "[98]\ttrain-auc:0.959857\teval-auc:0.949449\n",
      "[99]\ttrain-auc:0.960137\teval-auc:0.949803\n",
      "[100]\ttrain-auc:0.96029\teval-auc:0.950007\n",
      "[101]\ttrain-auc:0.960422\teval-auc:0.950097\n",
      "[102]\ttrain-auc:0.96075\teval-auc:0.950413\n",
      "[103]\ttrain-auc:0.96089\teval-auc:0.950498\n",
      "[104]\ttrain-auc:0.961073\teval-auc:0.950756\n",
      "[105]\ttrain-auc:0.961185\teval-auc:0.950874\n",
      "[106]\ttrain-auc:0.961265\teval-auc:0.950957\n",
      "[107]\ttrain-auc:0.96126\teval-auc:0.950896\n",
      "[108]\ttrain-auc:0.961382\teval-auc:0.951024\n",
      "[109]\ttrain-auc:0.961463\teval-auc:0.951111\n",
      "[110]\ttrain-auc:0.961678\teval-auc:0.951231\n",
      "[111]\ttrain-auc:0.96179\teval-auc:0.951326\n",
      "[112]\ttrain-auc:0.961962\teval-auc:0.951412\n",
      "[113]\ttrain-auc:0.962206\teval-auc:0.951679\n",
      "[114]\ttrain-auc:0.96239\teval-auc:0.951889\n",
      "[115]\ttrain-auc:0.962579\teval-auc:0.952081\n",
      "[116]\ttrain-auc:0.962704\teval-auc:0.952284\n",
      "[117]\ttrain-auc:0.962786\teval-auc:0.952281\n",
      "[118]\ttrain-auc:0.962842\teval-auc:0.952398\n",
      "[119]\ttrain-auc:0.96295\teval-auc:0.952487\n",
      "[120]\ttrain-auc:0.963043\teval-auc:0.952649\n",
      "[121]\ttrain-auc:0.963213\teval-auc:0.952864\n",
      "[122]\ttrain-auc:0.963366\teval-auc:0.952922\n",
      "[123]\ttrain-auc:0.963547\teval-auc:0.953079\n",
      "[124]\ttrain-auc:0.963694\teval-auc:0.953169\n",
      "[125]\ttrain-auc:0.963817\teval-auc:0.953274\n",
      "[126]\ttrain-auc:0.96402\teval-auc:0.953425\n",
      "[127]\ttrain-auc:0.964077\teval-auc:0.953483\n",
      "[128]\ttrain-auc:0.964126\teval-auc:0.953564\n",
      "[129]\ttrain-auc:0.964189\teval-auc:0.953662\n",
      "[130]\ttrain-auc:0.964328\teval-auc:0.953709\n",
      "[131]\ttrain-auc:0.964497\teval-auc:0.953865\n",
      "[132]\ttrain-auc:0.964568\teval-auc:0.953972\n",
      "[133]\ttrain-auc:0.964666\teval-auc:0.953988\n",
      "[134]\ttrain-auc:0.964793\teval-auc:0.954103\n",
      "[135]\ttrain-auc:0.964929\teval-auc:0.954197\n",
      "[136]\ttrain-auc:0.964968\teval-auc:0.954244\n",
      "[137]\ttrain-auc:0.965163\teval-auc:0.954367\n",
      "[138]\ttrain-auc:0.965277\teval-auc:0.95445\n",
      "[139]\ttrain-auc:0.965304\teval-auc:0.954493\n",
      "[140]\ttrain-auc:0.965458\teval-auc:0.954632\n",
      "[141]\ttrain-auc:0.965552\teval-auc:0.954698\n",
      "[142]\ttrain-auc:0.965645\teval-auc:0.954824\n",
      "[143]\ttrain-auc:0.965682\teval-auc:0.954749\n",
      "[144]\ttrain-auc:0.965798\teval-auc:0.954798\n",
      "[145]\ttrain-auc:0.965913\teval-auc:0.954937\n",
      "[146]\ttrain-auc:0.965991\teval-auc:0.955103\n",
      "[147]\ttrain-auc:0.966128\teval-auc:0.955142\n",
      "[148]\ttrain-auc:0.966187\teval-auc:0.955155\n",
      "[149]\ttrain-auc:0.966244\teval-auc:0.955252\n",
      "[150]\ttrain-auc:0.966306\teval-auc:0.95528\n",
      "[151]\ttrain-auc:0.966439\teval-auc:0.955408\n",
      "[152]\ttrain-auc:0.966544\teval-auc:0.955486\n",
      "[153]\ttrain-auc:0.966631\teval-auc:0.955528\n",
      "[154]\ttrain-auc:0.966887\teval-auc:0.955752\n",
      "[155]\ttrain-auc:0.967061\teval-auc:0.955904\n",
      "[156]\ttrain-auc:0.967098\teval-auc:0.955956\n",
      "[157]\ttrain-auc:0.967174\teval-auc:0.955965\n",
      "[158]\ttrain-auc:0.967316\teval-auc:0.956113\n",
      "[159]\ttrain-auc:0.967402\teval-auc:0.956204\n",
      "[160]\ttrain-auc:0.967435\teval-auc:0.956231\n",
      "[161]\ttrain-auc:0.967481\teval-auc:0.956253\n",
      "[162]\ttrain-auc:0.967539\teval-auc:0.956298\n",
      "[163]\ttrain-auc:0.96763\teval-auc:0.956397\n",
      "[164]\ttrain-auc:0.967724\teval-auc:0.956471\n",
      "[165]\ttrain-auc:0.967814\teval-auc:0.956526\n",
      "[166]\ttrain-auc:0.967902\teval-auc:0.956557\n",
      "[167]\ttrain-auc:0.968\teval-auc:0.956594\n",
      "[168]\ttrain-auc:0.968112\teval-auc:0.956671\n",
      "[169]\ttrain-auc:0.968244\teval-auc:0.956908\n",
      "[170]\ttrain-auc:0.96832\teval-auc:0.956928\n",
      "[171]\ttrain-auc:0.96844\teval-auc:0.957056\n",
      "[172]\ttrain-auc:0.9685\teval-auc:0.957068\n",
      "[173]\ttrain-auc:0.968578\teval-auc:0.957094\n",
      "[174]\ttrain-auc:0.968647\teval-auc:0.957114\n",
      "[175]\ttrain-auc:0.968715\teval-auc:0.957141\n",
      "[176]\ttrain-auc:0.968792\teval-auc:0.957152\n",
      "[177]\ttrain-auc:0.968853\teval-auc:0.957205\n",
      "[178]\ttrain-auc:0.968988\teval-auc:0.957313\n",
      "[179]\ttrain-auc:0.969019\teval-auc:0.957302\n",
      "[180]\ttrain-auc:0.969144\teval-auc:0.957494\n",
      "[181]\ttrain-auc:0.969221\teval-auc:0.957583\n",
      "[182]\ttrain-auc:0.9693\teval-auc:0.957611\n",
      "[183]\ttrain-auc:0.96936\teval-auc:0.957649\n",
      "[184]\ttrain-auc:0.969446\teval-auc:0.957673\n",
      "[185]\ttrain-auc:0.969484\teval-auc:0.957693\n",
      "[186]\ttrain-auc:0.969581\teval-auc:0.957793\n",
      "[187]\ttrain-auc:0.969639\teval-auc:0.957802\n",
      "[188]\ttrain-auc:0.96971\teval-auc:0.957818\n",
      "[189]\ttrain-auc:0.969728\teval-auc:0.957824\n",
      "[190]\ttrain-auc:0.969823\teval-auc:0.957893\n",
      "[191]\ttrain-auc:0.969906\teval-auc:0.957996\n",
      "[192]\ttrain-auc:0.969931\teval-auc:0.958026\n",
      "[193]\ttrain-auc:0.97001\teval-auc:0.958105\n",
      "[194]\ttrain-auc:0.970074\teval-auc:0.958127\n",
      "[195]\ttrain-auc:0.970168\teval-auc:0.958233\n",
      "[196]\ttrain-auc:0.970216\teval-auc:0.958288\n",
      "[197]\ttrain-auc:0.970273\teval-auc:0.958286\n",
      "[198]\ttrain-auc:0.970279\teval-auc:0.958334\n",
      "[199]\ttrain-auc:0.970317\teval-auc:0.958336\n",
      "[200]\ttrain-auc:0.970416\teval-auc:0.95847\n",
      "[201]\ttrain-auc:0.97055\teval-auc:0.958556\n",
      "[202]\ttrain-auc:0.97057\teval-auc:0.95854\n",
      "[203]\ttrain-auc:0.970631\teval-auc:0.958556\n",
      "[204]\ttrain-auc:0.970675\teval-auc:0.958562\n",
      "[205]\ttrain-auc:0.970762\teval-auc:0.958615\n",
      "[206]\ttrain-auc:0.97082\teval-auc:0.958612\n",
      "[207]\ttrain-auc:0.97087\teval-auc:0.95861\n",
      "[208]\ttrain-auc:0.970906\teval-auc:0.95867\n",
      "[209]\ttrain-auc:0.971005\teval-auc:0.958713\n",
      "[210]\ttrain-auc:0.971089\teval-auc:0.958707\n",
      "[211]\ttrain-auc:0.971173\teval-auc:0.958754\n",
      "[212]\ttrain-auc:0.971218\teval-auc:0.958751\n",
      "[213]\ttrain-auc:0.971281\teval-auc:0.958728\n",
      "[214]\ttrain-auc:0.971354\teval-auc:0.958736\n",
      "[215]\ttrain-auc:0.971427\teval-auc:0.95873\n",
      "[216]\ttrain-auc:0.971461\teval-auc:0.958758\n",
      "[217]\ttrain-auc:0.971508\teval-auc:0.958793\n",
      "[218]\ttrain-auc:0.971566\teval-auc:0.95881\n",
      "[219]\ttrain-auc:0.971641\teval-auc:0.958783\n",
      "[220]\ttrain-auc:0.971756\teval-auc:0.958865\n",
      "[221]\ttrain-auc:0.971828\teval-auc:0.958944\n",
      "[222]\ttrain-auc:0.971901\teval-auc:0.958974\n",
      "[223]\ttrain-auc:0.972025\teval-auc:0.959057\n",
      "[224]\ttrain-auc:0.972096\teval-auc:0.959072\n",
      "[225]\ttrain-auc:0.972155\teval-auc:0.9591\n",
      "[226]\ttrain-auc:0.972183\teval-auc:0.959173\n",
      "[227]\ttrain-auc:0.972226\teval-auc:0.959209\n",
      "[228]\ttrain-auc:0.972334\teval-auc:0.959323\n",
      "[229]\ttrain-auc:0.972399\teval-auc:0.959325\n",
      "[230]\ttrain-auc:0.972473\teval-auc:0.959399\n",
      "[231]\ttrain-auc:0.972477\teval-auc:0.959412\n",
      "[232]\ttrain-auc:0.972502\teval-auc:0.959397\n",
      "[233]\ttrain-auc:0.972575\teval-auc:0.959499\n",
      "[234]\ttrain-auc:0.972616\teval-auc:0.959523\n",
      "[235]\ttrain-auc:0.972707\teval-auc:0.959564\n",
      "[236]\ttrain-auc:0.972789\teval-auc:0.959589\n",
      "[237]\ttrain-auc:0.972825\teval-auc:0.959563\n",
      "[238]\ttrain-auc:0.972875\teval-auc:0.959617\n",
      "[239]\ttrain-auc:0.972899\teval-auc:0.959613\n",
      "[240]\ttrain-auc:0.972934\teval-auc:0.959599\n",
      "[241]\ttrain-auc:0.972979\teval-auc:0.959591\n",
      "[242]\ttrain-auc:0.973037\teval-auc:0.959643\n",
      "[243]\ttrain-auc:0.973094\teval-auc:0.959686\n",
      "[244]\ttrain-auc:0.973119\teval-auc:0.959728\n",
      "[245]\ttrain-auc:0.973187\teval-auc:0.959776\n",
      "[246]\ttrain-auc:0.973268\teval-auc:0.959823\n",
      "[247]\ttrain-auc:0.973319\teval-auc:0.959862\n",
      "[248]\ttrain-auc:0.973376\teval-auc:0.959836\n",
      "[249]\ttrain-auc:0.973409\teval-auc:0.959874\n",
      "[250]\ttrain-auc:0.973414\teval-auc:0.959873\n",
      "[251]\ttrain-auc:0.973441\teval-auc:0.959896\n",
      "[252]\ttrain-auc:0.973482\teval-auc:0.959903\n",
      "[253]\ttrain-auc:0.973532\teval-auc:0.959948\n",
      "[254]\ttrain-auc:0.973594\teval-auc:0.959956\n",
      "[255]\ttrain-auc:0.973648\teval-auc:0.960001\n",
      "[256]\ttrain-auc:0.973716\teval-auc:0.960045\n",
      "[257]\ttrain-auc:0.973787\teval-auc:0.960067\n",
      "[258]\ttrain-auc:0.973884\teval-auc:0.960192\n",
      "[259]\ttrain-auc:0.973931\teval-auc:0.960249\n",
      "[260]\ttrain-auc:0.973992\teval-auc:0.96028\n",
      "[261]\ttrain-auc:0.974015\teval-auc:0.960253\n",
      "[262]\ttrain-auc:0.974084\teval-auc:0.960315\n",
      "[263]\ttrain-auc:0.974149\teval-auc:0.960358\n",
      "[264]\ttrain-auc:0.974234\teval-auc:0.960416\n",
      "[265]\ttrain-auc:0.974287\teval-auc:0.960429\n",
      "[266]\ttrain-auc:0.974328\teval-auc:0.960462\n",
      "[267]\ttrain-auc:0.974392\teval-auc:0.960514\n",
      "[268]\ttrain-auc:0.974426\teval-auc:0.960531\n",
      "[269]\ttrain-auc:0.974486\teval-auc:0.960508\n",
      "[270]\ttrain-auc:0.974517\teval-auc:0.960581\n",
      "[271]\ttrain-auc:0.974558\teval-auc:0.960565\n",
      "[272]\ttrain-auc:0.974602\teval-auc:0.960609\n",
      "[273]\ttrain-auc:0.974642\teval-auc:0.960637\n",
      "[274]\ttrain-auc:0.974696\teval-auc:0.960649\n",
      "[275]\ttrain-auc:0.974762\teval-auc:0.96065\n",
      "[276]\ttrain-auc:0.974771\teval-auc:0.960652\n",
      "[277]\ttrain-auc:0.974795\teval-auc:0.960659\n",
      "[278]\ttrain-auc:0.974826\teval-auc:0.960674\n",
      "[279]\ttrain-auc:0.974911\teval-auc:0.960719\n",
      "[280]\ttrain-auc:0.974955\teval-auc:0.960748\n",
      "[281]\ttrain-auc:0.974968\teval-auc:0.960782\n",
      "[282]\ttrain-auc:0.975003\teval-auc:0.96077\n",
      "[283]\ttrain-auc:0.975079\teval-auc:0.960797\n",
      "[284]\ttrain-auc:0.975127\teval-auc:0.960849\n",
      "[285]\ttrain-auc:0.97521\teval-auc:0.961001\n",
      "[286]\ttrain-auc:0.97524\teval-auc:0.960952\n",
      "[287]\ttrain-auc:0.975287\teval-auc:0.960966\n",
      "[288]\ttrain-auc:0.975343\teval-auc:0.960983\n",
      "[289]\ttrain-auc:0.975383\teval-auc:0.960998\n",
      "[290]\ttrain-auc:0.975416\teval-auc:0.960986\n",
      "[291]\ttrain-auc:0.975427\teval-auc:0.960978\n",
      "[292]\ttrain-auc:0.975454\teval-auc:0.960971\n",
      "[293]\ttrain-auc:0.975494\teval-auc:0.960963\n",
      "[294]\ttrain-auc:0.975531\teval-auc:0.96099\n",
      "[295]\ttrain-auc:0.975571\teval-auc:0.961051\n",
      "[296]\ttrain-auc:0.9756\teval-auc:0.961064\n",
      "[297]\ttrain-auc:0.975619\teval-auc:0.961091\n",
      "[298]\ttrain-auc:0.975635\teval-auc:0.961079\n",
      "[299]\ttrain-auc:0.975666\teval-auc:0.961075\n",
      "[300]\ttrain-auc:0.975736\teval-auc:0.96113\n",
      "[301]\ttrain-auc:0.975787\teval-auc:0.961159\n",
      "[302]\ttrain-auc:0.975862\teval-auc:0.961227\n",
      "[303]\ttrain-auc:0.975917\teval-auc:0.96125\n",
      "[304]\ttrain-auc:0.975937\teval-auc:0.96119\n",
      "[305]\ttrain-auc:0.975966\teval-auc:0.961252\n",
      "[306]\ttrain-auc:0.975981\teval-auc:0.961249\n",
      "[307]\ttrain-auc:0.976002\teval-auc:0.961257\n",
      "[308]\ttrain-auc:0.976067\teval-auc:0.961271\n",
      "[309]\ttrain-auc:0.976087\teval-auc:0.961275\n",
      "[310]\ttrain-auc:0.976132\teval-auc:0.961307\n",
      "[311]\ttrain-auc:0.976173\teval-auc:0.961334\n",
      "[312]\ttrain-auc:0.976199\teval-auc:0.961307\n",
      "[313]\ttrain-auc:0.976223\teval-auc:0.961308\n",
      "[314]\ttrain-auc:0.97631\teval-auc:0.961338\n",
      "[315]\ttrain-auc:0.976418\teval-auc:0.961461\n",
      "[316]\ttrain-auc:0.976431\teval-auc:0.961457\n",
      "[317]\ttrain-auc:0.976485\teval-auc:0.961511\n",
      "[318]\ttrain-auc:0.976514\teval-auc:0.961534\n",
      "[319]\ttrain-auc:0.97656\teval-auc:0.96159\n",
      "[320]\ttrain-auc:0.976619\teval-auc:0.961678\n",
      "[321]\ttrain-auc:0.976649\teval-auc:0.961651\n",
      "[322]\ttrain-auc:0.976688\teval-auc:0.961709\n",
      "[323]\ttrain-auc:0.976738\teval-auc:0.961698\n",
      "[324]\ttrain-auc:0.976773\teval-auc:0.961712\n",
      "[325]\ttrain-auc:0.97681\teval-auc:0.961722\n",
      "[326]\ttrain-auc:0.976916\teval-auc:0.961772\n",
      "[327]\ttrain-auc:0.976977\teval-auc:0.961838\n",
      "[328]\ttrain-auc:0.977009\teval-auc:0.961814\n",
      "[329]\ttrain-auc:0.977057\teval-auc:0.961866\n",
      "[330]\ttrain-auc:0.977099\teval-auc:0.961842\n",
      "[331]\ttrain-auc:0.977125\teval-auc:0.961871\n",
      "[332]\ttrain-auc:0.97715\teval-auc:0.961913\n",
      "[333]\ttrain-auc:0.977203\teval-auc:0.961958\n",
      "[334]\ttrain-auc:0.977232\teval-auc:0.961929\n",
      "[335]\ttrain-auc:0.977285\teval-auc:0.961927\n",
      "[336]\ttrain-auc:0.977355\teval-auc:0.962028\n",
      "[337]\ttrain-auc:0.977421\teval-auc:0.962075\n",
      "[338]\ttrain-auc:0.977447\teval-auc:0.962097\n",
      "[339]\ttrain-auc:0.977494\teval-auc:0.962126\n",
      "[340]\ttrain-auc:0.977514\teval-auc:0.962109\n",
      "[341]\ttrain-auc:0.977551\teval-auc:0.962111\n",
      "[342]\ttrain-auc:0.977585\teval-auc:0.962053\n",
      "[343]\ttrain-auc:0.977608\teval-auc:0.962055\n",
      "[344]\ttrain-auc:0.977629\teval-auc:0.96206\n",
      "[345]\ttrain-auc:0.977641\teval-auc:0.962036\n",
      "[346]\ttrain-auc:0.977656\teval-auc:0.962021\n",
      "[347]\ttrain-auc:0.977718\teval-auc:0.962048\n",
      "[348]\ttrain-auc:0.977741\teval-auc:0.962064\n",
      "[349]\ttrain-auc:0.977789\teval-auc:0.962065\n",
      "[350]\ttrain-auc:0.977861\teval-auc:0.962093\n",
      "[351]\ttrain-auc:0.977899\teval-auc:0.962092\n",
      "[352]\ttrain-auc:0.977942\teval-auc:0.96212\n",
      "[353]\ttrain-auc:0.977998\teval-auc:0.962133\n",
      "[354]\ttrain-auc:0.978025\teval-auc:0.962092\n",
      "[355]\ttrain-auc:0.978082\teval-auc:0.962101\n",
      "[356]\ttrain-auc:0.978111\teval-auc:0.9621\n",
      "[357]\ttrain-auc:0.978143\teval-auc:0.962108\n",
      "[358]\ttrain-auc:0.978125\teval-auc:0.962131\n",
      "[359]\ttrain-auc:0.978168\teval-auc:0.962137\n",
      "[360]\ttrain-auc:0.978201\teval-auc:0.962191\n",
      "[361]\ttrain-auc:0.978238\teval-auc:0.962243\n",
      "[362]\ttrain-auc:0.978268\teval-auc:0.962323\n",
      "[363]\ttrain-auc:0.978272\teval-auc:0.96234\n",
      "[364]\ttrain-auc:0.9783\teval-auc:0.962336\n",
      "[365]\ttrain-auc:0.978328\teval-auc:0.962323\n",
      "[366]\ttrain-auc:0.978343\teval-auc:0.962309\n",
      "[367]\ttrain-auc:0.978394\teval-auc:0.962369\n",
      "[368]\ttrain-auc:0.978411\teval-auc:0.962393\n",
      "[369]\ttrain-auc:0.978477\teval-auc:0.962455\n",
      "[370]\ttrain-auc:0.978505\teval-auc:0.962457\n",
      "[371]\ttrain-auc:0.97851\teval-auc:0.962416\n",
      "[372]\ttrain-auc:0.978548\teval-auc:0.962435\n",
      "[373]\ttrain-auc:0.97859\teval-auc:0.962439\n",
      "[374]\ttrain-auc:0.978595\teval-auc:0.962446\n",
      "[375]\ttrain-auc:0.978631\teval-auc:0.962515\n",
      "[376]\ttrain-auc:0.978717\teval-auc:0.962626\n",
      "[377]\ttrain-auc:0.978744\teval-auc:0.962694\n",
      "[378]\ttrain-auc:0.978759\teval-auc:0.962675\n",
      "[379]\ttrain-auc:0.978783\teval-auc:0.962663\n",
      "[380]\ttrain-auc:0.978836\teval-auc:0.962699\n",
      "[381]\ttrain-auc:0.978882\teval-auc:0.96271\n",
      "[382]\ttrain-auc:0.978922\teval-auc:0.96272\n",
      "[383]\ttrain-auc:0.978962\teval-auc:0.962742\n",
      "[384]\ttrain-auc:0.978989\teval-auc:0.96277\n",
      "[385]\ttrain-auc:0.979048\teval-auc:0.962761\n",
      "[386]\ttrain-auc:0.979071\teval-auc:0.962781\n",
      "[387]\ttrain-auc:0.9791\teval-auc:0.962776\n",
      "[388]\ttrain-auc:0.979139\teval-auc:0.962814\n",
      "[389]\ttrain-auc:0.97916\teval-auc:0.962819\n",
      "[390]\ttrain-auc:0.979176\teval-auc:0.962792\n",
      "[391]\ttrain-auc:0.979198\teval-auc:0.962809\n",
      "[392]\ttrain-auc:0.979231\teval-auc:0.962861\n",
      "[393]\ttrain-auc:0.979301\teval-auc:0.962889\n",
      "[394]\ttrain-auc:0.979325\teval-auc:0.962935\n",
      "[395]\ttrain-auc:0.979369\teval-auc:0.963008\n",
      "[396]\ttrain-auc:0.979385\teval-auc:0.963021\n",
      "[397]\ttrain-auc:0.979399\teval-auc:0.963007\n",
      "[398]\ttrain-auc:0.979435\teval-auc:0.96301\n",
      "[399]\ttrain-auc:0.979457\teval-auc:0.963035\n",
      "[400]\ttrain-auc:0.979479\teval-auc:0.963015\n",
      "[401]\ttrain-auc:0.979518\teval-auc:0.963019\n",
      "[402]\ttrain-auc:0.979543\teval-auc:0.963042\n",
      "[403]\ttrain-auc:0.979588\teval-auc:0.96306\n",
      "[404]\ttrain-auc:0.979622\teval-auc:0.963076\n",
      "[405]\ttrain-auc:0.979656\teval-auc:0.963081\n",
      "[406]\ttrain-auc:0.979709\teval-auc:0.963085\n",
      "[407]\ttrain-auc:0.979737\teval-auc:0.963116\n",
      "[408]\ttrain-auc:0.979765\teval-auc:0.963127\n",
      "[409]\ttrain-auc:0.979797\teval-auc:0.963145\n",
      "[410]\ttrain-auc:0.979843\teval-auc:0.96319\n",
      "[411]\ttrain-auc:0.979891\teval-auc:0.963218\n",
      "[412]\ttrain-auc:0.97991\teval-auc:0.963242\n",
      "[413]\ttrain-auc:0.979941\teval-auc:0.963255\n",
      "[414]\ttrain-auc:0.979979\teval-auc:0.963246\n",
      "[415]\ttrain-auc:0.980021\teval-auc:0.963245\n",
      "[416]\ttrain-auc:0.980077\teval-auc:0.963268\n",
      "[417]\ttrain-auc:0.980129\teval-auc:0.963273\n",
      "[418]\ttrain-auc:0.980137\teval-auc:0.963264\n",
      "[419]\ttrain-auc:0.980152\teval-auc:0.963299\n",
      "[420]\ttrain-auc:0.980191\teval-auc:0.96332\n",
      "[421]\ttrain-auc:0.980216\teval-auc:0.963342\n",
      "[422]\ttrain-auc:0.980267\teval-auc:0.96338\n",
      "[423]\ttrain-auc:0.980294\teval-auc:0.963395\n",
      "[424]\ttrain-auc:0.980322\teval-auc:0.963401\n",
      "[425]\ttrain-auc:0.980371\teval-auc:0.963443\n",
      "[426]\ttrain-auc:0.980389\teval-auc:0.963437\n",
      "[427]\ttrain-auc:0.980428\teval-auc:0.963445\n",
      "[428]\ttrain-auc:0.980448\teval-auc:0.963419\n",
      "[429]\ttrain-auc:0.980503\teval-auc:0.963446\n",
      "[430]\ttrain-auc:0.980527\teval-auc:0.963454\n",
      "[431]\ttrain-auc:0.980556\teval-auc:0.963469\n",
      "[432]\ttrain-auc:0.980585\teval-auc:0.963462\n",
      "[433]\ttrain-auc:0.980594\teval-auc:0.963484\n",
      "[434]\ttrain-auc:0.980611\teval-auc:0.963493\n",
      "[435]\ttrain-auc:0.980662\teval-auc:0.963502\n",
      "[436]\ttrain-auc:0.980674\teval-auc:0.963518\n",
      "[437]\ttrain-auc:0.980738\teval-auc:0.96353\n",
      "[438]\ttrain-auc:0.980764\teval-auc:0.963556\n",
      "[439]\ttrain-auc:0.980804\teval-auc:0.963615\n",
      "[440]\ttrain-auc:0.980843\teval-auc:0.963642\n",
      "[441]\ttrain-auc:0.980856\teval-auc:0.96368\n",
      "[442]\ttrain-auc:0.980907\teval-auc:0.963729\n",
      "[443]\ttrain-auc:0.980932\teval-auc:0.963733\n",
      "[444]\ttrain-auc:0.980952\teval-auc:0.963717\n",
      "[445]\ttrain-auc:0.980969\teval-auc:0.963732\n",
      "[446]\ttrain-auc:0.981002\teval-auc:0.963803\n",
      "[447]\ttrain-auc:0.98104\teval-auc:0.963819\n",
      "[448]\ttrain-auc:0.981074\teval-auc:0.963824\n",
      "[449]\ttrain-auc:0.981101\teval-auc:0.963834\n",
      "[450]\ttrain-auc:0.98114\teval-auc:0.963834\n",
      "[451]\ttrain-auc:0.981154\teval-auc:0.96387\n",
      "[452]\ttrain-auc:0.981195\teval-auc:0.963858\n",
      "[453]\ttrain-auc:0.981214\teval-auc:0.963887\n",
      "[454]\ttrain-auc:0.981264\teval-auc:0.963857\n",
      "[455]\ttrain-auc:0.981297\teval-auc:0.963874\n",
      "[456]\ttrain-auc:0.981317\teval-auc:0.96387\n",
      "[457]\ttrain-auc:0.981333\teval-auc:0.963894\n",
      "[458]\ttrain-auc:0.981346\teval-auc:0.963907\n",
      "[459]\ttrain-auc:0.981361\teval-auc:0.963922\n",
      "[460]\ttrain-auc:0.981375\teval-auc:0.963933\n",
      "[461]\ttrain-auc:0.981385\teval-auc:0.96392\n",
      "[462]\ttrain-auc:0.981414\teval-auc:0.963949\n",
      "[463]\ttrain-auc:0.981438\teval-auc:0.963935\n",
      "[464]\ttrain-auc:0.981482\teval-auc:0.963981\n",
      "[465]\ttrain-auc:0.98151\teval-auc:0.963956\n",
      "[466]\ttrain-auc:0.981546\teval-auc:0.963996\n",
      "[467]\ttrain-auc:0.981574\teval-auc:0.963963\n",
      "[468]\ttrain-auc:0.981583\teval-auc:0.96396\n",
      "[469]\ttrain-auc:0.981641\teval-auc:0.963972\n",
      "[470]\ttrain-auc:0.981675\teval-auc:0.963984\n",
      "[471]\ttrain-auc:0.981722\teval-auc:0.96398\n",
      "[472]\ttrain-auc:0.981751\teval-auc:0.964016\n",
      "[473]\ttrain-auc:0.981786\teval-auc:0.964023\n",
      "[474]\ttrain-auc:0.981798\teval-auc:0.964048\n",
      "[475]\ttrain-auc:0.981817\teval-auc:0.964062\n",
      "[476]\ttrain-auc:0.981844\teval-auc:0.964082\n",
      "[477]\ttrain-auc:0.981881\teval-auc:0.964129\n",
      "[478]\ttrain-auc:0.981891\teval-auc:0.964112\n",
      "[479]\ttrain-auc:0.981916\teval-auc:0.96412\n",
      "[480]\ttrain-auc:0.981942\teval-auc:0.964144\n",
      "[481]\ttrain-auc:0.981959\teval-auc:0.964154\n",
      "[482]\ttrain-auc:0.981981\teval-auc:0.964227\n",
      "[483]\ttrain-auc:0.981989\teval-auc:0.964227\n",
      "[484]\ttrain-auc:0.982001\teval-auc:0.964227\n",
      "[485]\ttrain-auc:0.98202\teval-auc:0.964225\n",
      "[486]\ttrain-auc:0.98203\teval-auc:0.964231\n",
      "[487]\ttrain-auc:0.982059\teval-auc:0.964241\n",
      "[488]\ttrain-auc:0.982093\teval-auc:0.964248\n",
      "[489]\ttrain-auc:0.982118\teval-auc:0.964282\n",
      "[490]\ttrain-auc:0.982145\teval-auc:0.964295\n",
      "[491]\ttrain-auc:0.982167\teval-auc:0.96431\n",
      "[492]\ttrain-auc:0.982171\teval-auc:0.964336\n",
      "[493]\ttrain-auc:0.98219\teval-auc:0.964354\n",
      "[494]\ttrain-auc:0.982232\teval-auc:0.964361\n",
      "[495]\ttrain-auc:0.982237\teval-auc:0.964357\n",
      "[496]\ttrain-auc:0.982262\teval-auc:0.964375\n",
      "[497]\ttrain-auc:0.982313\teval-auc:0.964371\n",
      "[498]\ttrain-auc:0.982322\teval-auc:0.964349\n",
      "[499]\ttrain-auc:0.982357\teval-auc:0.964359\n",
      "[500]\ttrain-auc:0.982399\teval-auc:0.964372\n",
      "[501]\ttrain-auc:0.982428\teval-auc:0.964384\n",
      "[502]\ttrain-auc:0.982459\teval-auc:0.964405\n",
      "[503]\ttrain-auc:0.982465\teval-auc:0.964364\n",
      "[504]\ttrain-auc:0.982477\teval-auc:0.96434\n",
      "[505]\ttrain-auc:0.982491\teval-auc:0.964325\n",
      "[506]\ttrain-auc:0.982531\teval-auc:0.964353\n",
      "[507]\ttrain-auc:0.982575\teval-auc:0.964368\n",
      "[508]\ttrain-auc:0.982589\teval-auc:0.964364\n",
      "[509]\ttrain-auc:0.98261\teval-auc:0.964368\n",
      "[510]\ttrain-auc:0.98263\teval-auc:0.964409\n",
      "[511]\ttrain-auc:0.982645\teval-auc:0.964413\n",
      "[512]\ttrain-auc:0.982689\teval-auc:0.964464\n",
      "[513]\ttrain-auc:0.982698\teval-auc:0.964442\n",
      "[514]\ttrain-auc:0.982708\teval-auc:0.964433\n",
      "[515]\ttrain-auc:0.982729\teval-auc:0.964392\n",
      "[516]\ttrain-auc:0.982739\teval-auc:0.964394\n",
      "[517]\ttrain-auc:0.982745\teval-auc:0.964385\n",
      "[518]\ttrain-auc:0.982778\teval-auc:0.964363\n",
      "[519]\ttrain-auc:0.982771\teval-auc:0.964364\n",
      "[520]\ttrain-auc:0.982794\teval-auc:0.964391\n",
      "[521]\ttrain-auc:0.98281\teval-auc:0.964409\n",
      "[522]\ttrain-auc:0.982828\teval-auc:0.964412\n",
      "[523]\ttrain-auc:0.982854\teval-auc:0.96439\n",
      "[524]\ttrain-auc:0.982879\teval-auc:0.964423\n",
      "[525]\ttrain-auc:0.982887\teval-auc:0.964419\n",
      "[526]\ttrain-auc:0.982932\teval-auc:0.964412\n",
      "[527]\ttrain-auc:0.982952\teval-auc:0.964434\n",
      "[528]\ttrain-auc:0.982962\teval-auc:0.964445\n",
      "[529]\ttrain-auc:0.982967\teval-auc:0.964458\n",
      "[530]\ttrain-auc:0.982987\teval-auc:0.964416\n",
      "[531]\ttrain-auc:0.983003\teval-auc:0.964433\n",
      "[532]\ttrain-auc:0.983032\teval-auc:0.964433\n",
      "Stopping. Best iteration:\n",
      "[512]\ttrain-auc:0.982689\teval-auc:0.964464\n",
      "\n",
      "[  1.40679447e-04   4.28390544e-04   2.77773174e-03 ...,   2.96358615e-01\n",
      "   2.53377389e-02   3.65378886e-01]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/anaconda3/envs/tf/lib/python3.5/site-packages/ipykernel_launcher.py:60: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n"
     ]
    }
   ],
   "source": [
    "# Build feature matrix / label vector; userid stays out of the features.\n",
    "train_feat = train.drop(['userid','orderType'],axis=1)\n",
    "train_label = train['orderType']\n",
    "\n",
    "test_feat = test.drop(['userid','orderType'],axis=1)\n",
    "# .copy() so assigning the prediction column below does not raise SettingWithCopyWarning\n",
    "test_index = test[['userid']].copy()\n",
    "\n",
    "from sklearn.model_selection import train_test_split\n",
    "train_x,val_x,train_y,val_y = train_test_split(train_feat,train_label,test_size=0.3,random_state=1)\n",
    "\n",
    "import xgboost as xgb\n",
    "dtrain = xgb.DMatrix(train_x,label=train_y)\n",
    "dval = xgb.DMatrix(val_x,label=val_y)\n",
    "\n",
    "param = {\n",
    "    'max_depth':3,\n",
    "    'learning_rate':0.1,\n",
    "    'n_estimators':10000,\n",
    "    'silent':False,\n",
    "    'objective':'binary:logistic',\n",
    "    'booster':'gbtree',\n",
    "    'n_jobs':10,\n",
    "    'nthread':100,  # NOTE(review): nthread and n_jobs conflict; nthread wins in older xgboost\n",
    "    'gamma':0,\n",
    "    'min_child_weight':1,\n",
    "    'max_delta_step':0,\n",
    "    'subsample':0.8,\n",
    "    'colsample_bytree':0.8,\n",
    "    'colsample_bylevel':0.8,\n",
    "    'reg_alpha':0.2,\n",
    "    'reg_lambda':0.8,\n",
    "    'scale_pos_weight':0.16,\n",
    "    'seed':2017\n",
    "}\n",
    "num_round = 5000\n",
    "plst = list(param.items())\n",
    "plst += [('eval_metric','auc')]\n",
    "evallist = [(dtrain,'train'),(dval,'eval')]\n",
    "\n",
    "bst = xgb.train(plst,dtrain,num_round,evallist,early_stopping_rounds=20)\n",
    "\n",
    "# Score the test set and write the submission (userid, orderType probability).\n",
    "dtest = xgb.DMatrix(test_feat)\n",
    "pred = bst.predict(dtest)\n",
    "print(pred)\n",
    "test_index['orderType'] = pred\n",
    "test_index.to_csv('../result/sumbit_huang.csv',index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "finish\n"
     ]
    }
   ],
   "source": [
    "import xgboost as xgb\n",
    "\n",
    "def xgb_train_online_find_best_round(bst, train_feat, train_label):\n",
    "    \"\"\"Pick the best n_estimators for `bst` via 5-fold CV with early stopping.\n",
    "\n",
    "    Runs xgb.cv on (train_feat, train_label) with metric AUC, then sets\n",
    "    bst.n_estimators to the round count that maximised the mean test AUC\n",
    "    and returns the (not yet fitted) estimator.\n",
    "    \"\"\"\n",
    "    xgb_param = bst.get_xgb_params()\n",
    "    dtrain = xgb.DMatrix(train_feat, label=train_label)\n",
    "    df = xgb.cv(xgb_param, dtrain, num_boost_round=bst.get_params()['n_estimators'],\n",
    "                nfold=5, metrics='auc', early_stopping_rounds=20)\n",
    "    print(df)\n",
    "    # np.argmax returns the 0-based index of the best CV row; the number of\n",
    "    # boosting rounds to train is index + 1 (fixes a previous off-by-one where\n",
    "    # 517 CV rows led to only 516 estimators being set).\n",
    "    best_rounds = int(np.argmax(df['test-auc-mean'])) + 1\n",
    "    bst.set_params(n_estimators=best_rounds)\n",
    "    print(best_rounds)\n",
    "    return bst\n",
    "\n",
    "print('finish')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/anaconda3/envs/tf/lib/python3.5/site-packages/jupyter_client/jsonutil.py:67: DeprecationWarning: Interpreting naive datetime as local 2018-02-05 01:53:57.931012. Please add timezone info to timestamps.\n",
      "  new_obj[k] = extract_dates(v)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "     test-auc-mean  test-auc-std  train-auc-mean  train-auc-std\n",
      "0         0.798318      0.023312        0.800173       0.019000\n",
      "1         0.856537      0.006281        0.858678       0.009660\n",
      "2         0.872525      0.010226        0.876537       0.006514\n",
      "3         0.878699      0.011657        0.881855       0.005964\n",
      "4         0.880413      0.011596        0.883187       0.005123\n",
      "5         0.882444      0.009487        0.884618       0.004369\n",
      "6         0.886932      0.011329        0.889066       0.006284\n",
      "7         0.890644      0.007644        0.892505       0.006268\n",
      "8         0.892527      0.008400        0.894852       0.006075\n",
      "9         0.893966      0.008289        0.896908       0.006221\n",
      "10        0.897444      0.007803        0.900263       0.003419\n",
      "11        0.898008      0.007799        0.900945       0.003423\n",
      "12        0.899709      0.006157        0.902643       0.002400\n",
      "13        0.900657      0.005876        0.903489       0.002382\n",
      "14        0.902788      0.005956        0.905808       0.002842\n",
      "15        0.903632      0.005858        0.906554       0.002693\n",
      "16        0.905648      0.006274        0.908361       0.002000\n",
      "17        0.906828      0.005560        0.909788       0.002391\n",
      "18        0.907179      0.005517        0.910291       0.003031\n",
      "19        0.909811      0.005208        0.912633       0.002032\n",
      "20        0.911417      0.004176        0.914154       0.003150\n",
      "21        0.912901      0.004311        0.915550       0.003115\n",
      "22        0.914256      0.004598        0.916811       0.002968\n",
      "23        0.915058      0.005086        0.917932       0.002637\n",
      "24        0.915502      0.005143        0.918965       0.002355\n",
      "25        0.916554      0.004982        0.920241       0.002289\n",
      "26        0.917204      0.005124        0.921189       0.002118\n",
      "27        0.918323      0.005052        0.922334       0.001699\n",
      "28        0.919314      0.004879        0.923453       0.001796\n",
      "29        0.920527      0.005885        0.924782       0.001856\n",
      "..             ...           ...             ...            ...\n",
      "487       0.966977      0.001835        0.988415       0.000232\n",
      "488       0.966981      0.001822        0.988448       0.000236\n",
      "489       0.966984      0.001815        0.988474       0.000229\n",
      "490       0.966965      0.001837        0.988491       0.000219\n",
      "491       0.966987      0.001834        0.988511       0.000235\n",
      "492       0.966996      0.001830        0.988540       0.000232\n",
      "493       0.966992      0.001852        0.988571       0.000227\n",
      "494       0.966989      0.001829        0.988602       0.000212\n",
      "495       0.967007      0.001838        0.988629       0.000218\n",
      "496       0.967004      0.001830        0.988649       0.000212\n",
      "497       0.967012      0.001818        0.988681       0.000212\n",
      "498       0.967023      0.001795        0.988709       0.000214\n",
      "499       0.967023      0.001784        0.988733       0.000203\n",
      "500       0.967018      0.001780        0.988765       0.000199\n",
      "501       0.967013      0.001778        0.988784       0.000204\n",
      "502       0.966999      0.001782        0.988794       0.000217\n",
      "503       0.967004      0.001799        0.988817       0.000231\n",
      "504       0.967016      0.001791        0.988837       0.000225\n",
      "505       0.967008      0.001780        0.988864       0.000223\n",
      "506       0.967006      0.001779        0.988887       0.000227\n",
      "507       0.967019      0.001780        0.988906       0.000222\n",
      "508       0.967007      0.001774        0.988929       0.000224\n",
      "509       0.967018      0.001780        0.988952       0.000227\n",
      "510       0.967038      0.001794        0.988979       0.000223\n",
      "511       0.967037      0.001781        0.988999       0.000220\n",
      "512       0.967027      0.001777        0.989023       0.000223\n",
      "513       0.967037      0.001779        0.989051       0.000210\n",
      "514       0.967037      0.001776        0.989090       0.000207\n",
      "515       0.967045      0.001763        0.989117       0.000210\n",
      "516       0.967050      0.001762        0.989147       0.000221\n",
      "\n",
      "[517 rows x 4 columns]\n",
      "516\n",
      "[  3.37704376e-04   9.59514218e-05   5.66111831e-03 ...,   2.92338908e-01\n",
      "   8.16270057e-03   3.69250566e-01]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/anaconda3/envs/tf/lib/python3.5/site-packages/ipykernel_launcher.py:32: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n"
     ]
    }
   ],
   "source": [
    "import xgboost as xgb\n",
    "\n",
    "# n_estimators here is only an upper bound for the CV search; the actual round\n",
    "# count is chosen by xgb_train_online_find_best_round below.\n",
    "bst = xgb.XGBClassifier(max_depth=4, learning_rate=0.1,\n",
    "              n_estimators=20000,\n",
    "              silent=False,\n",
    "              objective='binary:logistic',\n",
    "              booster='gbtree',\n",
    "              n_jobs=10,  # NOTE(review): nthread (below) overrides n_jobs in older xgboost\n",
    "              nthread=1000,\n",
    "              gamma=0.2,\n",
    "              min_child_weight=1,\n",
    "              max_delta_step=0,\n",
    "              subsample=0.8,\n",
    "              colsample_bytree=0.8,\n",
    "              colsample_bylevel=0.8,\n",
    "              reg_alpha=0.3,\n",
    "              reg_lambda=0.7,\n",
    "              scale_pos_weight=0.16,\n",
    "              seed=2017001)\n",
    "\n",
    "train_feat = train.drop(['userid','orderType'], axis=1)\n",
    "train_label = train['orderType']\n",
    "\n",
    "test_feat = test.drop(['userid','orderType'], axis=1)\n",
    "# .copy() so assigning the prediction column below does not raise SettingWithCopyWarning\n",
    "test_index = test[['userid']].copy()\n",
    "\n",
    "bst = xgb_train_online_find_best_round(bst, train_feat, train_label)\n",
    "bst.fit(train_feat, train_label, eval_metric='auc')\n",
    "\n",
    "# Column 1 of predict_proba is P(orderType == 1).\n",
    "pred = bst.predict_proba(test_feat)\n",
    "print(pred[:,1])\n",
    "test_index['orderType'] = pred[:,1]\n",
    "test_index.to_csv('../result/sumbit_xgb.csv', index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (tf)",
   "language": "python",
   "name": "env-tf"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
